Merge pull request #943 from liangyongxiang/fcitx5-gtk
[gentoo-zh.git] / sys-kernel / xanmod-hybird / files / patch-5.12.5-xanmod1
blob997dfedcd01f58b31af46da3737ff916bfd49981
1 diff --git a/.config b/.config
2 new file mode 100644
3 index 000000000000..90ba522138b1
4 --- /dev/null
5 +++ b/.config
6 @@ -0,0 +1,11068 @@
7 +#
8 +# Automatically generated file; DO NOT EDIT.
9 +# Linux/x86 5.12.5 Kernel Configuration
11 +CONFIG_CC_VERSION_TEXT="gcc-11 (Debian 11.1.0-1) 11.1.0"
12 +CONFIG_CC_IS_GCC=y
13 +CONFIG_GCC_VERSION=110100
14 +CONFIG_CLANG_VERSION=0
15 +CONFIG_LD_IS_BFD=y
16 +CONFIG_LD_VERSION=23502
17 +CONFIG_LLD_VERSION=0
18 +CONFIG_CC_CAN_LINK=y
19 +CONFIG_CC_CAN_LINK_STATIC=y
20 +CONFIG_CC_HAS_ASM_GOTO=y
21 +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
22 +CONFIG_CC_HAS_ASM_INLINE=y
23 +CONFIG_IRQ_WORK=y
24 +CONFIG_BUILDTIME_TABLE_SORT=y
25 +CONFIG_THREAD_INFO_IN_TASK=y
28 +# General setup
30 +CONFIG_INIT_ENV_ARG_LIMIT=32
31 +# CONFIG_COMPILE_TEST is not set
32 +CONFIG_LOCALVERSION=""
33 +# CONFIG_LOCALVERSION_AUTO is not set
34 +CONFIG_BUILD_SALT=""
35 +CONFIG_HAVE_KERNEL_GZIP=y
36 +CONFIG_HAVE_KERNEL_BZIP2=y
37 +CONFIG_HAVE_KERNEL_LZMA=y
38 +CONFIG_HAVE_KERNEL_XZ=y
39 +CONFIG_HAVE_KERNEL_LZO=y
40 +CONFIG_HAVE_KERNEL_LZ4=y
41 +CONFIG_HAVE_KERNEL_ZSTD=y
42 +# CONFIG_KERNEL_GZIP is not set
43 +# CONFIG_KERNEL_BZIP2 is not set
44 +# CONFIG_KERNEL_LZMA is not set
45 +# CONFIG_KERNEL_XZ is not set
46 +# CONFIG_KERNEL_LZO is not set
47 +# CONFIG_KERNEL_LZ4 is not set
48 +CONFIG_KERNEL_ZSTD=y
49 +CONFIG_DEFAULT_INIT=""
50 +CONFIG_DEFAULT_HOSTNAME="(none)"
51 +CONFIG_SWAP=y
52 +CONFIG_SYSVIPC=y
53 +CONFIG_SYSVIPC_SYSCTL=y
54 +CONFIG_POSIX_MQUEUE=y
55 +CONFIG_POSIX_MQUEUE_SYSCTL=y
56 +CONFIG_WATCH_QUEUE=y
57 +CONFIG_CROSS_MEMORY_ATTACH=y
58 +CONFIG_USELIB=y
59 +CONFIG_AUDIT=y
60 +CONFIG_HAVE_ARCH_AUDITSYSCALL=y
61 +CONFIG_AUDITSYSCALL=y
64 +# IRQ subsystem
66 +CONFIG_GENERIC_IRQ_PROBE=y
67 +CONFIG_GENERIC_IRQ_SHOW=y
68 +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
69 +CONFIG_GENERIC_PENDING_IRQ=y
70 +CONFIG_GENERIC_IRQ_MIGRATION=y
71 +CONFIG_HARDIRQS_SW_RESEND=y
72 +CONFIG_GENERIC_IRQ_CHIP=y
73 +CONFIG_IRQ_DOMAIN=y
74 +CONFIG_IRQ_DOMAIN_HIERARCHY=y
75 +CONFIG_GENERIC_MSI_IRQ=y
76 +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
77 +CONFIG_IRQ_MSI_IOMMU=y
78 +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y
79 +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y
80 +CONFIG_IRQ_FORCED_THREADING=y
81 +CONFIG_SPARSE_IRQ=y
82 +# CONFIG_GENERIC_IRQ_DEBUGFS is not set
83 +# end of IRQ subsystem
85 +CONFIG_CLOCKSOURCE_WATCHDOG=y
86 +CONFIG_ARCH_CLOCKSOURCE_INIT=y
87 +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
88 +CONFIG_GENERIC_TIME_VSYSCALL=y
89 +CONFIG_GENERIC_CLOCKEVENTS=y
90 +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
91 +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
92 +CONFIG_GENERIC_CMOS_UPDATE=y
93 +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y
94 +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
97 +# Timers subsystem
99 +CONFIG_TICK_ONESHOT=y
100 +CONFIG_NO_HZ_COMMON=y
101 +# CONFIG_HZ_PERIODIC is not set
102 +# CONFIG_NO_HZ_IDLE is not set
103 +CONFIG_NO_HZ_FULL=y
104 +CONFIG_CONTEXT_TRACKING=y
105 +# CONFIG_CONTEXT_TRACKING_FORCE is not set
106 +# CONFIG_NO_HZ is not set
107 +CONFIG_HIGH_RES_TIMERS=y
108 +# end of Timers subsystem
110 +# CONFIG_PREEMPT_NONE is not set
111 +# CONFIG_PREEMPT_VOLUNTARY is not set
112 +CONFIG_PREEMPT=y
113 +CONFIG_PREEMPT_COUNT=y
114 +CONFIG_PREEMPTION=y
115 +CONFIG_PREEMPT_DYNAMIC=y
118 +# CPU/Task time and stats accounting
120 +CONFIG_VIRT_CPU_ACCOUNTING=y
121 +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
122 +# CONFIG_IRQ_TIME_ACCOUNTING is not set
123 +CONFIG_BSD_PROCESS_ACCT=y
124 +CONFIG_BSD_PROCESS_ACCT_V3=y
125 +CONFIG_TASKSTATS=y
126 +CONFIG_TASK_DELAY_ACCT=y
127 +CONFIG_TASK_XACCT=y
128 +CONFIG_TASK_IO_ACCOUNTING=y
129 +CONFIG_PSI=y
130 +CONFIG_PSI_DEFAULT_DISABLED=y
131 +# end of CPU/Task time and stats accounting
133 +CONFIG_CPU_ISOLATION=y
136 +# RCU Subsystem
138 +CONFIG_TREE_RCU=y
139 +CONFIG_PREEMPT_RCU=y
140 +CONFIG_RCU_EXPERT=y
141 +CONFIG_SRCU=y
142 +CONFIG_TREE_SRCU=y
143 +CONFIG_TASKS_RCU_GENERIC=y
144 +CONFIG_TASKS_RCU=y
145 +CONFIG_TASKS_TRACE_RCU=y
146 +CONFIG_RCU_STALL_COMMON=y
147 +CONFIG_RCU_NEED_SEGCBLIST=y
148 +CONFIG_RCU_FANOUT=64
149 +CONFIG_RCU_FANOUT_LEAF=16
150 +# CONFIG_RCU_FAST_NO_HZ is not set
151 +CONFIG_RCU_BOOST=y
152 +CONFIG_RCU_BOOST_DELAY=0
153 +CONFIG_RCU_NOCB_CPU=y
154 +# CONFIG_TASKS_TRACE_RCU_READ_MB is not set
155 +# end of RCU Subsystem
157 +CONFIG_BUILD_BIN2C=y
158 +# CONFIG_IKCONFIG is not set
159 +CONFIG_IKHEADERS=m
160 +CONFIG_LOG_BUF_SHIFT=18
161 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
162 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
163 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
166 +# Scheduler features
168 +CONFIG_UCLAMP_TASK=y
169 +CONFIG_UCLAMP_BUCKETS_COUNT=5
170 +# end of Scheduler features
172 +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
173 +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
174 +CONFIG_CC_HAS_INT128=y
175 +CONFIG_ARCH_SUPPORTS_INT128=y
176 +CONFIG_NUMA_BALANCING=y
177 +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
178 +CONFIG_CGROUPS=y
179 +CONFIG_PAGE_COUNTER=y
180 +CONFIG_MEMCG=y
181 +CONFIG_MEMCG_SWAP=y
182 +CONFIG_MEMCG_KMEM=y
183 +CONFIG_BLK_CGROUP=y
184 +CONFIG_CGROUP_WRITEBACK=y
185 +CONFIG_CGROUP_SCHED=y
186 +CONFIG_FAIR_GROUP_SCHED=y
187 +CONFIG_CFS_BANDWIDTH=y
188 +# CONFIG_RT_GROUP_SCHED is not set
189 +CONFIG_UCLAMP_TASK_GROUP=y
190 +CONFIG_CGROUP_PIDS=y
191 +CONFIG_CGROUP_RDMA=y
192 +CONFIG_CGROUP_FREEZER=y
193 +CONFIG_CGROUP_HUGETLB=y
194 +CONFIG_CPUSETS=y
195 +CONFIG_PROC_PID_CPUSET=y
196 +CONFIG_CGROUP_DEVICE=y
197 +CONFIG_CGROUP_CPUACCT=y
198 +CONFIG_CGROUP_PERF=y
199 +CONFIG_CGROUP_BPF=y
200 +# CONFIG_CGROUP_DEBUG is not set
201 +CONFIG_SOCK_CGROUP_DATA=y
202 +CONFIG_NAMESPACES=y
203 +CONFIG_UTS_NS=y
204 +CONFIG_TIME_NS=y
205 +CONFIG_IPC_NS=y
206 +CONFIG_USER_NS=y
207 +CONFIG_PID_NS=y
208 +CONFIG_NET_NS=y
209 +CONFIG_CHECKPOINT_RESTORE=y
210 +CONFIG_SCHED_AUTOGROUP=y
211 +CONFIG_SCHED_AUTOGROUP_DEFAULT_ENABLED=y
212 +# CONFIG_SYSFS_DEPRECATED is not set
213 +CONFIG_RELAY=y
214 +CONFIG_BLK_DEV_INITRD=y
215 +CONFIG_INITRAMFS_SOURCE=""
216 +CONFIG_RD_GZIP=y
217 +CONFIG_RD_BZIP2=y
218 +CONFIG_RD_LZMA=y
219 +CONFIG_RD_XZ=y
220 +CONFIG_RD_LZO=y
221 +CONFIG_RD_LZ4=y
222 +CONFIG_RD_ZSTD=y
223 +CONFIG_BOOT_CONFIG=y
224 +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
225 +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
226 +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
227 +CONFIG_LD_ORPHAN_WARN=y
228 +CONFIG_SYSCTL=y
229 +CONFIG_HAVE_UID16=y
230 +CONFIG_SYSCTL_EXCEPTION_TRACE=y
231 +CONFIG_HAVE_PCSPKR_PLATFORM=y
232 +CONFIG_BPF=y
233 +CONFIG_EXPERT=y
234 +CONFIG_UID16=y
235 +CONFIG_MULTIUSER=y
236 +CONFIG_SGETMASK_SYSCALL=y
237 +CONFIG_SYSFS_SYSCALL=y
238 +CONFIG_FHANDLE=y
239 +CONFIG_POSIX_TIMERS=y
240 +CONFIG_PRINTK=y
241 +CONFIG_PRINTK_NMI=y
242 +CONFIG_BUG=y
243 +CONFIG_ELF_CORE=y
244 +CONFIG_PCSPKR_PLATFORM=y
245 +CONFIG_BASE_FULL=y
246 +CONFIG_FUTEX=y
247 +CONFIG_FUTEX2=y
248 +CONFIG_FUTEX_PI=y
249 +CONFIG_EPOLL=y
250 +CONFIG_SIGNALFD=y
251 +CONFIG_TIMERFD=y
252 +CONFIG_EVENTFD=y
253 +CONFIG_SHMEM=y
254 +CONFIG_AIO=y
255 +CONFIG_IO_URING=y
256 +CONFIG_ADVISE_SYSCALLS=y
257 +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y
258 +CONFIG_MEMBARRIER=y
259 +CONFIG_KALLSYMS=y
260 +CONFIG_KALLSYMS_ALL=y
261 +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
262 +CONFIG_KALLSYMS_BASE_RELATIVE=y
263 +CONFIG_BPF_SYSCALL=y
264 +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
265 +CONFIG_BPF_JIT_ALWAYS_ON=y
266 +CONFIG_BPF_JIT_DEFAULT_ON=y
267 +CONFIG_USERMODE_DRIVER=y
268 +# CONFIG_BPF_PRELOAD is not set
269 +CONFIG_USERFAULTFD=y
270 +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
271 +CONFIG_KCMP=y
272 +CONFIG_RSEQ=y
273 +# CONFIG_DEBUG_RSEQ is not set
274 +# CONFIG_EMBEDDED is not set
275 +CONFIG_HAVE_PERF_EVENTS=y
276 +CONFIG_PC104=y
279 +# Kernel Performance Events And Counters
281 +CONFIG_PERF_EVENTS=y
282 +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
283 +# end of Kernel Performance Events And Counters
285 +CONFIG_VM_EVENT_COUNTERS=y
286 +CONFIG_SLUB_DEBUG=y
287 +# CONFIG_COMPAT_BRK is not set
288 +# CONFIG_SLAB is not set
289 +CONFIG_SLUB=y
290 +# CONFIG_SLOB is not set
291 +CONFIG_SLAB_MERGE_DEFAULT=y
292 +CONFIG_SLAB_FREELIST_RANDOM=y
293 +CONFIG_SLAB_FREELIST_HARDENED=y
294 +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
295 +CONFIG_SLUB_CPU_PARTIAL=y
296 +CONFIG_SYSTEM_DATA_VERIFICATION=y
297 +CONFIG_PROFILING=y
298 +# end of General setup
300 +CONFIG_64BIT=y
301 +CONFIG_X86_64=y
302 +CONFIG_X86=y
303 +CONFIG_INSTRUCTION_DECODER=y
304 +CONFIG_OUTPUT_FORMAT="elf64-x86-64"
305 +CONFIG_LOCKDEP_SUPPORT=y
306 +CONFIG_STACKTRACE_SUPPORT=y
307 +CONFIG_MMU=y
308 +CONFIG_ARCH_MMAP_RND_BITS_MIN=28
309 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32
310 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
311 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
312 +CONFIG_GENERIC_ISA_DMA=y
313 +CONFIG_GENERIC_BUG=y
314 +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
315 +CONFIG_ARCH_MAY_HAVE_PC_FDC=y
316 +CONFIG_GENERIC_CALIBRATE_DELAY=y
317 +CONFIG_ARCH_HAS_CPU_RELAX=y
318 +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
319 +CONFIG_ARCH_HAS_FILTER_PGPROT=y
320 +CONFIG_HAVE_SETUP_PER_CPU_AREA=y
321 +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
322 +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
323 +CONFIG_ARCH_HIBERNATION_POSSIBLE=y
324 +CONFIG_ARCH_SUSPEND_POSSIBLE=y
325 +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
326 +CONFIG_ZONE_DMA32=y
327 +CONFIG_AUDIT_ARCH=y
328 +CONFIG_HAVE_INTEL_TXT=y
329 +CONFIG_X86_64_SMP=y
330 +CONFIG_ARCH_SUPPORTS_UPROBES=y
331 +CONFIG_FIX_EARLYCON_MEM=y
332 +CONFIG_DYNAMIC_PHYSICAL_MASK=y
333 +CONFIG_PGTABLE_LEVELS=5
334 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
337 +# Processor type and features
339 +CONFIG_ZONE_DMA=y
340 +CONFIG_SMP=y
341 +CONFIG_X86_FEATURE_NAMES=y
342 +CONFIG_X86_X2APIC=y
343 +CONFIG_X86_MPPARSE=y
344 +# CONFIG_GOLDFISH is not set
345 +CONFIG_RETPOLINE=y
346 +CONFIG_X86_CPU_RESCTRL=y
347 +CONFIG_X86_EXTENDED_PLATFORM=y
348 +CONFIG_X86_NUMACHIP=y
349 +# CONFIG_X86_VSMP is not set
350 +CONFIG_X86_UV=y
351 +# CONFIG_X86_GOLDFISH is not set
352 +# CONFIG_X86_INTEL_MID is not set
353 +CONFIG_X86_INTEL_LPSS=y
354 +CONFIG_X86_AMD_PLATFORM_DEVICE=y
355 +CONFIG_IOSF_MBI=y
356 +CONFIG_IOSF_MBI_DEBUG=y
357 +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
358 +CONFIG_SCHED_OMIT_FRAME_POINTER=y
359 +CONFIG_HYPERVISOR_GUEST=y
360 +CONFIG_PARAVIRT=y
361 +CONFIG_PARAVIRT_XXL=y
362 +# CONFIG_PARAVIRT_DEBUG is not set
363 +CONFIG_PARAVIRT_SPINLOCKS=y
364 +CONFIG_X86_HV_CALLBACK_VECTOR=y
365 +CONFIG_XEN=y
366 +CONFIG_XEN_PV=y
367 +CONFIG_XEN_512GB=y
368 +CONFIG_XEN_PV_SMP=y
369 +CONFIG_XEN_DOM0=y
370 +CONFIG_XEN_PVHVM=y
371 +CONFIG_XEN_PVHVM_SMP=y
372 +CONFIG_XEN_PVHVM_GUEST=y
373 +CONFIG_XEN_SAVE_RESTORE=y
374 +# CONFIG_XEN_DEBUG_FS is not set
375 +CONFIG_XEN_PVH=y
376 +CONFIG_KVM_GUEST=y
377 +CONFIG_ARCH_CPUIDLE_HALTPOLL=y
378 +CONFIG_PVH=y
379 +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
380 +CONFIG_PARAVIRT_CLOCK=y
381 +CONFIG_JAILHOUSE_GUEST=y
382 +CONFIG_ACRN_GUEST=y
383 +# CONFIG_MK8 is not set
384 +# CONFIG_MK8SSE3 is not set
385 +# CONFIG_MK10 is not set
386 +# CONFIG_MBARCELONA is not set
387 +# CONFIG_MBOBCAT is not set
388 +# CONFIG_MJAGUAR is not set
389 +# CONFIG_MBULLDOZER is not set
390 +# CONFIG_MPILEDRIVER is not set
391 +# CONFIG_MSTEAMROLLER is not set
392 +# CONFIG_MEXCAVATOR is not set
393 +# CONFIG_MZEN is not set
394 +# CONFIG_MZEN2 is not set
395 +# CONFIG_MZEN3 is not set
396 +# CONFIG_MPSC is not set
397 +# CONFIG_MCORE2 is not set
398 +# CONFIG_MATOM is not set
399 +# CONFIG_MNEHALEM is not set
400 +# CONFIG_MWESTMERE is not set
401 +# CONFIG_MSILVERMONT is not set
402 +# CONFIG_MGOLDMONT is not set
403 +# CONFIG_MGOLDMONTPLUS is not set
404 +# CONFIG_MSANDYBRIDGE is not set
405 +# CONFIG_MIVYBRIDGE is not set
406 +# CONFIG_MHASWELL is not set
407 +# CONFIG_MBROADWELL is not set
408 +# CONFIG_MSKYLAKE is not set
409 +# CONFIG_MSKYLAKEX is not set
410 +# CONFIG_MCANNONLAKE is not set
411 +# CONFIG_MICELAKE is not set
412 +# CONFIG_MCASCADELAKE is not set
413 +# CONFIG_MCOOPERLAKE is not set
414 +# CONFIG_MTIGERLAKE is not set
415 +# CONFIG_MSAPPHIRERAPIDS is not set
416 +# CONFIG_MROCKETLAKE is not set
417 +# CONFIG_MALDERLAKE is not set
418 +CONFIG_GENERIC_CPU=y
419 +# CONFIG_GENERIC_CPU2 is not set
420 +# CONFIG_GENERIC_CPU3 is not set
421 +# CONFIG_GENERIC_CPU4 is not set
422 +# CONFIG_MNATIVE_INTEL is not set
423 +# CONFIG_MNATIVE_AMD is not set
424 +CONFIG_X86_INTERNODE_CACHE_SHIFT=6
425 +CONFIG_X86_L1_CACHE_SHIFT=6
426 +CONFIG_X86_TSC=y
427 +CONFIG_X86_CMPXCHG64=y
428 +CONFIG_X86_CMOV=y
429 +CONFIG_X86_MINIMUM_CPU_FAMILY=64
430 +CONFIG_X86_DEBUGCTLMSR=y
431 +CONFIG_IA32_FEAT_CTL=y
432 +CONFIG_X86_VMX_FEATURE_NAMES=y
433 +CONFIG_PROCESSOR_SELECT=y
434 +CONFIG_CPU_SUP_INTEL=y
435 +CONFIG_CPU_SUP_AMD=y
436 +CONFIG_CPU_SUP_HYGON=y
437 +CONFIG_CPU_SUP_CENTAUR=y
438 +CONFIG_CPU_SUP_ZHAOXIN=y
439 +CONFIG_HPET_TIMER=y
440 +CONFIG_HPET_EMULATE_RTC=y
441 +CONFIG_DMI=y
442 +CONFIG_GART_IOMMU=y
443 +# CONFIG_MAXSMP is not set
444 +CONFIG_NR_CPUS_RANGE_BEGIN=2
445 +CONFIG_NR_CPUS_RANGE_END=512
446 +CONFIG_NR_CPUS_DEFAULT=64
447 +CONFIG_NR_CPUS=512
448 +CONFIG_SCHED_SMT=y
449 +CONFIG_SCHED_MC=y
450 +CONFIG_SCHED_MC_PRIO=y
451 +CONFIG_X86_LOCAL_APIC=y
452 +CONFIG_X86_IO_APIC=y
453 +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
454 +CONFIG_X86_MCE=y
455 +CONFIG_X86_MCELOG_LEGACY=y
456 +CONFIG_X86_MCE_INTEL=y
457 +CONFIG_X86_MCE_AMD=y
458 +CONFIG_X86_MCE_THRESHOLD=y
459 +CONFIG_X86_MCE_INJECT=m
462 +# Performance monitoring
464 +CONFIG_PERF_EVENTS_INTEL_UNCORE=y
465 +CONFIG_PERF_EVENTS_INTEL_RAPL=m
466 +CONFIG_PERF_EVENTS_INTEL_CSTATE=m
467 +# CONFIG_PERF_EVENTS_AMD_POWER is not set
468 +# end of Performance monitoring
470 +CONFIG_X86_16BIT=y
471 +CONFIG_X86_ESPFIX64=y
472 +CONFIG_X86_VSYSCALL_EMULATION=y
473 +CONFIG_X86_IOPL_IOPERM=y
474 +CONFIG_I8K=m
475 +CONFIG_MICROCODE=y
476 +CONFIG_MICROCODE_INTEL=y
477 +CONFIG_MICROCODE_AMD=y
478 +CONFIG_MICROCODE_OLD_INTERFACE=y
479 +CONFIG_X86_MSR=m
480 +CONFIG_X86_CPUID=m
481 +CONFIG_X86_5LEVEL=y
482 +CONFIG_X86_DIRECT_GBPAGES=y
483 +# CONFIG_X86_CPA_STATISTICS is not set
484 +CONFIG_AMD_MEM_ENCRYPT=y
485 +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set
486 +CONFIG_NUMA=y
487 +CONFIG_AMD_NUMA=y
488 +CONFIG_X86_64_ACPI_NUMA=y
489 +# CONFIG_NUMA_EMU is not set
490 +CONFIG_NODES_SHIFT=10
491 +CONFIG_ARCH_SPARSEMEM_ENABLE=y
492 +CONFIG_ARCH_SPARSEMEM_DEFAULT=y
493 +CONFIG_ARCH_SELECT_MEMORY_MODEL=y
494 +CONFIG_ARCH_MEMORY_PROBE=y
495 +CONFIG_ARCH_PROC_KCORE_TEXT=y
496 +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
497 +CONFIG_X86_PMEM_LEGACY_DEVICE=y
498 +CONFIG_X86_PMEM_LEGACY=y
499 +CONFIG_X86_CHECK_BIOS_CORRUPTION=y
500 +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
501 +CONFIG_X86_RESERVE_LOW=64
502 +CONFIG_MTRR=y
503 +CONFIG_MTRR_SANITIZER=y
504 +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
505 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
506 +CONFIG_X86_PAT=y
507 +CONFIG_ARCH_USES_PG_UNCACHED=y
508 +CONFIG_ARCH_RANDOM=y
509 +CONFIG_X86_SMAP=y
510 +CONFIG_X86_UMIP=y
511 +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
512 +# CONFIG_X86_INTEL_TSX_MODE_OFF is not set
513 +# CONFIG_X86_INTEL_TSX_MODE_ON is not set
514 +CONFIG_X86_INTEL_TSX_MODE_AUTO=y
515 +CONFIG_X86_SGX=y
516 +CONFIG_EFI=y
517 +CONFIG_EFI_STUB=y
518 +CONFIG_EFI_MIXED=y
519 +# CONFIG_HZ_100 is not set
520 +# CONFIG_HZ_250 is not set
521 +# CONFIG_HZ_300 is not set
522 +CONFIG_HZ_500=y
523 +# CONFIG_HZ_1000 is not set
524 +CONFIG_HZ=500
525 +CONFIG_SCHED_HRTICK=y
526 +CONFIG_KEXEC=y
527 +CONFIG_KEXEC_FILE=y
528 +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y
529 +CONFIG_KEXEC_SIG=y
530 +# CONFIG_KEXEC_SIG_FORCE is not set
531 +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y
532 +CONFIG_CRASH_DUMP=y
533 +CONFIG_KEXEC_JUMP=y
534 +CONFIG_PHYSICAL_START=0x1000000
535 +CONFIG_RELOCATABLE=y
536 +CONFIG_RANDOMIZE_BASE=y
537 +CONFIG_X86_NEED_RELOCS=y
538 +CONFIG_PHYSICAL_ALIGN=0x200000
539 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y
540 +CONFIG_RANDOMIZE_MEMORY=y
541 +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa
542 +CONFIG_HOTPLUG_CPU=y
543 +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
544 +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
545 +# CONFIG_COMPAT_VDSO is not set
546 +# CONFIG_LEGACY_VSYSCALL_EMULATE is not set
547 +CONFIG_LEGACY_VSYSCALL_XONLY=y
548 +# CONFIG_LEGACY_VSYSCALL_NONE is not set
549 +# CONFIG_CMDLINE_BOOL is not set
550 +CONFIG_MODIFY_LDT_SYSCALL=y
551 +CONFIG_HAVE_LIVEPATCH=y
552 +# end of Processor type and features
554 +CONFIG_ARCH_HAS_ADD_PAGES=y
555 +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
556 +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
557 +CONFIG_USE_PERCPU_NUMA_NODE_ID=y
558 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
559 +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
560 +CONFIG_ARCH_ENABLE_THP_MIGRATION=y
563 +# Power management and ACPI options
565 +CONFIG_ARCH_HIBERNATION_HEADER=y
566 +CONFIG_SUSPEND=y
567 +CONFIG_SUSPEND_FREEZER=y
568 +# CONFIG_SUSPEND_SKIP_SYNC is not set
569 +CONFIG_HIBERNATE_CALLBACKS=y
570 +CONFIG_HIBERNATION=y
571 +CONFIG_HIBERNATION_SNAPSHOT_DEV=y
572 +CONFIG_PM_STD_PARTITION=""
573 +CONFIG_PM_SLEEP=y
574 +CONFIG_PM_SLEEP_SMP=y
575 +# CONFIG_PM_AUTOSLEEP is not set
576 +CONFIG_PM_WAKELOCKS=y
577 +CONFIG_PM_WAKELOCKS_LIMIT=100
578 +CONFIG_PM_WAKELOCKS_GC=y
579 +CONFIG_PM=y
580 +CONFIG_PM_DEBUG=y
581 +CONFIG_PM_ADVANCED_DEBUG=y
582 +# CONFIG_PM_TEST_SUSPEND is not set
583 +CONFIG_PM_SLEEP_DEBUG=y
584 +# CONFIG_DPM_WATCHDOG is not set
585 +CONFIG_PM_TRACE=y
586 +CONFIG_PM_TRACE_RTC=y
587 +CONFIG_PM_CLK=y
588 +CONFIG_PM_GENERIC_DOMAINS=y
589 +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
590 +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y
591 +CONFIG_ENERGY_MODEL=y
592 +CONFIG_ARCH_SUPPORTS_ACPI=y
593 +CONFIG_ACPI=y
594 +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
595 +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
596 +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
597 +CONFIG_ACPI_DEBUGGER=y
598 +CONFIG_ACPI_DEBUGGER_USER=y
599 +CONFIG_ACPI_SPCR_TABLE=y
600 +CONFIG_ACPI_FPDT=y
601 +CONFIG_ACPI_LPIT=y
602 +CONFIG_ACPI_SLEEP=y
603 +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
604 +CONFIG_ACPI_EC_DEBUGFS=m
605 +CONFIG_ACPI_AC=y
606 +CONFIG_ACPI_BATTERY=y
607 +CONFIG_ACPI_BUTTON=y
608 +CONFIG_ACPI_VIDEO=m
609 +CONFIG_ACPI_FAN=y
610 +CONFIG_ACPI_TAD=m
611 +CONFIG_ACPI_DOCK=y
612 +CONFIG_ACPI_CPU_FREQ_PSS=y
613 +CONFIG_ACPI_PROCESSOR_CSTATE=y
614 +CONFIG_ACPI_PROCESSOR_IDLE=y
615 +CONFIG_ACPI_CPPC_LIB=y
616 +CONFIG_ACPI_PROCESSOR=y
617 +CONFIG_ACPI_IPMI=m
618 +CONFIG_ACPI_HOTPLUG_CPU=y
619 +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
620 +CONFIG_ACPI_THERMAL=y
621 +CONFIG_ACPI_PLATFORM_PROFILE=m
622 +CONFIG_ACPI_CUSTOM_DSDT_FILE=""
623 +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
624 +CONFIG_ACPI_TABLE_UPGRADE=y
625 +CONFIG_ACPI_DEBUG=y
626 +CONFIG_ACPI_PCI_SLOT=y
627 +CONFIG_ACPI_CONTAINER=y
628 +CONFIG_ACPI_HOTPLUG_MEMORY=y
629 +CONFIG_ACPI_HOTPLUG_IOAPIC=y
630 +CONFIG_ACPI_SBS=m
631 +CONFIG_ACPI_HED=y
632 +# CONFIG_ACPI_CUSTOM_METHOD is not set
633 +CONFIG_ACPI_BGRT=y
634 +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
635 +CONFIG_ACPI_NFIT=m
636 +# CONFIG_NFIT_SECURITY_DEBUG is not set
637 +CONFIG_ACPI_NUMA=y
638 +CONFIG_ACPI_HMAT=y
639 +CONFIG_HAVE_ACPI_APEI=y
640 +CONFIG_HAVE_ACPI_APEI_NMI=y
641 +CONFIG_ACPI_APEI=y
642 +CONFIG_ACPI_APEI_GHES=y
643 +CONFIG_ACPI_APEI_PCIEAER=y
644 +CONFIG_ACPI_APEI_MEMORY_FAILURE=y
645 +CONFIG_ACPI_APEI_EINJ=m
646 +# CONFIG_ACPI_APEI_ERST_DEBUG is not set
647 +CONFIG_ACPI_DPTF=y
648 +CONFIG_DPTF_POWER=m
649 +CONFIG_DPTF_PCH_FIVR=m
650 +CONFIG_ACPI_WATCHDOG=y
651 +CONFIG_ACPI_EXTLOG=m
652 +CONFIG_ACPI_ADXL=y
653 +CONFIG_ACPI_CONFIGFS=m
654 +CONFIG_PMIC_OPREGION=y
655 +CONFIG_BYTCRC_PMIC_OPREGION=y
656 +CONFIG_CHTCRC_PMIC_OPREGION=y
657 +CONFIG_XPOWER_PMIC_OPREGION=y
658 +CONFIG_BXT_WC_PMIC_OPREGION=y
659 +CONFIG_CHT_WC_PMIC_OPREGION=y
660 +CONFIG_CHT_DC_TI_PMIC_OPREGION=y
661 +CONFIG_TPS68470_PMIC_OPREGION=y
662 +CONFIG_X86_PM_TIMER=y
665 +# CPU Frequency scaling
667 +CONFIG_CPU_FREQ=y
668 +CONFIG_CPU_FREQ_GOV_ATTR_SET=y
669 +CONFIG_CPU_FREQ_GOV_COMMON=y
670 +CONFIG_CPU_FREQ_STAT=y
671 +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
672 +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
673 +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
674 +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
675 +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
676 +CONFIG_CPU_FREQ_GOV_POWERSAVE=y
677 +CONFIG_CPU_FREQ_GOV_USERSPACE=y
678 +CONFIG_CPU_FREQ_GOV_ONDEMAND=y
679 +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
680 +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
683 +# CPU frequency scaling drivers
685 +CONFIG_X86_INTEL_PSTATE=y
686 +CONFIG_X86_PCC_CPUFREQ=y
687 +CONFIG_X86_ACPI_CPUFREQ=y
688 +CONFIG_X86_ACPI_CPUFREQ_CPB=y
689 +CONFIG_X86_POWERNOW_K8=y
690 +CONFIG_X86_AMD_FREQ_SENSITIVITY=m
691 +CONFIG_X86_SPEEDSTEP_CENTRINO=y
692 +CONFIG_X86_P4_CLOCKMOD=m
695 +# shared options
697 +CONFIG_X86_SPEEDSTEP_LIB=m
698 +# end of CPU Frequency scaling
701 +# CPU Idle
703 +CONFIG_CPU_IDLE=y
704 +CONFIG_CPU_IDLE_GOV_LADDER=y
705 +CONFIG_CPU_IDLE_GOV_MENU=y
706 +CONFIG_CPU_IDLE_GOV_TEO=y
707 +CONFIG_CPU_IDLE_GOV_HALTPOLL=y
708 +CONFIG_HALTPOLL_CPUIDLE=m
709 +# end of CPU Idle
711 +CONFIG_INTEL_IDLE=y
712 +# end of Power management and ACPI options
715 +# Bus options (PCI etc.)
717 +CONFIG_PCI_DIRECT=y
718 +CONFIG_PCI_MMCONFIG=y
719 +CONFIG_PCI_XEN=y
720 +CONFIG_MMCONF_FAM10H=y
721 +# CONFIG_PCI_CNB20LE_QUIRK is not set
722 +CONFIG_ISA_BUS=y
723 +CONFIG_ISA_DMA_API=y
724 +CONFIG_AMD_NB=y
725 +# CONFIG_X86_SYSFB is not set
726 +# end of Bus options (PCI etc.)
729 +# Binary Emulations
731 +CONFIG_IA32_EMULATION=y
732 +CONFIG_X86_X32=y
733 +CONFIG_COMPAT_32=y
734 +CONFIG_COMPAT=y
735 +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
736 +CONFIG_SYSVIPC_COMPAT=y
737 +# end of Binary Emulations
740 +# Firmware Drivers
742 +CONFIG_EDD=y
743 +CONFIG_EDD_OFF=y
744 +CONFIG_FIRMWARE_MEMMAP=y
745 +CONFIG_DMIID=y
746 +CONFIG_DMI_SYSFS=m
747 +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
748 +CONFIG_ISCSI_IBFT_FIND=y
749 +CONFIG_ISCSI_IBFT=m
750 +CONFIG_FW_CFG_SYSFS=m
751 +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set
752 +# CONFIG_GOOGLE_FIRMWARE is not set
755 +# EFI (Extensible Firmware Interface) Support
757 +CONFIG_EFI_VARS=y
758 +CONFIG_EFI_ESRT=y
759 +CONFIG_EFI_VARS_PSTORE=m
760 +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
761 +CONFIG_EFI_RUNTIME_MAP=y
762 +# CONFIG_EFI_FAKE_MEMMAP is not set
763 +CONFIG_EFI_SOFT_RESERVE=y
764 +CONFIG_EFI_RUNTIME_WRAPPERS=y
765 +CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
766 +CONFIG_EFI_BOOTLOADER_CONTROL=m
767 +CONFIG_EFI_CAPSULE_LOADER=m
768 +CONFIG_EFI_TEST=m
769 +CONFIG_APPLE_PROPERTIES=y
770 +CONFIG_RESET_ATTACK_MITIGATION=y
771 +CONFIG_EFI_RCI2_TABLE=y
772 +# CONFIG_EFI_DISABLE_PCI_DMA is not set
773 +# end of EFI (Extensible Firmware Interface) Support
775 +CONFIG_EFI_EMBEDDED_FIRMWARE=y
776 +CONFIG_UEFI_CPER=y
777 +CONFIG_UEFI_CPER_X86=y
778 +CONFIG_EFI_DEV_PATH_PARSER=y
779 +CONFIG_EFI_EARLYCON=y
780 +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
783 +# Tegra firmware driver
785 +# end of Tegra firmware driver
786 +# end of Firmware Drivers
788 +CONFIG_HAVE_KVM=y
789 +CONFIG_HAVE_KVM_IRQCHIP=y
790 +CONFIG_HAVE_KVM_IRQFD=y
791 +CONFIG_HAVE_KVM_IRQ_ROUTING=y
792 +CONFIG_HAVE_KVM_EVENTFD=y
793 +CONFIG_KVM_MMIO=y
794 +CONFIG_KVM_ASYNC_PF=y
795 +CONFIG_HAVE_KVM_MSI=y
796 +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
797 +CONFIG_KVM_VFIO=y
798 +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
799 +CONFIG_KVM_COMPAT=y
800 +CONFIG_HAVE_KVM_IRQ_BYPASS=y
801 +CONFIG_HAVE_KVM_NO_POLL=y
802 +CONFIG_KVM_XFER_TO_GUEST_WORK=y
803 +CONFIG_VIRTUALIZATION=y
804 +CONFIG_KVM=m
805 +CONFIG_KVM_WERROR=y
806 +CONFIG_KVM_INTEL=m
807 +CONFIG_KVM_AMD=m
808 +CONFIG_KVM_AMD_SEV=y
809 +CONFIG_KVM_XEN=y
810 +CONFIG_AS_AVX512=y
811 +CONFIG_AS_SHA1_NI=y
812 +CONFIG_AS_SHA256_NI=y
813 +CONFIG_AS_TPAUSE=y
816 +# General architecture-dependent options
818 +CONFIG_CRASH_CORE=y
819 +CONFIG_KEXEC_CORE=y
820 +CONFIG_HOTPLUG_SMT=y
821 +CONFIG_GENERIC_ENTRY=y
822 +CONFIG_KPROBES=y
823 +CONFIG_JUMP_LABEL=y
824 +# CONFIG_STATIC_KEYS_SELFTEST is not set
825 +# CONFIG_STATIC_CALL_SELFTEST is not set
826 +CONFIG_OPTPROBES=y
827 +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
828 +CONFIG_ARCH_USE_BUILTIN_BSWAP=y
829 +CONFIG_KRETPROBES=y
830 +CONFIG_USER_RETURN_NOTIFIER=y
831 +CONFIG_HAVE_IOREMAP_PROT=y
832 +CONFIG_HAVE_KPROBES=y
833 +CONFIG_HAVE_KRETPROBES=y
834 +CONFIG_HAVE_OPTPROBES=y
835 +CONFIG_HAVE_KPROBES_ON_FTRACE=y
836 +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
837 +CONFIG_HAVE_NMI=y
838 +CONFIG_HAVE_ARCH_TRACEHOOK=y
839 +CONFIG_HAVE_DMA_CONTIGUOUS=y
840 +CONFIG_GENERIC_SMP_IDLE_THREAD=y
841 +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
842 +CONFIG_ARCH_HAS_SET_MEMORY=y
843 +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
844 +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
845 +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
846 +CONFIG_HAVE_ASM_MODVERSIONS=y
847 +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
848 +CONFIG_HAVE_RSEQ=y
849 +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
850 +CONFIG_HAVE_HW_BREAKPOINT=y
851 +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
852 +CONFIG_HAVE_USER_RETURN_NOTIFIER=y
853 +CONFIG_HAVE_PERF_EVENTS_NMI=y
854 +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
855 +CONFIG_HAVE_PERF_REGS=y
856 +CONFIG_HAVE_PERF_USER_STACK_DUMP=y
857 +CONFIG_HAVE_ARCH_JUMP_LABEL=y
858 +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
859 +CONFIG_MMU_GATHER_TABLE_FREE=y
860 +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y
861 +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
862 +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
863 +CONFIG_HAVE_CMPXCHG_LOCAL=y
864 +CONFIG_HAVE_CMPXCHG_DOUBLE=y
865 +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
866 +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
867 +CONFIG_HAVE_ARCH_SECCOMP=y
868 +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
869 +CONFIG_SECCOMP=y
870 +CONFIG_SECCOMP_FILTER=y
871 +# CONFIG_SECCOMP_CACHE_DEBUG is not set
872 +CONFIG_HAVE_ARCH_STACKLEAK=y
873 +CONFIG_HAVE_STACKPROTECTOR=y
874 +CONFIG_STACKPROTECTOR=y
875 +CONFIG_STACKPROTECTOR_STRONG=y
876 +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y
877 +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y
878 +CONFIG_LTO_NONE=y
879 +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
880 +CONFIG_HAVE_CONTEXT_TRACKING=y
881 +CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK=y
882 +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
883 +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
884 +CONFIG_HAVE_MOVE_PUD=y
885 +CONFIG_HAVE_MOVE_PMD=y
886 +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
887 +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
888 +CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG=y
889 +CONFIG_HAVE_ARCH_HUGE_VMAP=y
890 +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
891 +CONFIG_HAVE_ARCH_SOFT_DIRTY=y
892 +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
893 +CONFIG_MODULES_USE_ELF_RELA=y
894 +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
895 +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y
896 +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
897 +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
898 +CONFIG_HAVE_EXIT_THREAD=y
899 +CONFIG_ARCH_MMAP_RND_BITS=28
900 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
901 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8
902 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
903 +CONFIG_HAVE_STACK_VALIDATION=y
904 +CONFIG_HAVE_RELIABLE_STACKTRACE=y
905 +CONFIG_ISA_BUS_API=y
906 +CONFIG_OLD_SIGSUSPEND3=y
907 +CONFIG_COMPAT_OLD_SIGACTION=y
908 +CONFIG_COMPAT_32BIT_TIME=y
909 +CONFIG_HAVE_ARCH_VMAP_STACK=y
910 +CONFIG_VMAP_STACK=y
911 +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
912 +CONFIG_STRICT_KERNEL_RWX=y
913 +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
914 +CONFIG_STRICT_MODULE_RWX=y
915 +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
916 +CONFIG_ARCH_USE_MEMREMAP_PROT=y
917 +# CONFIG_LOCK_EVENT_COUNTS is not set
918 +CONFIG_ARCH_HAS_MEM_ENCRYPT=y
919 +CONFIG_HAVE_STATIC_CALL=y
920 +CONFIG_HAVE_STATIC_CALL_INLINE=y
921 +CONFIG_HAVE_PREEMPT_DYNAMIC=y
922 +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
923 +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
924 +CONFIG_ARCH_HAS_ELFCORE_COMPAT=y
927 +# GCOV-based kernel profiling
929 +# CONFIG_GCOV_KERNEL is not set
930 +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
931 +# end of GCOV-based kernel profiling
933 +CONFIG_HAVE_GCC_PLUGINS=y
934 +# end of General architecture-dependent options
936 +CONFIG_RT_MUTEXES=y
937 +CONFIG_BASE_SMALL=0
938 +CONFIG_MODULE_SIG_FORMAT=y
939 +CONFIG_MODULES=y
940 +# CONFIG_MODULE_FORCE_LOAD is not set
941 +CONFIG_MODULE_UNLOAD=y
942 +# CONFIG_MODULE_FORCE_UNLOAD is not set
943 +CONFIG_MODVERSIONS=y
944 +CONFIG_ASM_MODVERSIONS=y
945 +CONFIG_MODULE_SRCVERSION_ALL=y
946 +CONFIG_MODULE_SIG=y
947 +# CONFIG_MODULE_SIG_FORCE is not set
948 +CONFIG_MODULE_SIG_ALL=y
949 +# CONFIG_MODULE_SIG_SHA1 is not set
950 +# CONFIG_MODULE_SIG_SHA224 is not set
951 +# CONFIG_MODULE_SIG_SHA256 is not set
952 +# CONFIG_MODULE_SIG_SHA384 is not set
953 +CONFIG_MODULE_SIG_SHA512=y
954 +CONFIG_MODULE_SIG_HASH="sha512"
955 +# CONFIG_MODULE_COMPRESS is not set
956 +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
957 +# CONFIG_TRIM_UNUSED_KSYMS is not set
958 +CONFIG_MODULES_TREE_LOOKUP=y
959 +CONFIG_BLOCK=y
960 +CONFIG_BLK_SCSI_REQUEST=y
961 +CONFIG_BLK_CGROUP_RWSTAT=y
962 +CONFIG_BLK_DEV_BSG=y
963 +CONFIG_BLK_DEV_BSGLIB=y
964 +CONFIG_BLK_DEV_INTEGRITY=y
965 +CONFIG_BLK_DEV_INTEGRITY_T10=y
966 +CONFIG_BLK_DEV_ZONED=y
967 +CONFIG_BLK_DEV_THROTTLING=y
968 +# CONFIG_BLK_DEV_THROTTLING_LOW is not set
969 +CONFIG_BLK_CMDLINE_PARSER=y
970 +CONFIG_BLK_WBT=y
971 +CONFIG_BLK_CGROUP_IOLATENCY=y
972 +# CONFIG_BLK_CGROUP_IOCOST is not set
973 +CONFIG_BLK_WBT_MQ=y
974 +CONFIG_BLK_DEBUG_FS=y
975 +CONFIG_BLK_DEBUG_FS_ZONED=y
976 +CONFIG_BLK_SED_OPAL=y
977 +CONFIG_BLK_INLINE_ENCRYPTION=y
978 +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
981 +# Partition Types
983 +CONFIG_PARTITION_ADVANCED=y
984 +# CONFIG_ACORN_PARTITION is not set
985 +CONFIG_AIX_PARTITION=y
986 +CONFIG_OSF_PARTITION=y
987 +CONFIG_AMIGA_PARTITION=y
988 +CONFIG_ATARI_PARTITION=y
989 +CONFIG_MAC_PARTITION=y
990 +CONFIG_MSDOS_PARTITION=y
991 +CONFIG_BSD_DISKLABEL=y
992 +CONFIG_MINIX_SUBPARTITION=y
993 +CONFIG_SOLARIS_X86_PARTITION=y
994 +CONFIG_UNIXWARE_DISKLABEL=y
995 +CONFIG_LDM_PARTITION=y
996 +# CONFIG_LDM_DEBUG is not set
997 +CONFIG_SGI_PARTITION=y
998 +CONFIG_ULTRIX_PARTITION=y
999 +CONFIG_SUN_PARTITION=y
1000 +CONFIG_KARMA_PARTITION=y
1001 +CONFIG_EFI_PARTITION=y
1002 +CONFIG_SYSV68_PARTITION=y
1003 +CONFIG_CMDLINE_PARTITION=y
1004 +# end of Partition Types
1006 +CONFIG_BLOCK_COMPAT=y
1007 +CONFIG_BLK_MQ_PCI=y
1008 +CONFIG_BLK_MQ_VIRTIO=y
1009 +CONFIG_BLK_MQ_RDMA=y
1010 +CONFIG_BLK_PM=y
1013 +# IO Schedulers
1015 +CONFIG_MQ_IOSCHED_DEADLINE=m
1016 +CONFIG_MQ_IOSCHED_KYBER=m
1017 +CONFIG_IOSCHED_BFQ=y
1018 +CONFIG_BFQ_GROUP_IOSCHED=y
1019 +# CONFIG_BFQ_CGROUP_DEBUG is not set
1020 +# end of IO Schedulers
1022 +CONFIG_PREEMPT_NOTIFIERS=y
1023 +CONFIG_PADATA=y
1024 +CONFIG_ASN1=y
1025 +CONFIG_UNINLINE_SPIN_UNLOCK=y
1026 +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
1027 +CONFIG_MUTEX_SPIN_ON_OWNER=y
1028 +CONFIG_RWSEM_SPIN_ON_OWNER=y
1029 +CONFIG_LOCK_SPIN_ON_OWNER=y
1030 +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
1031 +CONFIG_QUEUED_SPINLOCKS=y
1032 +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
1033 +CONFIG_QUEUED_RWLOCKS=y
1034 +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y
1035 +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y
1036 +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y
1037 +CONFIG_FREEZER=y
1040 +# Executable file formats
1042 +CONFIG_BINFMT_ELF=y
1043 +CONFIG_COMPAT_BINFMT_ELF=y
1044 +CONFIG_ELFCORE=y
1045 +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
1046 +CONFIG_BINFMT_SCRIPT=y
1047 +CONFIG_BINFMT_MISC=m
1048 +CONFIG_COREDUMP=y
1049 +# end of Executable file formats
1052 +# Memory Management options
1054 +CONFIG_SELECT_MEMORY_MODEL=y
1055 +CONFIG_SPARSEMEM_MANUAL=y
1056 +CONFIG_SPARSEMEM=y
1057 +CONFIG_NEED_MULTIPLE_NODES=y
1058 +CONFIG_SPARSEMEM_EXTREME=y
1059 +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
1060 +CONFIG_SPARSEMEM_VMEMMAP=y
1061 +CONFIG_CLEAN_LOW_KBYTES=524288
1062 +CONFIG_CLEAN_MIN_KBYTES=0
1063 +CONFIG_HAVE_FAST_GUP=y
1064 +CONFIG_NUMA_KEEP_MEMINFO=y
1065 +CONFIG_MEMORY_ISOLATION=y
1066 +CONFIG_HAVE_BOOTMEM_INFO_NODE=y
1067 +CONFIG_MEMORY_HOTPLUG=y
1068 +CONFIG_MEMORY_HOTPLUG_SPARSE=y
1069 +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
1070 +CONFIG_MEMORY_HOTREMOVE=y
1071 +CONFIG_SPLIT_PTLOCK_CPUS=4
1072 +CONFIG_MEMORY_BALLOON=y
1073 +CONFIG_BALLOON_COMPACTION=y
1074 +CONFIG_COMPACTION=y
1075 +CONFIG_PAGE_REPORTING=y
1076 +CONFIG_MIGRATION=y
1077 +CONFIG_CONTIG_ALLOC=y
1078 +CONFIG_PHYS_ADDR_T_64BIT=y
1079 +CONFIG_BOUNCE=y
1080 +CONFIG_VIRT_TO_BUS=y
1081 +CONFIG_MMU_NOTIFIER=y
1082 +CONFIG_KSM=y
1083 +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
1084 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
1085 +CONFIG_MEMORY_FAILURE=y
1086 +CONFIG_HWPOISON_INJECT=m
1087 +CONFIG_TRANSPARENT_HUGEPAGE=y
1088 +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set
1089 +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
1090 +CONFIG_ARCH_WANTS_THP_SWAP=y
1091 +CONFIG_THP_SWAP=y
1092 +CONFIG_CLEANCACHE=y
1093 +CONFIG_FRONTSWAP=y
1094 +# CONFIG_CMA is not set
1095 +CONFIG_MEM_SOFT_DIRTY=y
1096 +CONFIG_ZSWAP=y
1097 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
1098 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set
1099 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set
1100 +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4=y
1101 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set
1102 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
1103 +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lz4"
1104 +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD is not set
1105 +CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD=y
1106 +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
1107 +CONFIG_ZSWAP_ZPOOL_DEFAULT="z3fold"
1108 +# CONFIG_ZSWAP_DEFAULT_ON is not set
1109 +CONFIG_ZPOOL=y
1110 +CONFIG_ZBUD=m
1111 +CONFIG_Z3FOLD=y
1112 +CONFIG_ZSMALLOC=m
1113 +# CONFIG_ZSMALLOC_STAT is not set
1114 +CONFIG_GENERIC_EARLY_IOREMAP=y
1115 +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
1116 +CONFIG_IDLE_PAGE_TRACKING=y
1117 +CONFIG_ARCH_HAS_PTE_DEVMAP=y
1118 +CONFIG_ZONE_DEVICE=y
1119 +CONFIG_DEV_PAGEMAP_OPS=y
1120 +CONFIG_HMM_MIRROR=y
1121 +CONFIG_DEVICE_PRIVATE=y
1122 +CONFIG_VMAP_PFN=y
1123 +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
1124 +CONFIG_ARCH_HAS_PKEYS=y
1125 +# CONFIG_PERCPU_STATS is not set
1126 +# CONFIG_GUP_TEST is not set
1127 +# CONFIG_READ_ONLY_THP_FOR_FS is not set
1128 +CONFIG_ARCH_HAS_PTE_SPECIAL=y
1129 +CONFIG_MAPPING_DIRTY_HELPERS=y
1130 +CONFIG_LRU_GEN=y
1131 +CONFIG_NR_LRU_GENS=4
1132 +CONFIG_TIERS_PER_GEN=2
1133 +# CONFIG_LRU_GEN_ENABLED is not set
1134 +# CONFIG_LRU_GEN_STATS is not set
1135 +# end of Memory Management options
1137 +CONFIG_NET=y
1138 +CONFIG_WANT_COMPAT_NETLINK_MESSAGES=y
1139 +CONFIG_COMPAT_NETLINK_MESSAGES=y
1140 +CONFIG_NET_INGRESS=y
1141 +CONFIG_NET_EGRESS=y
1142 +CONFIG_NET_REDIRECT=y
1143 +CONFIG_SKB_EXTENSIONS=y
1146 +# Networking options
1148 +CONFIG_PACKET=y
1149 +CONFIG_PACKET_DIAG=m
1150 +CONFIG_UNIX=y
1151 +CONFIG_UNIX_SCM=y
1152 +CONFIG_UNIX_DIAG=m
1153 +CONFIG_TLS=m
1154 +CONFIG_TLS_DEVICE=y
1155 +# CONFIG_TLS_TOE is not set
1156 +CONFIG_XFRM=y
1157 +CONFIG_XFRM_OFFLOAD=y
1158 +CONFIG_XFRM_ALGO=m
1159 +CONFIG_XFRM_USER=m
1160 +CONFIG_XFRM_USER_COMPAT=m
1161 +CONFIG_XFRM_INTERFACE=m
1162 +# CONFIG_XFRM_SUB_POLICY is not set
1163 +# CONFIG_XFRM_MIGRATE is not set
1164 +CONFIG_XFRM_STATISTICS=y
1165 +CONFIG_XFRM_AH=m
1166 +CONFIG_XFRM_ESP=m
1167 +CONFIG_XFRM_IPCOMP=m
1168 +CONFIG_NET_KEY=m
1169 +# CONFIG_NET_KEY_MIGRATE is not set
1170 +CONFIG_XFRM_ESPINTCP=y
1171 +CONFIG_SMC=m
1172 +CONFIG_SMC_DIAG=m
1173 +CONFIG_XDP_SOCKETS=y
1174 +CONFIG_XDP_SOCKETS_DIAG=m
1175 +CONFIG_INET=y
1176 +CONFIG_IP_MULTICAST=y
1177 +CONFIG_IP_ADVANCED_ROUTER=y
1178 +CONFIG_IP_FIB_TRIE_STATS=y
1179 +CONFIG_IP_MULTIPLE_TABLES=y
1180 +CONFIG_IP_ROUTE_MULTIPATH=y
1181 +CONFIG_IP_ROUTE_VERBOSE=y
1182 +CONFIG_IP_ROUTE_CLASSID=y
1183 +# CONFIG_IP_PNP is not set
1184 +CONFIG_NET_IPIP=m
1185 +CONFIG_NET_IPGRE_DEMUX=m
1186 +CONFIG_NET_IP_TUNNEL=m
1187 +CONFIG_NET_IPGRE=m
1188 +CONFIG_NET_IPGRE_BROADCAST=y
1189 +CONFIG_IP_MROUTE_COMMON=y
1190 +CONFIG_IP_MROUTE=y
1191 +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
1192 +CONFIG_IP_PIMSM_V1=y
1193 +CONFIG_IP_PIMSM_V2=y
1194 +CONFIG_SYN_COOKIES=y
1195 +CONFIG_NET_IPVTI=m
1196 +CONFIG_NET_UDP_TUNNEL=m
1197 +CONFIG_NET_FOU=m
1198 +CONFIG_NET_FOU_IP_TUNNELS=y
1199 +CONFIG_INET_AH=m
1200 +CONFIG_INET_ESP=m
1201 +CONFIG_INET_ESP_OFFLOAD=m
1202 +CONFIG_INET_ESPINTCP=y
1203 +CONFIG_INET_IPCOMP=m
1204 +CONFIG_INET_XFRM_TUNNEL=m
1205 +CONFIG_INET_TUNNEL=m
1206 +CONFIG_INET_DIAG=m
1207 +CONFIG_INET_TCP_DIAG=m
1208 +CONFIG_INET_UDP_DIAG=m
1209 +CONFIG_INET_RAW_DIAG=m
1210 +CONFIG_INET_DIAG_DESTROY=y
1211 +CONFIG_TCP_CONG_ADVANCED=y
1212 +CONFIG_TCP_CONG_BIC=m
1213 +CONFIG_TCP_CONG_CUBIC=m
1214 +CONFIG_TCP_CONG_WESTWOOD=m
1215 +CONFIG_TCP_CONG_HTCP=m
1216 +CONFIG_TCP_CONG_HSTCP=m
1217 +CONFIG_TCP_CONG_HYBLA=m
1218 +CONFIG_TCP_CONG_VEGAS=m
1219 +CONFIG_TCP_CONG_NV=m
1220 +CONFIG_TCP_CONG_SCALABLE=m
1221 +CONFIG_TCP_CONG_LP=m
1222 +CONFIG_TCP_CONG_VENO=m
1223 +CONFIG_TCP_CONG_YEAH=m
1224 +CONFIG_TCP_CONG_ILLINOIS=m
1225 +CONFIG_TCP_CONG_DCTCP=m
1226 +CONFIG_TCP_CONG_CDG=m
1227 +CONFIG_TCP_CONG_BBR=m
1228 +CONFIG_TCP_CONG_BBR2=y
1229 +CONFIG_DEFAULT_BBR2=y
1230 +# CONFIG_DEFAULT_RENO is not set
1231 +CONFIG_DEFAULT_TCP_CONG="bbr2"
1232 +CONFIG_TCP_MD5SIG=y
1233 +CONFIG_IPV6=y
1234 +CONFIG_IPV6_ROUTER_PREF=y
1235 +CONFIG_IPV6_ROUTE_INFO=y
1236 +# CONFIG_IPV6_OPTIMISTIC_DAD is not set
1237 +CONFIG_INET6_AH=m
1238 +CONFIG_INET6_ESP=m
1239 +CONFIG_INET6_ESP_OFFLOAD=m
1240 +CONFIG_INET6_ESPINTCP=y
1241 +CONFIG_INET6_IPCOMP=m
1242 +CONFIG_IPV6_MIP6=m
1243 +CONFIG_IPV6_ILA=m
1244 +CONFIG_INET6_XFRM_TUNNEL=m
1245 +CONFIG_INET6_TUNNEL=m
1246 +CONFIG_IPV6_VTI=m
1247 +CONFIG_IPV6_SIT=m
1248 +CONFIG_IPV6_SIT_6RD=y
1249 +CONFIG_IPV6_NDISC_NODETYPE=y
1250 +CONFIG_IPV6_TUNNEL=m
1251 +CONFIG_IPV6_GRE=m
1252 +CONFIG_IPV6_FOU=m
1253 +CONFIG_IPV6_FOU_TUNNEL=m
1254 +CONFIG_IPV6_MULTIPLE_TABLES=y
1255 +CONFIG_IPV6_SUBTREES=y
1256 +CONFIG_IPV6_MROUTE=y
1257 +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
1258 +CONFIG_IPV6_PIMSM_V2=y
1259 +CONFIG_IPV6_SEG6_LWTUNNEL=y
1260 +CONFIG_IPV6_SEG6_HMAC=y
1261 +CONFIG_IPV6_SEG6_BPF=y
1262 +# CONFIG_IPV6_RPL_LWTUNNEL is not set
1263 +CONFIG_NETLABEL=y
1264 +CONFIG_MPTCP=y
1265 +CONFIG_INET_MPTCP_DIAG=m
1266 +CONFIG_MPTCP_IPV6=y
1267 +CONFIG_NETWORK_SECMARK=y
1268 +CONFIG_NET_PTP_CLASSIFY=y
1269 +CONFIG_NETWORK_PHY_TIMESTAMPING=y
1270 +CONFIG_NETFILTER=y
1271 +CONFIG_NETFILTER_ADVANCED=y
1272 +CONFIG_BRIDGE_NETFILTER=m
1275 +# Core Netfilter Configuration
1277 +CONFIG_NETFILTER_INGRESS=y
1278 +CONFIG_NETFILTER_NETLINK=m
1279 +CONFIG_NETFILTER_FAMILY_BRIDGE=y
1280 +CONFIG_NETFILTER_FAMILY_ARP=y
1281 +CONFIG_NETFILTER_NETLINK_ACCT=m
1282 +CONFIG_NETFILTER_NETLINK_QUEUE=m
1283 +CONFIG_NETFILTER_NETLINK_LOG=m
1284 +CONFIG_NETFILTER_NETLINK_OSF=m
1285 +CONFIG_NF_CONNTRACK=m
1286 +CONFIG_NF_LOG_COMMON=m
1287 +CONFIG_NF_LOG_NETDEV=m
1288 +CONFIG_NETFILTER_CONNCOUNT=m
1289 +CONFIG_NF_CONNTRACK_MARK=y
1290 +CONFIG_NF_CONNTRACK_SECMARK=y
1291 +CONFIG_NF_CONNTRACK_ZONES=y
1292 +# CONFIG_NF_CONNTRACK_PROCFS is not set
1293 +CONFIG_NF_CONNTRACK_EVENTS=y
1294 +CONFIG_NF_CONNTRACK_TIMEOUT=y
1295 +CONFIG_NF_CONNTRACK_TIMESTAMP=y
1296 +CONFIG_NF_CONNTRACK_LABELS=y
1297 +CONFIG_NF_CT_PROTO_DCCP=y
1298 +CONFIG_NF_CT_PROTO_GRE=y
1299 +CONFIG_NF_CT_PROTO_SCTP=y
1300 +CONFIG_NF_CT_PROTO_UDPLITE=y
1301 +CONFIG_NF_CONNTRACK_AMANDA=m
1302 +CONFIG_NF_CONNTRACK_FTP=m
1303 +CONFIG_NF_CONNTRACK_H323=m
1304 +CONFIG_NF_CONNTRACK_IRC=m
1305 +CONFIG_NF_CONNTRACK_BROADCAST=m
1306 +CONFIG_NF_CONNTRACK_NETBIOS_NS=m
1307 +CONFIG_NF_CONNTRACK_SNMP=m
1308 +CONFIG_NF_CONNTRACK_PPTP=m
1309 +CONFIG_NF_CONNTRACK_SANE=m
1310 +CONFIG_NF_CONNTRACK_SIP=m
1311 +CONFIG_NF_CONNTRACK_TFTP=m
1312 +CONFIG_NF_CT_NETLINK=m
1313 +CONFIG_NF_CT_NETLINK_TIMEOUT=m
1314 +CONFIG_NF_CT_NETLINK_HELPER=m
1315 +CONFIG_NETFILTER_NETLINK_GLUE_CT=y
1316 +CONFIG_NF_NAT=m
1317 +CONFIG_NF_NAT_AMANDA=m
1318 +CONFIG_NF_NAT_FTP=m
1319 +CONFIG_NF_NAT_IRC=m
1320 +CONFIG_NF_NAT_SIP=m
1321 +CONFIG_NF_NAT_TFTP=m
1322 +CONFIG_NF_NAT_REDIRECT=y
1323 +CONFIG_NF_NAT_MASQUERADE=y
1324 +CONFIG_NETFILTER_SYNPROXY=m
1325 +CONFIG_NF_TABLES=m
1326 +CONFIG_NF_TABLES_INET=y
1327 +CONFIG_NF_TABLES_NETDEV=y
1328 +CONFIG_NFT_NUMGEN=m
1329 +CONFIG_NFT_CT=m
1330 +CONFIG_NFT_FLOW_OFFLOAD=m
1331 +CONFIG_NFT_COUNTER=m
1332 +CONFIG_NFT_CONNLIMIT=m
1333 +CONFIG_NFT_LOG=m
1334 +CONFIG_NFT_LIMIT=m
1335 +CONFIG_NFT_MASQ=m
1336 +CONFIG_NFT_REDIR=m
1337 +CONFIG_NFT_NAT=m
1338 +CONFIG_NFT_TUNNEL=m
1339 +CONFIG_NFT_OBJREF=m
1340 +CONFIG_NFT_QUEUE=m
1341 +CONFIG_NFT_QUOTA=m
1342 +CONFIG_NFT_REJECT=m
1343 +CONFIG_NFT_REJECT_INET=m
1344 +CONFIG_NFT_COMPAT=m
1345 +CONFIG_NFT_HASH=m
1346 +CONFIG_NFT_FIB=m
1347 +CONFIG_NFT_FIB_INET=m
1348 +CONFIG_NFT_XFRM=m
1349 +CONFIG_NFT_SOCKET=m
1350 +CONFIG_NFT_OSF=m
1351 +CONFIG_NFT_TPROXY=m
1352 +CONFIG_NFT_SYNPROXY=m
1353 +CONFIG_NF_DUP_NETDEV=m
1354 +CONFIG_NFT_DUP_NETDEV=m
1355 +CONFIG_NFT_FWD_NETDEV=m
1356 +CONFIG_NFT_FIB_NETDEV=m
1357 +CONFIG_NFT_REJECT_NETDEV=m
1358 +CONFIG_NF_FLOW_TABLE_INET=m
1359 +CONFIG_NF_FLOW_TABLE=m
1360 +CONFIG_NETFILTER_XTABLES=m
1363 +# Xtables combined modules
1365 +CONFIG_NETFILTER_XT_MARK=m
1366 +CONFIG_NETFILTER_XT_CONNMARK=m
1367 +CONFIG_NETFILTER_XT_SET=m
1370 +# Xtables targets
1372 +CONFIG_NETFILTER_XT_TARGET_AUDIT=m
1373 +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
1374 +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
1375 +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
1376 +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
1377 +CONFIG_NETFILTER_XT_TARGET_CT=m
1378 +CONFIG_NETFILTER_XT_TARGET_DSCP=m
1379 +CONFIG_NETFILTER_XT_TARGET_HL=m
1380 +CONFIG_NETFILTER_XT_TARGET_HMARK=m
1381 +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
1382 +CONFIG_NETFILTER_XT_TARGET_LED=m
1383 +CONFIG_NETFILTER_XT_TARGET_LOG=m
1384 +CONFIG_NETFILTER_XT_TARGET_MARK=m
1385 +CONFIG_NETFILTER_XT_NAT=m
1386 +CONFIG_NETFILTER_XT_TARGET_NETMAP=m
1387 +CONFIG_NETFILTER_XT_TARGET_NFLOG=m
1388 +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
1389 +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
1390 +CONFIG_NETFILTER_XT_TARGET_RATEEST=m
1391 +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
1392 +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
1393 +CONFIG_NETFILTER_XT_TARGET_TEE=m
1394 +CONFIG_NETFILTER_XT_TARGET_TPROXY=m
1395 +CONFIG_NETFILTER_XT_TARGET_TRACE=m
1396 +CONFIG_NETFILTER_XT_TARGET_SECMARK=m
1397 +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
1398 +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
1401 +# Xtables matches
1403 +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
1404 +CONFIG_NETFILTER_XT_MATCH_BPF=m
1405 +CONFIG_NETFILTER_XT_MATCH_CGROUP=m
1406 +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
1407 +CONFIG_NETFILTER_XT_MATCH_COMMENT=m
1408 +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
1409 +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
1410 +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
1411 +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
1412 +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
1413 +CONFIG_NETFILTER_XT_MATCH_CPU=m
1414 +CONFIG_NETFILTER_XT_MATCH_DCCP=m
1415 +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
1416 +CONFIG_NETFILTER_XT_MATCH_DSCP=m
1417 +CONFIG_NETFILTER_XT_MATCH_ECN=m
1418 +CONFIG_NETFILTER_XT_MATCH_ESP=m
1419 +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
1420 +CONFIG_NETFILTER_XT_MATCH_HELPER=m
1421 +CONFIG_NETFILTER_XT_MATCH_HL=m
1422 +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
1423 +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
1424 +CONFIG_NETFILTER_XT_MATCH_IPVS=m
1425 +CONFIG_NETFILTER_XT_MATCH_L2TP=m
1426 +CONFIG_NETFILTER_XT_MATCH_LENGTH=m
1427 +CONFIG_NETFILTER_XT_MATCH_LIMIT=m
1428 +CONFIG_NETFILTER_XT_MATCH_MAC=m
1429 +CONFIG_NETFILTER_XT_MATCH_MARK=m
1430 +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
1431 +CONFIG_NETFILTER_XT_MATCH_NFACCT=m
1432 +CONFIG_NETFILTER_XT_MATCH_OSF=m
1433 +CONFIG_NETFILTER_XT_MATCH_OWNER=m
1434 +CONFIG_NETFILTER_XT_MATCH_POLICY=m
1435 +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
1436 +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
1437 +CONFIG_NETFILTER_XT_MATCH_QUOTA=m
1438 +CONFIG_NETFILTER_XT_MATCH_RATEEST=m
1439 +CONFIG_NETFILTER_XT_MATCH_REALM=m
1440 +CONFIG_NETFILTER_XT_MATCH_RECENT=m
1441 +CONFIG_NETFILTER_XT_MATCH_SCTP=m
1442 +CONFIG_NETFILTER_XT_MATCH_SOCKET=m
1443 +CONFIG_NETFILTER_XT_MATCH_STATE=m
1444 +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
1445 +CONFIG_NETFILTER_XT_MATCH_STRING=m
1446 +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
1447 +CONFIG_NETFILTER_XT_MATCH_TIME=m
1448 +CONFIG_NETFILTER_XT_MATCH_U32=m
1449 +# end of Core Netfilter Configuration
1451 +CONFIG_IP_SET=m
1452 +CONFIG_IP_SET_MAX=256
1453 +CONFIG_IP_SET_BITMAP_IP=m
1454 +CONFIG_IP_SET_BITMAP_IPMAC=m
1455 +CONFIG_IP_SET_BITMAP_PORT=m
1456 +CONFIG_IP_SET_HASH_IP=m
1457 +CONFIG_IP_SET_HASH_IPMARK=m
1458 +CONFIG_IP_SET_HASH_IPPORT=m
1459 +CONFIG_IP_SET_HASH_IPPORTIP=m
1460 +CONFIG_IP_SET_HASH_IPPORTNET=m
1461 +CONFIG_IP_SET_HASH_IPMAC=m
1462 +CONFIG_IP_SET_HASH_MAC=m
1463 +CONFIG_IP_SET_HASH_NETPORTNET=m
1464 +CONFIG_IP_SET_HASH_NET=m
1465 +CONFIG_IP_SET_HASH_NETNET=m
1466 +CONFIG_IP_SET_HASH_NETPORT=m
1467 +CONFIG_IP_SET_HASH_NETIFACE=m
1468 +CONFIG_IP_SET_LIST_SET=m
1469 +CONFIG_IP_VS=m
1470 +CONFIG_IP_VS_IPV6=y
1471 +# CONFIG_IP_VS_DEBUG is not set
1472 +CONFIG_IP_VS_TAB_BITS=12
1475 +# IPVS transport protocol load balancing support
1477 +CONFIG_IP_VS_PROTO_TCP=y
1478 +CONFIG_IP_VS_PROTO_UDP=y
1479 +CONFIG_IP_VS_PROTO_AH_ESP=y
1480 +CONFIG_IP_VS_PROTO_ESP=y
1481 +CONFIG_IP_VS_PROTO_AH=y
1482 +CONFIG_IP_VS_PROTO_SCTP=y
1485 +# IPVS scheduler
1487 +CONFIG_IP_VS_RR=m
1488 +CONFIG_IP_VS_WRR=m
1489 +CONFIG_IP_VS_LC=m
1490 +CONFIG_IP_VS_WLC=m
1491 +CONFIG_IP_VS_FO=m
1492 +CONFIG_IP_VS_OVF=m
1493 +CONFIG_IP_VS_LBLC=m
1494 +CONFIG_IP_VS_LBLCR=m
1495 +CONFIG_IP_VS_DH=m
1496 +CONFIG_IP_VS_SH=m
1497 +CONFIG_IP_VS_MH=m
1498 +CONFIG_IP_VS_SED=m
1499 +CONFIG_IP_VS_NQ=m
1500 +CONFIG_IP_VS_TWOS=m
1503 +# IPVS SH scheduler
1505 +CONFIG_IP_VS_SH_TAB_BITS=8
1508 +# IPVS MH scheduler
1510 +CONFIG_IP_VS_MH_TAB_INDEX=12
1513 +# IPVS application helper
1515 +CONFIG_IP_VS_FTP=m
1516 +CONFIG_IP_VS_NFCT=y
1517 +CONFIG_IP_VS_PE_SIP=m
1520 +# IP: Netfilter Configuration
1522 +CONFIG_NF_DEFRAG_IPV4=m
1523 +CONFIG_NF_SOCKET_IPV4=m
1524 +CONFIG_NF_TPROXY_IPV4=m
1525 +CONFIG_NF_TABLES_IPV4=y
1526 +CONFIG_NFT_REJECT_IPV4=m
1527 +CONFIG_NFT_DUP_IPV4=m
1528 +CONFIG_NFT_FIB_IPV4=m
1529 +CONFIG_NF_TABLES_ARP=y
1530 +CONFIG_NF_FLOW_TABLE_IPV4=m
1531 +CONFIG_NF_DUP_IPV4=m
1532 +CONFIG_NF_LOG_ARP=m
1533 +CONFIG_NF_LOG_IPV4=m
1534 +CONFIG_NF_REJECT_IPV4=m
1535 +CONFIG_NF_NAT_SNMP_BASIC=m
1536 +CONFIG_NF_NAT_PPTP=m
1537 +CONFIG_NF_NAT_H323=m
1538 +CONFIG_IP_NF_IPTABLES=m
1539 +CONFIG_IP_NF_MATCH_AH=m
1540 +CONFIG_IP_NF_MATCH_ECN=m
1541 +CONFIG_IP_NF_MATCH_RPFILTER=m
1542 +CONFIG_IP_NF_MATCH_TTL=m
1543 +CONFIG_IP_NF_FILTER=m
1544 +CONFIG_IP_NF_TARGET_REJECT=m
1545 +CONFIG_IP_NF_TARGET_SYNPROXY=m
1546 +CONFIG_IP_NF_NAT=m
1547 +CONFIG_IP_NF_TARGET_MASQUERADE=m
1548 +CONFIG_IP_NF_TARGET_NETMAP=m
1549 +CONFIG_IP_NF_TARGET_REDIRECT=m
1550 +CONFIG_IP_NF_MANGLE=m
1551 +CONFIG_IP_NF_TARGET_CLUSTERIP=m
1552 +CONFIG_IP_NF_TARGET_ECN=m
1553 +CONFIG_IP_NF_TARGET_TTL=m
1554 +CONFIG_IP_NF_RAW=m
1555 +CONFIG_IP_NF_SECURITY=m
1556 +CONFIG_IP_NF_ARPTABLES=m
1557 +CONFIG_IP_NF_ARPFILTER=m
1558 +CONFIG_IP_NF_ARP_MANGLE=m
1559 +# end of IP: Netfilter Configuration
1562 +# IPv6: Netfilter Configuration
1564 +CONFIG_NF_SOCKET_IPV6=m
1565 +CONFIG_NF_TPROXY_IPV6=m
1566 +CONFIG_NF_TABLES_IPV6=y
1567 +CONFIG_NFT_REJECT_IPV6=m
1568 +CONFIG_NFT_DUP_IPV6=m
1569 +CONFIG_NFT_FIB_IPV6=m
1570 +CONFIG_NF_FLOW_TABLE_IPV6=m
1571 +CONFIG_NF_DUP_IPV6=m
1572 +CONFIG_NF_REJECT_IPV6=m
1573 +CONFIG_NF_LOG_IPV6=m
1574 +CONFIG_IP6_NF_IPTABLES=m
1575 +CONFIG_IP6_NF_MATCH_AH=m
1576 +CONFIG_IP6_NF_MATCH_EUI64=m
1577 +CONFIG_IP6_NF_MATCH_FRAG=m
1578 +CONFIG_IP6_NF_MATCH_OPTS=m
1579 +CONFIG_IP6_NF_MATCH_HL=m
1580 +CONFIG_IP6_NF_MATCH_IPV6HEADER=m
1581 +CONFIG_IP6_NF_MATCH_MH=m
1582 +CONFIG_IP6_NF_MATCH_RPFILTER=m
1583 +CONFIG_IP6_NF_MATCH_RT=m
1584 +CONFIG_IP6_NF_MATCH_SRH=m
1585 +CONFIG_IP6_NF_TARGET_HL=m
1586 +CONFIG_IP6_NF_FILTER=m
1587 +CONFIG_IP6_NF_TARGET_REJECT=m
1588 +CONFIG_IP6_NF_TARGET_SYNPROXY=m
1589 +CONFIG_IP6_NF_MANGLE=m
1590 +CONFIG_IP6_NF_RAW=m
1591 +CONFIG_IP6_NF_SECURITY=m
1592 +CONFIG_IP6_NF_NAT=m
1593 +CONFIG_IP6_NF_TARGET_MASQUERADE=m
1594 +CONFIG_IP6_NF_TARGET_NPT=m
1595 +# end of IPv6: Netfilter Configuration
1597 +CONFIG_NF_DEFRAG_IPV6=m
1600 +# DECnet: Netfilter Configuration
1602 +CONFIG_DECNET_NF_GRABULATOR=m
1603 +# end of DECnet: Netfilter Configuration
1605 +CONFIG_NF_TABLES_BRIDGE=m
1606 +CONFIG_NFT_BRIDGE_META=m
1607 +CONFIG_NFT_BRIDGE_REJECT=m
1608 +CONFIG_NF_LOG_BRIDGE=m
1609 +CONFIG_NF_CONNTRACK_BRIDGE=m
1610 +CONFIG_BRIDGE_NF_EBTABLES=m
1611 +CONFIG_BRIDGE_EBT_BROUTE=m
1612 +CONFIG_BRIDGE_EBT_T_FILTER=m
1613 +CONFIG_BRIDGE_EBT_T_NAT=m
1614 +CONFIG_BRIDGE_EBT_802_3=m
1615 +CONFIG_BRIDGE_EBT_AMONG=m
1616 +CONFIG_BRIDGE_EBT_ARP=m
1617 +CONFIG_BRIDGE_EBT_IP=m
1618 +CONFIG_BRIDGE_EBT_IP6=m
1619 +CONFIG_BRIDGE_EBT_LIMIT=m
1620 +CONFIG_BRIDGE_EBT_MARK=m
1621 +CONFIG_BRIDGE_EBT_PKTTYPE=m
1622 +CONFIG_BRIDGE_EBT_STP=m
1623 +CONFIG_BRIDGE_EBT_VLAN=m
1624 +CONFIG_BRIDGE_EBT_ARPREPLY=m
1625 +CONFIG_BRIDGE_EBT_DNAT=m
1626 +CONFIG_BRIDGE_EBT_MARK_T=m
1627 +CONFIG_BRIDGE_EBT_REDIRECT=m
1628 +CONFIG_BRIDGE_EBT_SNAT=m
1629 +CONFIG_BRIDGE_EBT_LOG=m
1630 +CONFIG_BRIDGE_EBT_NFLOG=m
1631 +CONFIG_BPFILTER=y
1632 +CONFIG_BPFILTER_UMH=m
1633 +CONFIG_IP_DCCP=m
1634 +CONFIG_INET_DCCP_DIAG=m
1637 +# DCCP CCIDs Configuration
1639 +# CONFIG_IP_DCCP_CCID2_DEBUG is not set
1640 +# CONFIG_IP_DCCP_CCID3 is not set
1641 +# end of DCCP CCIDs Configuration
1644 +# DCCP Kernel Hacking
1646 +# CONFIG_IP_DCCP_DEBUG is not set
1647 +# end of DCCP Kernel Hacking
1649 +CONFIG_IP_SCTP=m
1650 +# CONFIG_SCTP_DBG_OBJCNT is not set
1651 +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
1652 +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
1653 +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
1654 +CONFIG_SCTP_COOKIE_HMAC_MD5=y
1655 +CONFIG_SCTP_COOKIE_HMAC_SHA1=y
1656 +CONFIG_INET_SCTP_DIAG=m
1657 +CONFIG_RDS=m
1658 +CONFIG_RDS_RDMA=m
1659 +CONFIG_RDS_TCP=m
1660 +# CONFIG_RDS_DEBUG is not set
1661 +CONFIG_TIPC=m
1662 +CONFIG_TIPC_MEDIA_IB=y
1663 +CONFIG_TIPC_MEDIA_UDP=y
1664 +CONFIG_TIPC_CRYPTO=y
1665 +CONFIG_TIPC_DIAG=m
1666 +CONFIG_ATM=m
1667 +CONFIG_ATM_CLIP=m
1668 +# CONFIG_ATM_CLIP_NO_ICMP is not set
1669 +CONFIG_ATM_LANE=m
1670 +CONFIG_ATM_MPOA=m
1671 +CONFIG_ATM_BR2684=m
1672 +# CONFIG_ATM_BR2684_IPFILTER is not set
1673 +CONFIG_L2TP=m
1674 +CONFIG_L2TP_DEBUGFS=m
1675 +CONFIG_L2TP_V3=y
1676 +CONFIG_L2TP_IP=m
1677 +CONFIG_L2TP_ETH=m
1678 +CONFIG_STP=m
1679 +CONFIG_GARP=m
1680 +CONFIG_MRP=m
1681 +CONFIG_BRIDGE=m
1682 +CONFIG_BRIDGE_IGMP_SNOOPING=y
1683 +CONFIG_BRIDGE_VLAN_FILTERING=y
1684 +CONFIG_BRIDGE_MRP=y
1685 +CONFIG_BRIDGE_CFM=y
1686 +CONFIG_HAVE_NET_DSA=y
1687 +CONFIG_NET_DSA=m
1688 +CONFIG_NET_DSA_TAG_8021Q=m
1689 +CONFIG_NET_DSA_TAG_AR9331=m
1690 +CONFIG_NET_DSA_TAG_BRCM_COMMON=m
1691 +CONFIG_NET_DSA_TAG_BRCM=m
1692 +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m
1693 +CONFIG_NET_DSA_TAG_HELLCREEK=m
1694 +CONFIG_NET_DSA_TAG_GSWIP=m
1695 +CONFIG_NET_DSA_TAG_DSA_COMMON=m
1696 +CONFIG_NET_DSA_TAG_DSA=m
1697 +CONFIG_NET_DSA_TAG_EDSA=m
1698 +CONFIG_NET_DSA_TAG_MTK=m
1699 +CONFIG_NET_DSA_TAG_KSZ=m
1700 +CONFIG_NET_DSA_TAG_RTL4_A=m
1701 +CONFIG_NET_DSA_TAG_OCELOT=m
1702 +CONFIG_NET_DSA_TAG_OCELOT_8021Q=m
1703 +CONFIG_NET_DSA_TAG_QCA=m
1704 +CONFIG_NET_DSA_TAG_LAN9303=m
1705 +CONFIG_NET_DSA_TAG_SJA1105=m
1706 +CONFIG_NET_DSA_TAG_TRAILER=m
1707 +CONFIG_NET_DSA_TAG_XRS700X=m
1708 +CONFIG_VLAN_8021Q=m
1709 +CONFIG_VLAN_8021Q_GVRP=y
1710 +CONFIG_VLAN_8021Q_MVRP=y
1711 +CONFIG_DECNET=m
1712 +# CONFIG_DECNET_ROUTER is not set
1713 +CONFIG_LLC=m
1714 +CONFIG_LLC2=m
1715 +CONFIG_ATALK=m
1716 +CONFIG_DEV_APPLETALK=m
1717 +# CONFIG_IPDDP is not set
1718 +CONFIG_X25=m
1719 +CONFIG_LAPB=m
1720 +CONFIG_PHONET=m
1721 +CONFIG_6LOWPAN=m
1722 +# CONFIG_6LOWPAN_DEBUGFS is not set
1723 +CONFIG_6LOWPAN_NHC=m
1724 +CONFIG_6LOWPAN_NHC_DEST=m
1725 +CONFIG_6LOWPAN_NHC_FRAGMENT=m
1726 +CONFIG_6LOWPAN_NHC_HOP=m
1727 +CONFIG_6LOWPAN_NHC_IPV6=m
1728 +CONFIG_6LOWPAN_NHC_MOBILITY=m
1729 +CONFIG_6LOWPAN_NHC_ROUTING=m
1730 +CONFIG_6LOWPAN_NHC_UDP=m
1731 +# CONFIG_6LOWPAN_GHC_EXT_HDR_HOP is not set
1732 +# CONFIG_6LOWPAN_GHC_UDP is not set
1733 +# CONFIG_6LOWPAN_GHC_ICMPV6 is not set
1734 +# CONFIG_6LOWPAN_GHC_EXT_HDR_DEST is not set
1735 +# CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG is not set
1736 +# CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE is not set
1737 +CONFIG_IEEE802154=m
1738 +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set
1739 +CONFIG_IEEE802154_SOCKET=m
1740 +CONFIG_IEEE802154_6LOWPAN=m
1741 +CONFIG_MAC802154=m
1742 +CONFIG_NET_SCHED=y
1745 +# Queueing/Scheduling
1747 +CONFIG_NET_SCH_CBQ=m
1748 +CONFIG_NET_SCH_HTB=m
1749 +CONFIG_NET_SCH_HFSC=m
1750 +CONFIG_NET_SCH_ATM=m
1751 +CONFIG_NET_SCH_PRIO=m
1752 +CONFIG_NET_SCH_MULTIQ=m
1753 +CONFIG_NET_SCH_RED=m
1754 +CONFIG_NET_SCH_SFB=m
1755 +CONFIG_NET_SCH_SFQ=m
1756 +CONFIG_NET_SCH_TEQL=m
1757 +CONFIG_NET_SCH_TBF=m
1758 +CONFIG_NET_SCH_CBS=m
1759 +CONFIG_NET_SCH_ETF=m
1760 +CONFIG_NET_SCH_TAPRIO=m
1761 +CONFIG_NET_SCH_GRED=m
1762 +CONFIG_NET_SCH_DSMARK=m
1763 +CONFIG_NET_SCH_NETEM=m
1764 +CONFIG_NET_SCH_DRR=m
1765 +CONFIG_NET_SCH_MQPRIO=m
1766 +CONFIG_NET_SCH_SKBPRIO=m
1767 +CONFIG_NET_SCH_CHOKE=m
1768 +CONFIG_NET_SCH_QFQ=m
1769 +CONFIG_NET_SCH_CODEL=m
1770 +CONFIG_NET_SCH_FQ_CODEL=m
1771 +CONFIG_NET_SCH_CAKE=m
1772 +CONFIG_NET_SCH_FQ=m
1773 +CONFIG_NET_SCH_HHF=m
1774 +CONFIG_NET_SCH_PIE=y
1775 +CONFIG_NET_SCH_FQ_PIE=y
1776 +CONFIG_NET_SCH_INGRESS=m
1777 +CONFIG_NET_SCH_PLUG=m
1778 +CONFIG_NET_SCH_ETS=m
1779 +CONFIG_NET_SCH_DEFAULT=y
1780 +# CONFIG_DEFAULT_FQ is not set
1781 +# CONFIG_DEFAULT_CODEL is not set
1782 +# CONFIG_DEFAULT_FQ_CODEL is not set
1783 +CONFIG_DEFAULT_FQ_PIE=y
1784 +# CONFIG_DEFAULT_SFQ is not set
1785 +# CONFIG_DEFAULT_PFIFO_FAST is not set
1786 +CONFIG_DEFAULT_NET_SCH="fq_pie"
1789 +# Classification
1791 +CONFIG_NET_CLS=y
1792 +CONFIG_NET_CLS_BASIC=m
1793 +CONFIG_NET_CLS_TCINDEX=m
1794 +CONFIG_NET_CLS_ROUTE4=m
1795 +CONFIG_NET_CLS_FW=m
1796 +CONFIG_NET_CLS_U32=m
1797 +# CONFIG_CLS_U32_PERF is not set
1798 +CONFIG_CLS_U32_MARK=y
1799 +CONFIG_NET_CLS_RSVP=m
1800 +CONFIG_NET_CLS_RSVP6=m
1801 +CONFIG_NET_CLS_FLOW=m
1802 +CONFIG_NET_CLS_CGROUP=m
1803 +CONFIG_NET_CLS_BPF=m
1804 +CONFIG_NET_CLS_FLOWER=m
1805 +CONFIG_NET_CLS_MATCHALL=m
1806 +CONFIG_NET_EMATCH=y
1807 +CONFIG_NET_EMATCH_STACK=32
1808 +CONFIG_NET_EMATCH_CMP=m
1809 +CONFIG_NET_EMATCH_NBYTE=m
1810 +CONFIG_NET_EMATCH_U32=m
1811 +CONFIG_NET_EMATCH_META=m
1812 +CONFIG_NET_EMATCH_TEXT=m
1813 +CONFIG_NET_EMATCH_CANID=m
1814 +CONFIG_NET_EMATCH_IPSET=m
1815 +CONFIG_NET_EMATCH_IPT=m
1816 +CONFIG_NET_CLS_ACT=y
1817 +CONFIG_NET_ACT_POLICE=m
1818 +CONFIG_NET_ACT_GACT=m
1819 +CONFIG_GACT_PROB=y
1820 +CONFIG_NET_ACT_MIRRED=m
1821 +CONFIG_NET_ACT_SAMPLE=m
1822 +CONFIG_NET_ACT_IPT=m
1823 +CONFIG_NET_ACT_NAT=m
1824 +CONFIG_NET_ACT_PEDIT=m
1825 +CONFIG_NET_ACT_SIMP=m
1826 +CONFIG_NET_ACT_SKBEDIT=m
1827 +CONFIG_NET_ACT_CSUM=m
1828 +CONFIG_NET_ACT_MPLS=m
1829 +CONFIG_NET_ACT_VLAN=m
1830 +CONFIG_NET_ACT_BPF=m
1831 +CONFIG_NET_ACT_CONNMARK=m
1832 +CONFIG_NET_ACT_CTINFO=m
1833 +CONFIG_NET_ACT_SKBMOD=m
1834 +# CONFIG_NET_ACT_IFE is not set
1835 +CONFIG_NET_ACT_TUNNEL_KEY=m
1836 +CONFIG_NET_ACT_CT=m
1837 +CONFIG_NET_ACT_GATE=m
1838 +CONFIG_NET_TC_SKB_EXT=y
1839 +CONFIG_NET_SCH_FIFO=y
1840 +CONFIG_DCB=y
1841 +CONFIG_DNS_RESOLVER=y
1842 +CONFIG_BATMAN_ADV=m
1843 +# CONFIG_BATMAN_ADV_BATMAN_V is not set
1844 +CONFIG_BATMAN_ADV_BLA=y
1845 +CONFIG_BATMAN_ADV_DAT=y
1846 +CONFIG_BATMAN_ADV_NC=y
1847 +CONFIG_BATMAN_ADV_MCAST=y
1848 +# CONFIG_BATMAN_ADV_DEBUG is not set
1849 +CONFIG_OPENVSWITCH=m
1850 +CONFIG_OPENVSWITCH_GRE=m
1851 +CONFIG_OPENVSWITCH_VXLAN=m
1852 +CONFIG_OPENVSWITCH_GENEVE=m
1853 +CONFIG_VSOCKETS=m
1854 +CONFIG_VSOCKETS_DIAG=m
1855 +CONFIG_VSOCKETS_LOOPBACK=m
1856 +CONFIG_VMWARE_VMCI_VSOCKETS=m
1857 +CONFIG_VIRTIO_VSOCKETS=m
1858 +CONFIG_VIRTIO_VSOCKETS_COMMON=m
1859 +CONFIG_HYPERV_VSOCKETS=m
1860 +CONFIG_NETLINK_DIAG=m
1861 +CONFIG_MPLS=y
1862 +CONFIG_NET_MPLS_GSO=m
1863 +CONFIG_MPLS_ROUTING=m
1864 +CONFIG_MPLS_IPTUNNEL=m
1865 +CONFIG_NET_NSH=m
1866 +CONFIG_HSR=m
1867 +CONFIG_NET_SWITCHDEV=y
1868 +CONFIG_NET_L3_MASTER_DEV=y
1869 +CONFIG_QRTR=m
1870 +CONFIG_QRTR_SMD=m
1871 +CONFIG_QRTR_TUN=m
1872 +CONFIG_QRTR_MHI=m
1873 +CONFIG_NET_NCSI=y
1874 +CONFIG_NCSI_OEM_CMD_GET_MAC=y
1875 +CONFIG_RPS=y
1876 +CONFIG_RFS_ACCEL=y
1877 +CONFIG_SOCK_RX_QUEUE_MAPPING=y
1878 +CONFIG_XPS=y
1879 +CONFIG_CGROUP_NET_PRIO=y
1880 +CONFIG_CGROUP_NET_CLASSID=y
1881 +CONFIG_NET_RX_BUSY_POLL=y
1882 +CONFIG_BQL=y
1883 +CONFIG_BPF_JIT=y
1884 +CONFIG_BPF_STREAM_PARSER=y
1885 +CONFIG_NET_FLOW_LIMIT=y
1888 +# Network testing
1890 +CONFIG_NET_PKTGEN=m
1891 +# end of Network testing
1892 +# end of Networking options
1894 +CONFIG_HAMRADIO=y
1897 +# Packet Radio protocols
1899 +CONFIG_AX25=m
1900 +CONFIG_AX25_DAMA_SLAVE=y
1901 +CONFIG_NETROM=m
1902 +CONFIG_ROSE=m
1905 +# AX.25 network device drivers
1907 +CONFIG_MKISS=m
1908 +CONFIG_6PACK=m
1909 +CONFIG_BPQETHER=m
1910 +CONFIG_BAYCOM_SER_FDX=m
1911 +CONFIG_BAYCOM_SER_HDX=m
1912 +CONFIG_BAYCOM_PAR=m
1913 +CONFIG_YAM=m
1914 +# end of AX.25 network device drivers
1916 +CONFIG_CAN=m
1917 +CONFIG_CAN_RAW=m
1918 +CONFIG_CAN_BCM=m
1919 +CONFIG_CAN_GW=m
1920 +CONFIG_CAN_J1939=m
1921 +CONFIG_CAN_ISOTP=m
1924 +# CAN Device Drivers
1926 +CONFIG_CAN_VCAN=m
1927 +CONFIG_CAN_VXCAN=m
1928 +CONFIG_CAN_SLCAN=m
1929 +CONFIG_CAN_DEV=m
1930 +CONFIG_CAN_CALC_BITTIMING=y
1931 +CONFIG_CAN_JANZ_ICAN3=m
1932 +CONFIG_CAN_KVASER_PCIEFD=m
1933 +CONFIG_CAN_C_CAN=m
1934 +CONFIG_CAN_C_CAN_PLATFORM=m
1935 +CONFIG_CAN_C_CAN_PCI=m
1936 +CONFIG_CAN_CC770=m
1937 +CONFIG_CAN_CC770_ISA=m
1938 +CONFIG_CAN_CC770_PLATFORM=m
1939 +CONFIG_CAN_IFI_CANFD=m
1940 +CONFIG_CAN_M_CAN=m
1941 +CONFIG_CAN_M_CAN_PCI=m
1942 +CONFIG_CAN_M_CAN_PLATFORM=m
1943 +CONFIG_CAN_M_CAN_TCAN4X5X=m
1944 +CONFIG_CAN_PEAK_PCIEFD=m
1945 +CONFIG_CAN_SJA1000=m
1946 +CONFIG_CAN_EMS_PCI=m
1947 +CONFIG_CAN_EMS_PCMCIA=m
1948 +CONFIG_CAN_F81601=m
1949 +CONFIG_CAN_KVASER_PCI=m
1950 +CONFIG_CAN_PEAK_PCI=m
1951 +CONFIG_CAN_PEAK_PCIEC=y
1952 +CONFIG_CAN_PEAK_PCMCIA=m
1953 +CONFIG_CAN_PLX_PCI=m
1954 +CONFIG_CAN_SJA1000_ISA=m
1955 +CONFIG_CAN_SJA1000_PLATFORM=m
1956 +CONFIG_CAN_SOFTING=m
1957 +CONFIG_CAN_SOFTING_CS=m
1960 +# CAN SPI interfaces
1962 +CONFIG_CAN_HI311X=m
1963 +CONFIG_CAN_MCP251X=m
1964 +CONFIG_CAN_MCP251XFD=m
1965 +# CONFIG_CAN_MCP251XFD_SANITY is not set
1966 +# end of CAN SPI interfaces
1969 +# CAN USB interfaces
1971 +CONFIG_CAN_8DEV_USB=m
1972 +CONFIG_CAN_EMS_USB=m
1973 +CONFIG_CAN_ESD_USB2=m
1974 +CONFIG_CAN_GS_USB=m
1975 +CONFIG_CAN_KVASER_USB=m
1976 +CONFIG_CAN_MCBA_USB=m
1977 +CONFIG_CAN_PEAK_USB=m
1978 +CONFIG_CAN_UCAN=m
1979 +# end of CAN USB interfaces
1981 +# CONFIG_CAN_DEBUG_DEVICES is not set
1982 +# end of CAN Device Drivers
1984 +CONFIG_BT=m
1985 +CONFIG_BT_BREDR=y
1986 +CONFIG_BT_RFCOMM=m
1987 +CONFIG_BT_RFCOMM_TTY=y
1988 +CONFIG_BT_BNEP=m
1989 +CONFIG_BT_BNEP_MC_FILTER=y
1990 +CONFIG_BT_BNEP_PROTO_FILTER=y
1991 +CONFIG_BT_CMTP=m
1992 +CONFIG_BT_HIDP=m
1993 +CONFIG_BT_HS=y
1994 +CONFIG_BT_LE=y
1995 +CONFIG_BT_6LOWPAN=m
1996 +CONFIG_BT_LEDS=y
1997 +CONFIG_BT_MSFTEXT=y
1998 +CONFIG_BT_DEBUGFS=y
1999 +# CONFIG_BT_SELFTEST is not set
2002 +# Bluetooth device drivers
2004 +CONFIG_BT_INTEL=m
2005 +CONFIG_BT_BCM=m
2006 +CONFIG_BT_RTL=m
2007 +CONFIG_BT_QCA=m
2008 +CONFIG_BT_HCIBTUSB=m
2009 +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
2010 +CONFIG_BT_HCIBTUSB_BCM=y
2011 +CONFIG_BT_HCIBTUSB_MTK=y
2012 +CONFIG_BT_HCIBTUSB_RTL=y
2013 +CONFIG_BT_HCIBTSDIO=m
2014 +CONFIG_BT_HCIUART=m
2015 +CONFIG_BT_HCIUART_SERDEV=y
2016 +CONFIG_BT_HCIUART_H4=y
2017 +CONFIG_BT_HCIUART_NOKIA=m
2018 +CONFIG_BT_HCIUART_BCSP=y
2019 +CONFIG_BT_HCIUART_ATH3K=y
2020 +CONFIG_BT_HCIUART_LL=y
2021 +CONFIG_BT_HCIUART_3WIRE=y
2022 +CONFIG_BT_HCIUART_INTEL=y
2023 +CONFIG_BT_HCIUART_BCM=y
2024 +CONFIG_BT_HCIUART_RTL=y
2025 +CONFIG_BT_HCIUART_QCA=y
2026 +CONFIG_BT_HCIUART_AG6XX=y
2027 +CONFIG_BT_HCIUART_MRVL=y
2028 +CONFIG_BT_HCIBCM203X=m
2029 +CONFIG_BT_HCIBPA10X=m
2030 +CONFIG_BT_HCIBFUSB=m
2031 +CONFIG_BT_HCIDTL1=m
2032 +CONFIG_BT_HCIBT3C=m
2033 +CONFIG_BT_HCIBLUECARD=m
2034 +CONFIG_BT_HCIVHCI=m
2035 +CONFIG_BT_MRVL=m
2036 +CONFIG_BT_MRVL_SDIO=m
2037 +CONFIG_BT_ATH3K=m
2038 +CONFIG_BT_MTKSDIO=m
2039 +CONFIG_BT_MTKUART=m
2040 +CONFIG_BT_HCIRSI=m
2041 +# end of Bluetooth device drivers
2043 +CONFIG_AF_RXRPC=m
2044 +CONFIG_AF_RXRPC_IPV6=y
2045 +# CONFIG_AF_RXRPC_INJECT_LOSS is not set
2046 +# CONFIG_AF_RXRPC_DEBUG is not set
2047 +CONFIG_RXKAD=y
2048 +CONFIG_AF_KCM=m
2049 +CONFIG_STREAM_PARSER=y
2050 +CONFIG_FIB_RULES=y
2051 +CONFIG_WIRELESS=y
2052 +CONFIG_WIRELESS_EXT=y
2053 +CONFIG_WEXT_CORE=y
2054 +CONFIG_WEXT_PROC=y
2055 +CONFIG_WEXT_SPY=y
2056 +CONFIG_WEXT_PRIV=y
2057 +CONFIG_CFG80211=m
2058 +# CONFIG_NL80211_TESTMODE is not set
2059 +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
2060 +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
2061 +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y
2062 +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y
2063 +CONFIG_CFG80211_DEFAULT_PS=y
2064 +CONFIG_CFG80211_DEBUGFS=y
2065 +CONFIG_CFG80211_CRDA_SUPPORT=y
2066 +CONFIG_CFG80211_WEXT=y
2067 +CONFIG_CFG80211_WEXT_EXPORT=y
2068 +CONFIG_LIB80211=m
2069 +CONFIG_LIB80211_CRYPT_WEP=m
2070 +CONFIG_LIB80211_CRYPT_CCMP=m
2071 +CONFIG_LIB80211_CRYPT_TKIP=m
2072 +# CONFIG_LIB80211_DEBUG is not set
2073 +CONFIG_MAC80211=m
2074 +CONFIG_MAC80211_HAS_RC=y
2075 +CONFIG_MAC80211_RC_MINSTREL=y
2076 +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
2077 +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
2078 +CONFIG_MAC80211_MESH=y
2079 +CONFIG_MAC80211_LEDS=y
2080 +CONFIG_MAC80211_DEBUGFS=y
2081 +CONFIG_MAC80211_MESSAGE_TRACING=y
2082 +# CONFIG_MAC80211_DEBUG_MENU is not set
2083 +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
2084 +CONFIG_RFKILL=y
2085 +CONFIG_RFKILL_LEDS=y
2086 +CONFIG_RFKILL_INPUT=y
2087 +CONFIG_RFKILL_GPIO=m
2088 +CONFIG_NET_9P=m
2089 +CONFIG_NET_9P_VIRTIO=m
2090 +CONFIG_NET_9P_XEN=m
2091 +CONFIG_NET_9P_RDMA=m
2092 +# CONFIG_NET_9P_DEBUG is not set
2093 +CONFIG_CAIF=m
2094 +# CONFIG_CAIF_DEBUG is not set
2095 +CONFIG_CAIF_NETDEV=m
2096 +CONFIG_CAIF_USB=m
2097 +CONFIG_CEPH_LIB=m
2098 +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
2099 +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
2100 +CONFIG_NFC=m
2101 +CONFIG_NFC_DIGITAL=m
2102 +CONFIG_NFC_NCI=m
2103 +CONFIG_NFC_NCI_SPI=m
2104 +CONFIG_NFC_NCI_UART=m
2105 +CONFIG_NFC_HCI=m
2106 +CONFIG_NFC_SHDLC=y
2109 +# Near Field Communication (NFC) devices
2111 +CONFIG_NFC_TRF7970A=m
2112 +CONFIG_NFC_MEI_PHY=m
2113 +CONFIG_NFC_SIM=m
2114 +CONFIG_NFC_PORT100=m
2115 +CONFIG_NFC_VIRTUAL_NCI=m
2116 +CONFIG_NFC_FDP=m
2117 +CONFIG_NFC_FDP_I2C=m
2118 +CONFIG_NFC_PN544=m
2119 +CONFIG_NFC_PN544_I2C=m
2120 +CONFIG_NFC_PN544_MEI=m
2121 +CONFIG_NFC_PN533=m
2122 +CONFIG_NFC_PN533_USB=m
2123 +CONFIG_NFC_PN533_I2C=m
2124 +CONFIG_NFC_PN532_UART=m
2125 +CONFIG_NFC_MICROREAD=m
2126 +CONFIG_NFC_MICROREAD_I2C=m
2127 +CONFIG_NFC_MICROREAD_MEI=m
2128 +CONFIG_NFC_MRVL=m
2129 +CONFIG_NFC_MRVL_USB=m
2130 +CONFIG_NFC_MRVL_UART=m
2131 +CONFIG_NFC_MRVL_I2C=m
2132 +CONFIG_NFC_MRVL_SPI=m
2133 +CONFIG_NFC_ST21NFCA=m
2134 +CONFIG_NFC_ST21NFCA_I2C=m
2135 +CONFIG_NFC_ST_NCI=m
2136 +CONFIG_NFC_ST_NCI_I2C=m
2137 +CONFIG_NFC_ST_NCI_SPI=m
2138 +CONFIG_NFC_NXP_NCI=m
2139 +CONFIG_NFC_NXP_NCI_I2C=m
2140 +CONFIG_NFC_S3FWRN5=m
2141 +CONFIG_NFC_S3FWRN5_I2C=m
2142 +CONFIG_NFC_S3FWRN82_UART=m
2143 +CONFIG_NFC_ST95HF=m
2144 +# end of Near Field Communication (NFC) devices
2146 +CONFIG_PSAMPLE=m
2147 +CONFIG_NET_IFE=m
2148 +CONFIG_LWTUNNEL=y
2149 +CONFIG_LWTUNNEL_BPF=y
2150 +CONFIG_DST_CACHE=y
2151 +CONFIG_GRO_CELLS=y
2152 +CONFIG_SOCK_VALIDATE_XMIT=y
2153 +CONFIG_NET_SOCK_MSG=y
2154 +CONFIG_NET_DEVLINK=y
2155 +CONFIG_PAGE_POOL=y
2156 +CONFIG_FAILOVER=m
2157 +CONFIG_ETHTOOL_NETLINK=y
2158 +CONFIG_HAVE_EBPF_JIT=y
2161 +# Device Drivers
2163 +CONFIG_HAVE_EISA=y
2164 +# CONFIG_EISA is not set
2165 +CONFIG_HAVE_PCI=y
2166 +CONFIG_PCI=y
2167 +CONFIG_PCI_DOMAINS=y
2168 +CONFIG_PCIEPORTBUS=y
2169 +CONFIG_HOTPLUG_PCI_PCIE=y
2170 +CONFIG_PCIEAER=y
2171 +# CONFIG_PCIEAER_INJECT is not set
2172 +# CONFIG_PCIE_ECRC is not set
2173 +CONFIG_PCIEASPM=y
2174 +CONFIG_PCIEASPM_DEFAULT=y
2175 +# CONFIG_PCIEASPM_POWERSAVE is not set
2176 +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
2177 +# CONFIG_PCIEASPM_PERFORMANCE is not set
2178 +CONFIG_PCIE_PME=y
2179 +CONFIG_PCIE_DPC=y
2180 +CONFIG_PCIE_PTM=y
2181 +# CONFIG_PCIE_EDR is not set
2182 +CONFIG_PCI_MSI=y
2183 +CONFIG_PCI_MSI_IRQ_DOMAIN=y
2184 +CONFIG_PCI_QUIRKS=y
2185 +# CONFIG_PCI_DEBUG is not set
2186 +CONFIG_PCI_REALLOC_ENABLE_AUTO=y
2187 +CONFIG_PCI_STUB=m
2188 +CONFIG_PCI_PF_STUB=m
2189 +CONFIG_XEN_PCIDEV_FRONTEND=m
2190 +CONFIG_PCI_ATS=y
2191 +CONFIG_PCI_LOCKLESS_CONFIG=y
2192 +CONFIG_PCI_IOV=y
2193 +CONFIG_PCI_PRI=y
2194 +CONFIG_PCI_PASID=y
2195 +# CONFIG_PCI_P2PDMA is not set
2196 +CONFIG_PCI_LABEL=y
2197 +CONFIG_PCI_HYPERV=m
2198 +# CONFIG_PCIE_BUS_TUNE_OFF is not set
2199 +CONFIG_PCIE_BUS_DEFAULT=y
2200 +# CONFIG_PCIE_BUS_SAFE is not set
2201 +# CONFIG_PCIE_BUS_PERFORMANCE is not set
2202 +# CONFIG_PCIE_BUS_PEER2PEER is not set
2203 +CONFIG_HOTPLUG_PCI=y
2204 +CONFIG_HOTPLUG_PCI_ACPI=y
2205 +CONFIG_HOTPLUG_PCI_ACPI_IBM=m
2206 +CONFIG_HOTPLUG_PCI_CPCI=y
2207 +CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
2208 +CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
2209 +CONFIG_HOTPLUG_PCI_SHPC=y
2212 +# PCI controller drivers
2214 +CONFIG_VMD=m
2215 +CONFIG_PCI_HYPERV_INTERFACE=m
2218 +# DesignWare PCI Core Support
2220 +CONFIG_PCIE_DW=y
2221 +CONFIG_PCIE_DW_HOST=y
2222 +CONFIG_PCIE_DW_EP=y
2223 +CONFIG_PCIE_DW_PLAT=y
2224 +CONFIG_PCIE_DW_PLAT_HOST=y
2225 +CONFIG_PCIE_DW_PLAT_EP=y
2226 +# CONFIG_PCI_MESON is not set
2227 +# end of DesignWare PCI Core Support
2230 +# Mobiveil PCIe Core Support
2232 +# end of Mobiveil PCIe Core Support
2235 +# Cadence PCIe controllers support
2237 +# end of Cadence PCIe controllers support
2238 +# end of PCI controller drivers
2241 +# PCI Endpoint
2243 +CONFIG_PCI_ENDPOINT=y
2244 +CONFIG_PCI_ENDPOINT_CONFIGFS=y
2245 +# CONFIG_PCI_EPF_TEST is not set
2246 +CONFIG_PCI_EPF_NTB=m
2247 +# end of PCI Endpoint
2250 +# PCI switch controller drivers
2252 +CONFIG_PCI_SW_SWITCHTEC=m
2253 +# end of PCI switch controller drivers
2255 +CONFIG_CXL_BUS=m
2256 +CONFIG_CXL_MEM=m
2257 +# CONFIG_CXL_MEM_RAW_COMMANDS is not set
2258 +CONFIG_PCCARD=m
2259 +CONFIG_PCMCIA=m
2260 +CONFIG_PCMCIA_LOAD_CIS=y
2261 +CONFIG_CARDBUS=y
2264 +# PC-card bridges
2266 +CONFIG_YENTA=m
2267 +CONFIG_YENTA_O2=y
2268 +CONFIG_YENTA_RICOH=y
2269 +CONFIG_YENTA_TI=y
2270 +CONFIG_YENTA_ENE_TUNE=y
2271 +CONFIG_YENTA_TOSHIBA=y
2272 +CONFIG_PD6729=m
2273 +CONFIG_I82092=m
2274 +CONFIG_PCCARD_NONSTATIC=y
2275 +CONFIG_RAPIDIO=y
2276 +CONFIG_RAPIDIO_TSI721=m
2277 +CONFIG_RAPIDIO_DISC_TIMEOUT=30
2278 +# CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS is not set
2279 +CONFIG_RAPIDIO_DMA_ENGINE=y
2280 +# CONFIG_RAPIDIO_DEBUG is not set
2281 +CONFIG_RAPIDIO_ENUM_BASIC=m
2282 +CONFIG_RAPIDIO_CHMAN=m
2283 +CONFIG_RAPIDIO_MPORT_CDEV=m
2286 +# RapidIO Switch drivers
2288 +CONFIG_RAPIDIO_TSI57X=m
2289 +CONFIG_RAPIDIO_CPS_XX=m
2290 +CONFIG_RAPIDIO_TSI568=m
2291 +CONFIG_RAPIDIO_CPS_GEN2=m
2292 +CONFIG_RAPIDIO_RXS_GEN3=m
2293 +# end of RapidIO Switch drivers
2296 +# Generic Driver Options
2298 +CONFIG_AUXILIARY_BUS=y
2299 +CONFIG_UEVENT_HELPER=y
2300 +CONFIG_UEVENT_HELPER_PATH=""
2301 +CONFIG_DEVTMPFS=y
2302 +CONFIG_DEVTMPFS_MOUNT=y
2303 +# CONFIG_STANDALONE is not set
2304 +CONFIG_PREVENT_FIRMWARE_BUILD=y
2307 +# Firmware loader
2309 +CONFIG_FW_LOADER=y
2310 +CONFIG_FW_LOADER_PAGED_BUF=y
2311 +CONFIG_EXTRA_FIRMWARE=""
2312 +CONFIG_FW_LOADER_USER_HELPER=y
2313 +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
2314 +CONFIG_FW_LOADER_COMPRESS=y
2315 +CONFIG_FW_CACHE=y
2316 +# end of Firmware loader
2318 +CONFIG_WANT_DEV_COREDUMP=y
2319 +CONFIG_ALLOW_DEV_COREDUMP=y
2320 +CONFIG_DEV_COREDUMP=y
2321 +# CONFIG_DEBUG_DRIVER is not set
2322 +# CONFIG_DEBUG_DEVRES is not set
2323 +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
2324 +CONFIG_HMEM_REPORTING=y
2325 +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
2326 +CONFIG_SYS_HYPERVISOR=y
2327 +CONFIG_GENERIC_CPU_AUTOPROBE=y
2328 +CONFIG_GENERIC_CPU_VULNERABILITIES=y
2329 +CONFIG_REGMAP=y
2330 +CONFIG_REGMAP_I2C=y
2331 +CONFIG_REGMAP_SLIMBUS=m
2332 +CONFIG_REGMAP_SPI=y
2333 +CONFIG_REGMAP_SPMI=m
2334 +CONFIG_REGMAP_W1=m
2335 +CONFIG_REGMAP_MMIO=y
2336 +CONFIG_REGMAP_IRQ=y
2337 +CONFIG_REGMAP_SOUNDWIRE=m
2338 +CONFIG_REGMAP_SCCB=m
2339 +CONFIG_REGMAP_I3C=m
2340 +CONFIG_REGMAP_SPI_AVMM=m
2341 +CONFIG_DMA_SHARED_BUFFER=y
2342 +# CONFIG_DMA_FENCE_TRACE is not set
2343 +# end of Generic Driver Options
2346 +# Bus devices
2348 +CONFIG_MHI_BUS=m
2349 +# CONFIG_MHI_BUS_DEBUG is not set
2350 +CONFIG_MHI_BUS_PCI_GENERIC=m
2351 +# end of Bus devices
2353 +CONFIG_CONNECTOR=y
2354 +CONFIG_PROC_EVENTS=y
2355 +CONFIG_GNSS=m
2356 +CONFIG_GNSS_SERIAL=m
2357 +CONFIG_GNSS_MTK_SERIAL=m
2358 +CONFIG_GNSS_SIRF_SERIAL=m
2359 +CONFIG_GNSS_UBX_SERIAL=m
2360 +CONFIG_MTD=m
2361 +# CONFIG_MTD_TESTS is not set
2364 +# Partition parsers
2366 +CONFIG_MTD_AR7_PARTS=m
2367 +CONFIG_MTD_CMDLINE_PARTS=m
2368 +CONFIG_MTD_REDBOOT_PARTS=m
2369 +CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
2370 +# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
2371 +# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
2372 +# end of Partition parsers
2375 +# User Modules And Translation Layers
2377 +CONFIG_MTD_BLKDEVS=m
2378 +CONFIG_MTD_BLOCK=m
2379 +CONFIG_MTD_BLOCK_RO=m
2380 +CONFIG_FTL=m
2381 +CONFIG_NFTL=m
2382 +CONFIG_NFTL_RW=y
2383 +CONFIG_INFTL=m
2384 +CONFIG_RFD_FTL=m
2385 +CONFIG_SSFDC=m
2386 +CONFIG_SM_FTL=m
2387 +CONFIG_MTD_OOPS=m
2388 +CONFIG_MTD_PSTORE=m
2389 +CONFIG_MTD_SWAP=m
2390 +# CONFIG_MTD_PARTITIONED_MASTER is not set
2393 +# RAM/ROM/Flash chip drivers
2395 +CONFIG_MTD_CFI=m
2396 +CONFIG_MTD_JEDECPROBE=m
2397 +CONFIG_MTD_GEN_PROBE=m
2398 +# CONFIG_MTD_CFI_ADV_OPTIONS is not set
2399 +CONFIG_MTD_MAP_BANK_WIDTH_1=y
2400 +CONFIG_MTD_MAP_BANK_WIDTH_2=y
2401 +CONFIG_MTD_MAP_BANK_WIDTH_4=y
2402 +CONFIG_MTD_CFI_I1=y
2403 +CONFIG_MTD_CFI_I2=y
2404 +CONFIG_MTD_CFI_INTELEXT=m
2405 +CONFIG_MTD_CFI_AMDSTD=m
2406 +CONFIG_MTD_CFI_STAA=m
2407 +CONFIG_MTD_CFI_UTIL=m
2408 +CONFIG_MTD_RAM=m
2409 +CONFIG_MTD_ROM=m
2410 +CONFIG_MTD_ABSENT=m
2411 +# end of RAM/ROM/Flash chip drivers
2414 +# Mapping drivers for chip access
2416 +CONFIG_MTD_COMPLEX_MAPPINGS=y
2417 +CONFIG_MTD_PHYSMAP=m
2418 +# CONFIG_MTD_PHYSMAP_COMPAT is not set
2419 +CONFIG_MTD_PHYSMAP_GPIO_ADDR=y
2420 +CONFIG_MTD_SBC_GXX=m
2421 +CONFIG_MTD_AMD76XROM=m
2422 +CONFIG_MTD_ICHXROM=m
2423 +CONFIG_MTD_ESB2ROM=m
2424 +CONFIG_MTD_CK804XROM=m
2425 +CONFIG_MTD_SCB2_FLASH=m
2426 +CONFIG_MTD_NETtel=m
2427 +CONFIG_MTD_L440GX=m
2428 +CONFIG_MTD_PCI=m
2429 +CONFIG_MTD_PCMCIA=m
2430 +# CONFIG_MTD_PCMCIA_ANONYMOUS is not set
2431 +CONFIG_MTD_INTEL_VR_NOR=m
2432 +CONFIG_MTD_PLATRAM=m
2433 +# end of Mapping drivers for chip access
2436 +# Self-contained MTD device drivers
2438 +CONFIG_MTD_PMC551=m
2439 +# CONFIG_MTD_PMC551_BUGFIX is not set
2440 +# CONFIG_MTD_PMC551_DEBUG is not set
2441 +CONFIG_MTD_DATAFLASH=m
2442 +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
2443 +CONFIG_MTD_DATAFLASH_OTP=y
2444 +CONFIG_MTD_MCHP23K256=m
2445 +CONFIG_MTD_SST25L=m
2446 +CONFIG_MTD_SLRAM=m
2447 +CONFIG_MTD_PHRAM=m
2448 +CONFIG_MTD_MTDRAM=m
2449 +CONFIG_MTDRAM_TOTAL_SIZE=4096
2450 +CONFIG_MTDRAM_ERASE_SIZE=128
2451 +CONFIG_MTD_BLOCK2MTD=m
2454 +# Disk-On-Chip Device Drivers
2456 +# CONFIG_MTD_DOCG3 is not set
2457 +# end of Self-contained MTD device drivers
2460 +# NAND
2462 +CONFIG_MTD_NAND_CORE=m
2463 +CONFIG_MTD_ONENAND=m
2464 +CONFIG_MTD_ONENAND_VERIFY_WRITE=y
2465 +CONFIG_MTD_ONENAND_GENERIC=m
2466 +# CONFIG_MTD_ONENAND_OTP is not set
2467 +CONFIG_MTD_ONENAND_2X_PROGRAM=y
2468 +CONFIG_MTD_RAW_NAND=m
2471 +# Raw/parallel NAND flash controllers
2473 +CONFIG_MTD_NAND_DENALI=m
2474 +CONFIG_MTD_NAND_DENALI_PCI=m
2475 +CONFIG_MTD_NAND_CAFE=m
2476 +CONFIG_MTD_NAND_MXIC=m
2477 +CONFIG_MTD_NAND_GPIO=m
2478 +CONFIG_MTD_NAND_PLATFORM=m
2479 +CONFIG_MTD_NAND_ARASAN=m
2482 +# Misc
2484 +CONFIG_MTD_SM_COMMON=m
2485 +CONFIG_MTD_NAND_NANDSIM=m
2486 +CONFIG_MTD_NAND_RICOH=m
2487 +CONFIG_MTD_NAND_DISKONCHIP=m
2488 +# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
2489 +CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
2490 +# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
2491 +CONFIG_MTD_SPI_NAND=m
2494 +# ECC engine support
2496 +CONFIG_MTD_NAND_ECC=y
2497 +CONFIG_MTD_NAND_ECC_SW_HAMMING=y
2498 +# CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC is not set
2499 +CONFIG_MTD_NAND_ECC_SW_BCH=y
2500 +# end of ECC engine support
2501 +# end of NAND
2504 +# LPDDR & LPDDR2 PCM memory drivers
2506 +CONFIG_MTD_LPDDR=m
2507 +CONFIG_MTD_QINFO_PROBE=m
2508 +# end of LPDDR & LPDDR2 PCM memory drivers
2510 +CONFIG_MTD_SPI_NOR=m
2511 +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
2512 +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set
2513 +CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y
2514 +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set
2515 +# CONFIG_SPI_INTEL_SPI_PCI is not set
2516 +# CONFIG_SPI_INTEL_SPI_PLATFORM is not set
2517 +CONFIG_MTD_UBI=m
2518 +CONFIG_MTD_UBI_WL_THRESHOLD=4096
2519 +CONFIG_MTD_UBI_BEB_LIMIT=20
2520 +CONFIG_MTD_UBI_FASTMAP=y
2521 +CONFIG_MTD_UBI_GLUEBI=m
2522 +CONFIG_MTD_UBI_BLOCK=y
2523 +CONFIG_MTD_HYPERBUS=m
2524 +# CONFIG_OF is not set
2525 +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
2526 +CONFIG_PARPORT=m
2527 +CONFIG_PARPORT_PC=m
2528 +CONFIG_PARPORT_SERIAL=m
2529 +CONFIG_PARPORT_PC_FIFO=y
2530 +# CONFIG_PARPORT_PC_SUPERIO is not set
2531 +CONFIG_PARPORT_PC_PCMCIA=m
2532 +CONFIG_PARPORT_AX88796=m
2533 +CONFIG_PARPORT_1284=y
2534 +CONFIG_PARPORT_NOT_PC=y
2535 +CONFIG_PNP=y
2536 +# CONFIG_PNP_DEBUG_MESSAGES is not set
2539 +# Protocols
2541 +CONFIG_PNPACPI=y
2542 +CONFIG_BLK_DEV=y
2543 +CONFIG_BLK_DEV_NULL_BLK=m
2544 +CONFIG_BLK_DEV_FD=m
2545 +CONFIG_CDROM=y
2546 +CONFIG_PARIDE=m
2549 +# Parallel IDE high-level drivers
2551 +CONFIG_PARIDE_PD=m
2552 +CONFIG_PARIDE_PCD=m
2553 +CONFIG_PARIDE_PF=m
2554 +CONFIG_PARIDE_PT=m
2555 +CONFIG_PARIDE_PG=m
2558 +# Parallel IDE protocol modules
2560 +CONFIG_PARIDE_ATEN=m
2561 +CONFIG_PARIDE_BPCK=m
2562 +CONFIG_PARIDE_COMM=m
2563 +CONFIG_PARIDE_DSTR=m
2564 +CONFIG_PARIDE_FIT2=m
2565 +CONFIG_PARIDE_FIT3=m
2566 +CONFIG_PARIDE_EPAT=m
2567 +CONFIG_PARIDE_EPATC8=y
2568 +CONFIG_PARIDE_EPIA=m
2569 +CONFIG_PARIDE_FRIQ=m
2570 +CONFIG_PARIDE_FRPW=m
2571 +CONFIG_PARIDE_KBIC=m
2572 +CONFIG_PARIDE_KTTI=m
2573 +CONFIG_PARIDE_ON20=m
2574 +CONFIG_PARIDE_ON26=m
2575 +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
2576 +CONFIG_ZRAM=m
2577 +CONFIG_ZRAM_DEF_COMP_LZORLE=y
2578 +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set
2579 +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set
2580 +# CONFIG_ZRAM_DEF_COMP_LZO is not set
2581 +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set
2582 +# CONFIG_ZRAM_DEF_COMP_842 is not set
2583 +CONFIG_ZRAM_DEF_COMP="lzo-rle"
2584 +CONFIG_ZRAM_WRITEBACK=y
2585 +CONFIG_ZRAM_MEMORY_TRACKING=y
2586 +CONFIG_BLK_DEV_UMEM=m
2587 +CONFIG_BLK_DEV_LOOP=y
2588 +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
2589 +CONFIG_BLK_DEV_CRYPTOLOOP=m
2590 +CONFIG_BLK_DEV_DRBD=m
2591 +# CONFIG_DRBD_FAULT_INJECTION is not set
2592 +CONFIG_BLK_DEV_NBD=m
2593 +CONFIG_BLK_DEV_SX8=m
2594 +CONFIG_BLK_DEV_RAM=m
2595 +CONFIG_BLK_DEV_RAM_COUNT=16
2596 +CONFIG_BLK_DEV_RAM_SIZE=65536
2597 +CONFIG_CDROM_PKTCDVD=m
2598 +CONFIG_CDROM_PKTCDVD_BUFFERS=8
2599 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set
2600 +CONFIG_ATA_OVER_ETH=m
2601 +CONFIG_XEN_BLKDEV_FRONTEND=y
2602 +CONFIG_XEN_BLKDEV_BACKEND=m
2603 +CONFIG_VIRTIO_BLK=m
2604 +CONFIG_BLK_DEV_RBD=m
2605 +CONFIG_BLK_DEV_RSXX=m
2606 +CONFIG_BLK_DEV_RNBD=y
2607 +CONFIG_BLK_DEV_RNBD_CLIENT=m
2608 +CONFIG_BLK_DEV_RNBD_SERVER=m
2611 +# NVME Support
2613 +CONFIG_NVME_CORE=m
2614 +CONFIG_BLK_DEV_NVME=m
2615 +CONFIG_NVME_MULTIPATH=y
2616 +CONFIG_NVME_HWMON=y
2617 +CONFIG_NVME_FABRICS=m
2618 +CONFIG_NVME_RDMA=m
2619 +CONFIG_NVME_FC=m
2620 +CONFIG_NVME_TCP=m
2621 +CONFIG_NVME_TARGET=m
2622 +CONFIG_NVME_TARGET_PASSTHRU=y
2623 +CONFIG_NVME_TARGET_LOOP=m
2624 +CONFIG_NVME_TARGET_RDMA=m
2625 +CONFIG_NVME_TARGET_FC=m
2626 +# CONFIG_NVME_TARGET_FCLOOP is not set
2627 +CONFIG_NVME_TARGET_TCP=m
2628 +# end of NVME Support
2631 +# Misc devices
2633 +CONFIG_SENSORS_LIS3LV02D=m
2634 +CONFIG_AD525X_DPOT=m
2635 +CONFIG_AD525X_DPOT_I2C=m
2636 +CONFIG_AD525X_DPOT_SPI=m
2637 +CONFIG_DUMMY_IRQ=m
2638 +CONFIG_IBM_ASM=m
2639 +CONFIG_PHANTOM=m
2640 +CONFIG_TIFM_CORE=m
2641 +CONFIG_TIFM_7XX1=m
2642 +CONFIG_ICS932S401=m
2643 +CONFIG_ENCLOSURE_SERVICES=m
2644 +CONFIG_SGI_XP=m
2645 +CONFIG_HP_ILO=m
2646 +CONFIG_SGI_GRU=m
2647 +# CONFIG_SGI_GRU_DEBUG is not set
2648 +CONFIG_APDS9802ALS=m
2649 +CONFIG_ISL29003=m
2650 +CONFIG_ISL29020=m
2651 +CONFIG_SENSORS_TSL2550=m
2652 +CONFIG_SENSORS_BH1770=m
2653 +CONFIG_SENSORS_APDS990X=m
2654 +CONFIG_HMC6352=m
2655 +CONFIG_DS1682=m
2656 +CONFIG_VMWARE_BALLOON=m
2657 +CONFIG_LATTICE_ECP3_CONFIG=m
2658 +CONFIG_SRAM=y
2659 +# CONFIG_PCI_ENDPOINT_TEST is not set
2660 +CONFIG_XILINX_SDFEC=m
2661 +CONFIG_MISC_RTSX=m
2662 +CONFIG_PVPANIC=m
2663 +CONFIG_C2PORT=m
2664 +CONFIG_C2PORT_DURAMAR_2150=m
2667 +# EEPROM support
2669 +CONFIG_EEPROM_AT24=m
2670 +CONFIG_EEPROM_AT25=m
2671 +CONFIG_EEPROM_LEGACY=m
2672 +CONFIG_EEPROM_MAX6875=m
2673 +CONFIG_EEPROM_93CX6=m
2674 +CONFIG_EEPROM_93XX46=m
2675 +CONFIG_EEPROM_IDT_89HPESX=m
2676 +CONFIG_EEPROM_EE1004=m
2677 +# end of EEPROM support
2679 +CONFIG_CB710_CORE=m
2680 +# CONFIG_CB710_DEBUG is not set
2681 +CONFIG_CB710_DEBUG_ASSUMPTIONS=y
2684 +# Texas Instruments shared transport line discipline
2686 +CONFIG_TI_ST=m
2687 +# end of Texas Instruments shared transport line discipline
2689 +CONFIG_SENSORS_LIS3_I2C=m
2690 +CONFIG_ALTERA_STAPL=m
2691 +CONFIG_INTEL_MEI=m
2692 +CONFIG_INTEL_MEI_ME=m
2693 +CONFIG_INTEL_MEI_TXE=m
2694 +CONFIG_INTEL_MEI_HDCP=m
2695 +CONFIG_VMWARE_VMCI=m
2696 +CONFIG_GENWQE=m
2697 +CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
2698 +CONFIG_ECHO=m
2699 +CONFIG_BCM_VK=m
2700 +CONFIG_BCM_VK_TTY=y
2701 +CONFIG_MISC_ALCOR_PCI=m
2702 +CONFIG_MISC_RTSX_PCI=m
2703 +CONFIG_MISC_RTSX_USB=m
2704 +CONFIG_HABANA_AI=m
2705 +CONFIG_UACCE=m
2706 +# end of Misc devices
2708 +CONFIG_HAVE_IDE=y
2709 +# CONFIG_IDE is not set
2712 +# SCSI device support
2714 +CONFIG_SCSI_MOD=y
2715 +CONFIG_RAID_ATTRS=m
2716 +CONFIG_SCSI=y
2717 +CONFIG_SCSI_DMA=y
2718 +CONFIG_SCSI_NETLINK=y
2719 +CONFIG_SCSI_PROC_FS=y
2722 +# SCSI support type (disk, tape, CD-ROM)
2724 +CONFIG_BLK_DEV_SD=y
2725 +CONFIG_CHR_DEV_ST=m
2726 +CONFIG_BLK_DEV_SR=y
2727 +CONFIG_CHR_DEV_SG=y
2728 +CONFIG_CHR_DEV_SCH=m
2729 +CONFIG_SCSI_ENCLOSURE=m
2730 +CONFIG_SCSI_CONSTANTS=y
2731 +CONFIG_SCSI_LOGGING=y
2732 +CONFIG_SCSI_SCAN_ASYNC=y
2735 +# SCSI Transports
2737 +CONFIG_SCSI_SPI_ATTRS=m
2738 +CONFIG_SCSI_FC_ATTRS=m
2739 +CONFIG_SCSI_ISCSI_ATTRS=m
2740 +CONFIG_SCSI_SAS_ATTRS=m
2741 +CONFIG_SCSI_SAS_LIBSAS=m
2742 +CONFIG_SCSI_SAS_ATA=y
2743 +CONFIG_SCSI_SAS_HOST_SMP=y
2744 +CONFIG_SCSI_SRP_ATTRS=m
2745 +# end of SCSI Transports
2747 +CONFIG_SCSI_LOWLEVEL=y
2748 +CONFIG_ISCSI_TCP=m
2749 +CONFIG_ISCSI_BOOT_SYSFS=m
2750 +CONFIG_SCSI_CXGB3_ISCSI=m
2751 +CONFIG_SCSI_CXGB4_ISCSI=m
2752 +CONFIG_SCSI_BNX2_ISCSI=m
2753 +CONFIG_SCSI_BNX2X_FCOE=m
2754 +CONFIG_BE2ISCSI=m
2755 +CONFIG_BLK_DEV_3W_XXXX_RAID=m
2756 +CONFIG_SCSI_HPSA=m
2757 +CONFIG_SCSI_3W_9XXX=m
2758 +CONFIG_SCSI_3W_SAS=m
2759 +CONFIG_SCSI_ACARD=m
2760 +CONFIG_SCSI_AACRAID=m
2761 +CONFIG_SCSI_AIC7XXX=m
2762 +CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
2763 +CONFIG_AIC7XXX_RESET_DELAY_MS=5000
2764 +# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
2765 +CONFIG_AIC7XXX_DEBUG_MASK=0
2766 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
2767 +CONFIG_SCSI_AIC79XX=m
2768 +CONFIG_AIC79XX_CMDS_PER_DEVICE=32
2769 +CONFIG_AIC79XX_RESET_DELAY_MS=5000
2770 +# CONFIG_AIC79XX_DEBUG_ENABLE is not set
2771 +CONFIG_AIC79XX_DEBUG_MASK=0
2772 +CONFIG_AIC79XX_REG_PRETTY_PRINT=y
2773 +CONFIG_SCSI_AIC94XX=m
2774 +# CONFIG_AIC94XX_DEBUG is not set
2775 +CONFIG_SCSI_MVSAS=m
2776 +# CONFIG_SCSI_MVSAS_DEBUG is not set
2777 +# CONFIG_SCSI_MVSAS_TASKLET is not set
2778 +CONFIG_SCSI_MVUMI=m
2779 +CONFIG_SCSI_DPT_I2O=m
2780 +CONFIG_SCSI_ADVANSYS=m
2781 +CONFIG_SCSI_ARCMSR=m
2782 +CONFIG_SCSI_ESAS2R=m
2783 +CONFIG_MEGARAID_NEWGEN=y
2784 +CONFIG_MEGARAID_MM=m
2785 +CONFIG_MEGARAID_MAILBOX=m
2786 +CONFIG_MEGARAID_LEGACY=m
2787 +CONFIG_MEGARAID_SAS=m
2788 +CONFIG_SCSI_MPT3SAS=m
2789 +CONFIG_SCSI_MPT2SAS_MAX_SGE=128
2790 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128
2791 +CONFIG_SCSI_MPT2SAS=m
2792 +CONFIG_SCSI_SMARTPQI=m
2793 +CONFIG_SCSI_UFSHCD=m
2794 +CONFIG_SCSI_UFSHCD_PCI=m
2795 +CONFIG_SCSI_UFS_DWC_TC_PCI=m
2796 +CONFIG_SCSI_UFSHCD_PLATFORM=m
2797 +CONFIG_SCSI_UFS_CDNS_PLATFORM=m
2798 +CONFIG_SCSI_UFS_DWC_TC_PLATFORM=m
2799 +CONFIG_SCSI_UFS_BSG=y
2800 +CONFIG_SCSI_UFS_CRYPTO=y
2801 +CONFIG_SCSI_HPTIOP=m
2802 +CONFIG_SCSI_BUSLOGIC=m
2803 +CONFIG_SCSI_FLASHPOINT=y
2804 +CONFIG_SCSI_MYRB=m
2805 +CONFIG_SCSI_MYRS=m
2806 +CONFIG_VMWARE_PVSCSI=m
2807 +CONFIG_XEN_SCSI_FRONTEND=m
2808 +CONFIG_HYPERV_STORAGE=m
2809 +CONFIG_LIBFC=m
2810 +CONFIG_LIBFCOE=m
2811 +CONFIG_FCOE=m
2812 +CONFIG_FCOE_FNIC=m
2813 +CONFIG_SCSI_SNIC=m
2814 +# CONFIG_SCSI_SNIC_DEBUG_FS is not set
2815 +CONFIG_SCSI_DMX3191D=m
2816 +CONFIG_SCSI_FDOMAIN=m
2817 +CONFIG_SCSI_FDOMAIN_PCI=m
2818 +CONFIG_SCSI_ISCI=m
2819 +CONFIG_SCSI_IPS=m
2820 +CONFIG_SCSI_INITIO=m
2821 +CONFIG_SCSI_INIA100=m
2822 +CONFIG_SCSI_PPA=m
2823 +CONFIG_SCSI_IMM=m
2824 +# CONFIG_SCSI_IZIP_EPP16 is not set
2825 +# CONFIG_SCSI_IZIP_SLOW_CTR is not set
2826 +CONFIG_SCSI_STEX=m
2827 +CONFIG_SCSI_SYM53C8XX_2=m
2828 +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
2829 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
2830 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
2831 +CONFIG_SCSI_SYM53C8XX_MMIO=y
2832 +CONFIG_SCSI_IPR=m
2833 +CONFIG_SCSI_IPR_TRACE=y
2834 +CONFIG_SCSI_IPR_DUMP=y
2835 +CONFIG_SCSI_QLOGIC_1280=m
2836 +CONFIG_SCSI_QLA_FC=m
2837 +CONFIG_TCM_QLA2XXX=m
2838 +# CONFIG_TCM_QLA2XXX_DEBUG is not set
2839 +CONFIG_SCSI_QLA_ISCSI=m
2840 +CONFIG_QEDI=m
2841 +CONFIG_QEDF=m
2842 +CONFIG_SCSI_LPFC=m
2843 +# CONFIG_SCSI_LPFC_DEBUG_FS is not set
2844 +CONFIG_SCSI_DC395x=m
2845 +CONFIG_SCSI_AM53C974=m
2846 +CONFIG_SCSI_WD719X=m
2847 +CONFIG_SCSI_DEBUG=m
2848 +CONFIG_SCSI_PMCRAID=m
2849 +CONFIG_SCSI_PM8001=m
2850 +CONFIG_SCSI_BFA_FC=m
2851 +CONFIG_SCSI_VIRTIO=m
2852 +CONFIG_SCSI_CHELSIO_FCOE=m
2853 +CONFIG_SCSI_LOWLEVEL_PCMCIA=y
2854 +CONFIG_PCMCIA_AHA152X=m
2855 +CONFIG_PCMCIA_FDOMAIN=m
2856 +CONFIG_PCMCIA_QLOGIC=m
2857 +CONFIG_PCMCIA_SYM53C500=m
2858 +CONFIG_SCSI_DH=y
2859 +CONFIG_SCSI_DH_RDAC=m
2860 +CONFIG_SCSI_DH_HP_SW=m
2861 +CONFIG_SCSI_DH_EMC=m
2862 +CONFIG_SCSI_DH_ALUA=m
2863 +# end of SCSI device support
2865 +CONFIG_ATA=y
2866 +CONFIG_SATA_HOST=y
2867 +CONFIG_PATA_TIMINGS=y
2868 +CONFIG_ATA_VERBOSE_ERROR=y
2869 +CONFIG_ATA_FORCE=y
2870 +CONFIG_ATA_ACPI=y
2871 +CONFIG_SATA_ZPODD=y
2872 +CONFIG_SATA_PMP=y
2875 +# Controllers with non-SFF native interface
2877 +CONFIG_SATA_AHCI=m
2878 +CONFIG_SATA_MOBILE_LPM_POLICY=3
2879 +CONFIG_SATA_AHCI_PLATFORM=m
2880 +CONFIG_SATA_INIC162X=m
2881 +CONFIG_SATA_ACARD_AHCI=m
2882 +CONFIG_SATA_SIL24=m
2883 +CONFIG_ATA_SFF=y
2886 +# SFF controllers with custom DMA interface
2888 +CONFIG_PDC_ADMA=m
2889 +CONFIG_SATA_QSTOR=m
2890 +CONFIG_SATA_SX4=m
2891 +CONFIG_ATA_BMDMA=y
2894 +# SATA SFF controllers with BMDMA
2896 +CONFIG_ATA_PIIX=y
2897 +CONFIG_SATA_DWC=m
2898 +CONFIG_SATA_DWC_OLD_DMA=y
2899 +# CONFIG_SATA_DWC_DEBUG is not set
2900 +CONFIG_SATA_MV=m
2901 +CONFIG_SATA_NV=m
2902 +CONFIG_SATA_PROMISE=m
2903 +CONFIG_SATA_SIL=m
2904 +CONFIG_SATA_SIS=m
2905 +CONFIG_SATA_SVW=m
2906 +CONFIG_SATA_ULI=m
2907 +CONFIG_SATA_VIA=m
2908 +CONFIG_SATA_VITESSE=m
2911 +# PATA SFF controllers with BMDMA
2913 +CONFIG_PATA_ALI=m
2914 +CONFIG_PATA_AMD=m
2915 +CONFIG_PATA_ARTOP=m
2916 +CONFIG_PATA_ATIIXP=m
2917 +CONFIG_PATA_ATP867X=m
2918 +CONFIG_PATA_CMD64X=m
2919 +CONFIG_PATA_CYPRESS=m
2920 +CONFIG_PATA_EFAR=m
2921 +CONFIG_PATA_HPT366=m
2922 +CONFIG_PATA_HPT37X=m
2923 +CONFIG_PATA_HPT3X2N=m
2924 +CONFIG_PATA_HPT3X3=m
2925 +# CONFIG_PATA_HPT3X3_DMA is not set
2926 +CONFIG_PATA_IT8213=m
2927 +CONFIG_PATA_IT821X=m
2928 +CONFIG_PATA_JMICRON=m
2929 +CONFIG_PATA_MARVELL=m
2930 +CONFIG_PATA_NETCELL=m
2931 +CONFIG_PATA_NINJA32=m
2932 +CONFIG_PATA_NS87415=m
2933 +CONFIG_PATA_OLDPIIX=m
2934 +CONFIG_PATA_OPTIDMA=m
2935 +CONFIG_PATA_PDC2027X=m
2936 +CONFIG_PATA_PDC_OLD=m
2937 +CONFIG_PATA_RADISYS=m
2938 +CONFIG_PATA_RDC=m
2939 +CONFIG_PATA_SCH=m
2940 +CONFIG_PATA_SERVERWORKS=m
2941 +CONFIG_PATA_SIL680=m
2942 +CONFIG_PATA_SIS=y
2943 +CONFIG_PATA_TOSHIBA=m
2944 +CONFIG_PATA_TRIFLEX=m
2945 +CONFIG_PATA_VIA=m
2946 +CONFIG_PATA_WINBOND=m
2949 +# PIO-only SFF controllers
2951 +CONFIG_PATA_CMD640_PCI=m
2952 +CONFIG_PATA_MPIIX=m
2953 +CONFIG_PATA_NS87410=m
2954 +CONFIG_PATA_OPTI=m
2955 +CONFIG_PATA_PCMCIA=m
2956 +CONFIG_PATA_PLATFORM=m
2957 +CONFIG_PATA_RZ1000=m
2960 +# Generic fallback / legacy drivers
2962 +CONFIG_PATA_ACPI=m
2963 +CONFIG_ATA_GENERIC=y
2964 +CONFIG_PATA_LEGACY=m
2965 +CONFIG_MD=y
2966 +CONFIG_BLK_DEV_MD=y
2967 +CONFIG_MD_AUTODETECT=y
2968 +CONFIG_MD_LINEAR=m
2969 +CONFIG_MD_RAID0=m
2970 +CONFIG_MD_RAID1=m
2971 +CONFIG_MD_RAID10=m
2972 +CONFIG_MD_RAID456=m
2973 +CONFIG_MD_MULTIPATH=m
2974 +CONFIG_MD_FAULTY=m
2975 +CONFIG_MD_CLUSTER=m
2976 +CONFIG_BCACHE=m
2977 +# CONFIG_BCACHE_DEBUG is not set
2978 +# CONFIG_BCACHE_CLOSURES_DEBUG is not set
2979 +CONFIG_BCACHE_ASYNC_REGISTRATION=y
2980 +CONFIG_BLK_DEV_DM_BUILTIN=y
2981 +CONFIG_BLK_DEV_DM=y
2982 +# CONFIG_DM_DEBUG is not set
2983 +CONFIG_DM_BUFIO=m
2984 +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
2985 +CONFIG_DM_BIO_PRISON=m
2986 +CONFIG_DM_PERSISTENT_DATA=m
2987 +CONFIG_DM_UNSTRIPED=m
2988 +CONFIG_DM_CRYPT=m
2989 +CONFIG_DM_SNAPSHOT=m
2990 +CONFIG_DM_THIN_PROVISIONING=m
2991 +CONFIG_DM_CACHE=m
2992 +CONFIG_DM_CACHE_SMQ=m
2993 +CONFIG_DM_WRITECACHE=m
2994 +CONFIG_DM_EBS=m
2995 +CONFIG_DM_ERA=m
2996 +CONFIG_DM_CLONE=m
2997 +CONFIG_DM_MIRROR=m
2998 +CONFIG_DM_LOG_USERSPACE=m
2999 +CONFIG_DM_RAID=m
3000 +CONFIG_DM_ZERO=m
3001 +CONFIG_DM_MULTIPATH=m
3002 +CONFIG_DM_MULTIPATH_QL=m
3003 +CONFIG_DM_MULTIPATH_ST=m
3004 +CONFIG_DM_MULTIPATH_HST=m
3005 +CONFIG_DM_MULTIPATH_IOA=m
3006 +CONFIG_DM_DELAY=m
3007 +# CONFIG_DM_DUST is not set
3008 +CONFIG_DM_INIT=y
3009 +CONFIG_DM_UEVENT=y
3010 +CONFIG_DM_FLAKEY=m
3011 +CONFIG_DM_VERITY=m
3012 +CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
3013 +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_SECONDARY_KEYRING is not set
3014 +# CONFIG_DM_VERITY_FEC is not set
3015 +CONFIG_DM_SWITCH=m
3016 +CONFIG_DM_LOG_WRITES=m
3017 +CONFIG_DM_INTEGRITY=m
3018 +CONFIG_DM_ZONED=m
3019 +CONFIG_TARGET_CORE=m
3020 +CONFIG_TCM_IBLOCK=m
3021 +CONFIG_TCM_FILEIO=m
3022 +CONFIG_TCM_PSCSI=m
3023 +CONFIG_TCM_USER2=m
3024 +CONFIG_LOOPBACK_TARGET=m
3025 +CONFIG_TCM_FC=m
3026 +CONFIG_ISCSI_TARGET=m
3027 +CONFIG_ISCSI_TARGET_CXGB4=m
3028 +CONFIG_SBP_TARGET=m
3029 +CONFIG_FUSION=y
3030 +CONFIG_FUSION_SPI=m
3031 +CONFIG_FUSION_FC=m
3032 +CONFIG_FUSION_SAS=m
3033 +CONFIG_FUSION_MAX_SGE=128
3034 +CONFIG_FUSION_CTL=m
3035 +CONFIG_FUSION_LAN=m
3036 +CONFIG_FUSION_LOGGING=y
3039 +# IEEE 1394 (FireWire) support
3041 +CONFIG_FIREWIRE=m
3042 +CONFIG_FIREWIRE_OHCI=m
3043 +CONFIG_FIREWIRE_SBP2=m
3044 +CONFIG_FIREWIRE_NET=m
3045 +CONFIG_FIREWIRE_NOSY=m
3046 +# end of IEEE 1394 (FireWire) support
3048 +CONFIG_MACINTOSH_DRIVERS=y
3049 +CONFIG_MAC_EMUMOUSEBTN=m
3050 +CONFIG_NETDEVICES=y
3051 +CONFIG_MII=m
3052 +CONFIG_NET_CORE=y
3053 +CONFIG_BONDING=m
3054 +CONFIG_DUMMY=m
3055 +CONFIG_WIREGUARD=m
3056 +# CONFIG_WIREGUARD_DEBUG is not set
3057 +CONFIG_EQUALIZER=m
3058 +CONFIG_NET_FC=y
3059 +CONFIG_IFB=m
3060 +CONFIG_NET_TEAM=m
3061 +CONFIG_NET_TEAM_MODE_BROADCAST=m
3062 +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
3063 +CONFIG_NET_TEAM_MODE_RANDOM=m
3064 +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
3065 +CONFIG_NET_TEAM_MODE_LOADBALANCE=m
3066 +CONFIG_MACVLAN=m
3067 +CONFIG_MACVTAP=m
3068 +CONFIG_IPVLAN_L3S=y
3069 +CONFIG_IPVLAN=m
3070 +CONFIG_IPVTAP=m
3071 +CONFIG_VXLAN=m
3072 +CONFIG_GENEVE=m
3073 +CONFIG_BAREUDP=m
3074 +CONFIG_GTP=m
3075 +CONFIG_MACSEC=m
3076 +CONFIG_NETCONSOLE=m
3077 +CONFIG_NETCONSOLE_DYNAMIC=y
3078 +CONFIG_NETPOLL=y
3079 +CONFIG_NET_POLL_CONTROLLER=y
3080 +CONFIG_NTB_NETDEV=m
3081 +CONFIG_RIONET=m
3082 +CONFIG_RIONET_TX_SIZE=128
3083 +CONFIG_RIONET_RX_SIZE=128
3084 +CONFIG_TUN=y
3085 +CONFIG_TAP=m
3086 +# CONFIG_TUN_VNET_CROSS_LE is not set
3087 +CONFIG_VETH=m
3088 +CONFIG_VIRTIO_NET=m
3089 +CONFIG_NLMON=m
3090 +CONFIG_NET_VRF=m
3091 +CONFIG_VSOCKMON=m
3092 +CONFIG_MHI_NET=m
3093 +CONFIG_SUNGEM_PHY=m
3094 +CONFIG_ARCNET=m
3095 +CONFIG_ARCNET_1201=m
3096 +CONFIG_ARCNET_1051=m
3097 +CONFIG_ARCNET_RAW=m
3098 +CONFIG_ARCNET_CAP=m
3099 +CONFIG_ARCNET_COM90xx=m
3100 +CONFIG_ARCNET_COM90xxIO=m
3101 +CONFIG_ARCNET_RIM_I=m
3102 +CONFIG_ARCNET_COM20020=m
3103 +CONFIG_ARCNET_COM20020_PCI=m
3104 +CONFIG_ARCNET_COM20020_CS=m
3105 +CONFIG_ATM_DRIVERS=y
3106 +CONFIG_ATM_DUMMY=m
3107 +CONFIG_ATM_TCP=m
3108 +CONFIG_ATM_LANAI=m
3109 +CONFIG_ATM_ENI=m
3110 +# CONFIG_ATM_ENI_DEBUG is not set
3111 +# CONFIG_ATM_ENI_TUNE_BURST is not set
3112 +CONFIG_ATM_FIRESTREAM=m
3113 +CONFIG_ATM_ZATM=m
3114 +# CONFIG_ATM_ZATM_DEBUG is not set
3115 +CONFIG_ATM_NICSTAR=m
3116 +# CONFIG_ATM_NICSTAR_USE_SUNI is not set
3117 +# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
3118 +CONFIG_ATM_IDT77252=m
3119 +# CONFIG_ATM_IDT77252_DEBUG is not set
3120 +# CONFIG_ATM_IDT77252_RCV_ALL is not set
3121 +CONFIG_ATM_IDT77252_USE_SUNI=y
3122 +CONFIG_ATM_AMBASSADOR=m
3123 +# CONFIG_ATM_AMBASSADOR_DEBUG is not set
3124 +CONFIG_ATM_HORIZON=m
3125 +# CONFIG_ATM_HORIZON_DEBUG is not set
3126 +CONFIG_ATM_IA=m
3127 +# CONFIG_ATM_IA_DEBUG is not set
3128 +CONFIG_ATM_FORE200E=m
3129 +# CONFIG_ATM_FORE200E_USE_TASKLET is not set
3130 +CONFIG_ATM_FORE200E_TX_RETRY=16
3131 +CONFIG_ATM_FORE200E_DEBUG=0
3132 +CONFIG_ATM_HE=m
3133 +CONFIG_ATM_HE_USE_SUNI=y
3134 +CONFIG_ATM_SOLOS=m
3135 +CONFIG_CAIF_DRIVERS=y
3136 +CONFIG_CAIF_TTY=m
3137 +CONFIG_CAIF_HSI=m
3138 +CONFIG_CAIF_VIRTIO=m
3141 +# Distributed Switch Architecture drivers
3143 +CONFIG_B53=m
3144 +CONFIG_B53_SPI_DRIVER=m
3145 +CONFIG_B53_MDIO_DRIVER=m
3146 +CONFIG_B53_MMAP_DRIVER=m
3147 +CONFIG_B53_SRAB_DRIVER=m
3148 +CONFIG_B53_SERDES=m
3149 +CONFIG_NET_DSA_BCM_SF2=m
3150 +# CONFIG_NET_DSA_LOOP is not set
3151 +CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK=m
3152 +CONFIG_NET_DSA_LANTIQ_GSWIP=m
3153 +CONFIG_NET_DSA_MT7530=m
3154 +CONFIG_NET_DSA_MV88E6060=m
3155 +CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON=m
3156 +CONFIG_NET_DSA_MICROCHIP_KSZ9477=m
3157 +CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C=m
3158 +CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m
3159 +CONFIG_NET_DSA_MICROCHIP_KSZ8795=m
3160 +CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI=m
3161 +CONFIG_NET_DSA_MV88E6XXX=m
3162 +CONFIG_NET_DSA_MV88E6XXX_PTP=y
3163 +CONFIG_NET_DSA_MSCC_SEVILLE=m
3164 +CONFIG_NET_DSA_AR9331=m
3165 +CONFIG_NET_DSA_SJA1105=m
3166 +CONFIG_NET_DSA_SJA1105_PTP=y
3167 +CONFIG_NET_DSA_SJA1105_TAS=y
3168 +CONFIG_NET_DSA_SJA1105_VL=y
3169 +CONFIG_NET_DSA_XRS700X=m
3170 +CONFIG_NET_DSA_XRS700X_I2C=m
3171 +CONFIG_NET_DSA_XRS700X_MDIO=m
3172 +CONFIG_NET_DSA_QCA8K=m
3173 +CONFIG_NET_DSA_REALTEK_SMI=m
3174 +CONFIG_NET_DSA_SMSC_LAN9303=m
3175 +CONFIG_NET_DSA_SMSC_LAN9303_I2C=m
3176 +CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m
3177 +CONFIG_NET_DSA_VITESSE_VSC73XX=m
3178 +CONFIG_NET_DSA_VITESSE_VSC73XX_SPI=m
3179 +CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM=m
3180 +# end of Distributed Switch Architecture drivers
3182 +CONFIG_ETHERNET=y
3183 +CONFIG_MDIO=m
3184 +CONFIG_NET_VENDOR_3COM=y
3185 +CONFIG_PCMCIA_3C574=m
3186 +CONFIG_PCMCIA_3C589=m
3187 +CONFIG_VORTEX=m
3188 +CONFIG_TYPHOON=m
3189 +CONFIG_NET_VENDOR_ADAPTEC=y
3190 +CONFIG_ADAPTEC_STARFIRE=m
3191 +CONFIG_NET_VENDOR_AGERE=y
3192 +CONFIG_ET131X=m
3193 +CONFIG_NET_VENDOR_ALACRITECH=y
3194 +CONFIG_SLICOSS=m
3195 +CONFIG_NET_VENDOR_ALTEON=y
3196 +CONFIG_ACENIC=m
3197 +# CONFIG_ACENIC_OMIT_TIGON_I is not set
3198 +CONFIG_ALTERA_TSE=m
3199 +CONFIG_NET_VENDOR_AMAZON=y
3200 +CONFIG_ENA_ETHERNET=m
3201 +CONFIG_NET_VENDOR_AMD=y
3202 +CONFIG_AMD8111_ETH=m
3203 +CONFIG_PCNET32=m
3204 +CONFIG_PCMCIA_NMCLAN=m
3205 +CONFIG_AMD_XGBE=m
3206 +CONFIG_AMD_XGBE_DCB=y
3207 +CONFIG_AMD_XGBE_HAVE_ECC=y
3208 +CONFIG_NET_VENDOR_AQUANTIA=y
3209 +CONFIG_AQTION=m
3210 +CONFIG_NET_VENDOR_ARC=y
3211 +CONFIG_NET_VENDOR_ATHEROS=y
3212 +CONFIG_ATL2=m
3213 +CONFIG_ATL1=m
3214 +CONFIG_ATL1E=m
3215 +CONFIG_ATL1C=m
3216 +CONFIG_ALX=m
3217 +CONFIG_NET_VENDOR_BROADCOM=y
3218 +CONFIG_B44=m
3219 +CONFIG_B44_PCI_AUTOSELECT=y
3220 +CONFIG_B44_PCICORE_AUTOSELECT=y
3221 +CONFIG_B44_PCI=y
3222 +CONFIG_BCMGENET=m
3223 +CONFIG_BNX2=m
3224 +CONFIG_CNIC=m
3225 +CONFIG_TIGON3=m
3226 +CONFIG_TIGON3_HWMON=y
3227 +CONFIG_BNX2X=m
3228 +CONFIG_BNX2X_SRIOV=y
3229 +CONFIG_SYSTEMPORT=m
3230 +CONFIG_BNXT=m
3231 +CONFIG_BNXT_SRIOV=y
3232 +CONFIG_BNXT_FLOWER_OFFLOAD=y
3233 +CONFIG_BNXT_DCB=y
3234 +CONFIG_BNXT_HWMON=y
3235 +CONFIG_NET_VENDOR_BROCADE=y
3236 +CONFIG_BNA=m
3237 +CONFIG_NET_VENDOR_CADENCE=y
3238 +CONFIG_MACB=m
3239 +CONFIG_MACB_USE_HWSTAMP=y
3240 +CONFIG_MACB_PCI=m
3241 +CONFIG_NET_VENDOR_CAVIUM=y
3242 +CONFIG_THUNDER_NIC_PF=m
3243 +CONFIG_THUNDER_NIC_VF=m
3244 +CONFIG_THUNDER_NIC_BGX=m
3245 +CONFIG_THUNDER_NIC_RGX=m
3246 +CONFIG_CAVIUM_PTP=m
3247 +CONFIG_LIQUIDIO=m
3248 +CONFIG_LIQUIDIO_VF=m
3249 +CONFIG_NET_VENDOR_CHELSIO=y
3250 +CONFIG_CHELSIO_T1=m
3251 +CONFIG_CHELSIO_T1_1G=y
3252 +CONFIG_CHELSIO_T3=m
3253 +CONFIG_CHELSIO_T4=m
3254 +CONFIG_CHELSIO_T4_DCB=y
3255 +CONFIG_CHELSIO_T4_FCOE=y
3256 +CONFIG_CHELSIO_T4VF=m
3257 +CONFIG_CHELSIO_LIB=m
3258 +CONFIG_CHELSIO_INLINE_CRYPTO=y
3259 +CONFIG_CHELSIO_IPSEC_INLINE=m
3260 +CONFIG_CHELSIO_TLS_DEVICE=m
3261 +CONFIG_NET_VENDOR_CISCO=y
3262 +CONFIG_ENIC=m
3263 +CONFIG_NET_VENDOR_CORTINA=y
3264 +CONFIG_CX_ECAT=m
3265 +CONFIG_DNET=m
3266 +CONFIG_NET_VENDOR_DEC=y
3267 +CONFIG_NET_TULIP=y
3268 +CONFIG_DE2104X=m
3269 +CONFIG_DE2104X_DSL=0
3270 +CONFIG_TULIP=m
3271 +# CONFIG_TULIP_MWI is not set
3272 +# CONFIG_TULIP_MMIO is not set
3273 +# CONFIG_TULIP_NAPI is not set
3274 +CONFIG_DE4X5=m
3275 +CONFIG_WINBOND_840=m
3276 +CONFIG_DM9102=m
3277 +CONFIG_ULI526X=m
3278 +CONFIG_PCMCIA_XIRCOM=m
3279 +CONFIG_NET_VENDOR_DLINK=y
3280 +CONFIG_DL2K=m
3281 +CONFIG_SUNDANCE=m
3282 +# CONFIG_SUNDANCE_MMIO is not set
3283 +CONFIG_NET_VENDOR_EMULEX=y
3284 +CONFIG_BE2NET=m
3285 +CONFIG_BE2NET_HWMON=y
3286 +CONFIG_BE2NET_BE2=y
3287 +CONFIG_BE2NET_BE3=y
3288 +CONFIG_BE2NET_LANCER=y
3289 +CONFIG_BE2NET_SKYHAWK=y
3290 +CONFIG_NET_VENDOR_EZCHIP=y
3291 +CONFIG_NET_VENDOR_FUJITSU=y
3292 +CONFIG_PCMCIA_FMVJ18X=m
3293 +CONFIG_NET_VENDOR_GOOGLE=y
3294 +CONFIG_GVE=m
3295 +CONFIG_NET_VENDOR_HUAWEI=y
3296 +CONFIG_HINIC=m
3297 +CONFIG_NET_VENDOR_I825XX=y
3298 +CONFIG_NET_VENDOR_INTEL=y
3299 +CONFIG_E100=m
3300 +CONFIG_E1000=m
3301 +CONFIG_E1000E=m
3302 +CONFIG_E1000E_HWTS=y
3303 +CONFIG_IGB=m
3304 +CONFIG_IGB_HWMON=y
3305 +CONFIG_IGB_DCA=y
3306 +CONFIG_IGBVF=m
3307 +CONFIG_IXGB=m
3308 +CONFIG_IXGBE=m
3309 +CONFIG_IXGBE_HWMON=y
3310 +CONFIG_IXGBE_DCA=y
3311 +CONFIG_IXGBE_DCB=y
3312 +CONFIG_IXGBE_IPSEC=y
3313 +CONFIG_IXGBEVF=m
3314 +CONFIG_IXGBEVF_IPSEC=y
3315 +CONFIG_I40E=m
3316 +CONFIG_I40E_DCB=y
3317 +CONFIG_IAVF=m
3318 +CONFIG_I40EVF=m
3319 +CONFIG_ICE=m
3320 +CONFIG_FM10K=m
3321 +CONFIG_IGC=m
3322 +CONFIG_JME=m
3323 +CONFIG_NET_VENDOR_MARVELL=y
3324 +CONFIG_MVMDIO=m
3325 +CONFIG_SKGE=m
3326 +# CONFIG_SKGE_DEBUG is not set
3327 +CONFIG_SKGE_GENESIS=y
3328 +CONFIG_SKY2=m
3329 +# CONFIG_SKY2_DEBUG is not set
3330 +CONFIG_PRESTERA=m
3331 +CONFIG_PRESTERA_PCI=m
3332 +CONFIG_NET_VENDOR_MELLANOX=y
3333 +CONFIG_MLX4_EN=m
3334 +CONFIG_MLX4_EN_DCB=y
3335 +CONFIG_MLX4_CORE=m
3336 +CONFIG_MLX4_DEBUG=y
3337 +CONFIG_MLX4_CORE_GEN2=y
3338 +CONFIG_MLX5_CORE=m
3339 +CONFIG_MLX5_ACCEL=y
3340 +CONFIG_MLX5_FPGA=y
3341 +CONFIG_MLX5_CORE_EN=y
3342 +CONFIG_MLX5_EN_ARFS=y
3343 +CONFIG_MLX5_EN_RXNFC=y
3344 +CONFIG_MLX5_MPFS=y
3345 +CONFIG_MLX5_ESWITCH=y
3346 +CONFIG_MLX5_CLS_ACT=y
3347 +CONFIG_MLX5_TC_CT=y
3348 +CONFIG_MLX5_CORE_EN_DCB=y
3349 +CONFIG_MLX5_CORE_IPOIB=y
3350 +CONFIG_MLX5_FPGA_IPSEC=y
3351 +CONFIG_MLX5_IPSEC=y
3352 +CONFIG_MLX5_EN_IPSEC=y
3353 +CONFIG_MLX5_FPGA_TLS=y
3354 +CONFIG_MLX5_TLS=y
3355 +CONFIG_MLX5_EN_TLS=y
3356 +CONFIG_MLX5_SW_STEERING=y
3357 +CONFIG_MLX5_SF=y
3358 +CONFIG_MLX5_SF_MANAGER=y
3359 +CONFIG_MLXSW_CORE=m
3360 +CONFIG_MLXSW_CORE_HWMON=y
3361 +CONFIG_MLXSW_CORE_THERMAL=y
3362 +CONFIG_MLXSW_PCI=m
3363 +CONFIG_MLXSW_I2C=m
3364 +CONFIG_MLXSW_SWITCHIB=m
3365 +CONFIG_MLXSW_SWITCHX2=m
3366 +CONFIG_MLXSW_SPECTRUM=m
3367 +CONFIG_MLXSW_SPECTRUM_DCB=y
3368 +CONFIG_MLXSW_MINIMAL=m
3369 +CONFIG_MLXFW=m
3370 +CONFIG_NET_VENDOR_MICREL=y
3371 +CONFIG_KS8842=m
3372 +CONFIG_KS8851=m
3373 +CONFIG_KS8851_MLL=m
3374 +CONFIG_KSZ884X_PCI=m
3375 +CONFIG_NET_VENDOR_MICROCHIP=y
3376 +CONFIG_ENC28J60=m
3377 +# CONFIG_ENC28J60_WRITEVERIFY is not set
3378 +CONFIG_ENCX24J600=m
3379 +CONFIG_LAN743X=m
3380 +CONFIG_NET_VENDOR_MICROSEMI=y
3381 +CONFIG_MSCC_OCELOT_SWITCH_LIB=m
3382 +CONFIG_NET_VENDOR_MYRI=y
3383 +CONFIG_MYRI10GE=m
3384 +CONFIG_MYRI10GE_DCA=y
3385 +CONFIG_FEALNX=m
3386 +CONFIG_NET_VENDOR_NATSEMI=y
3387 +CONFIG_NATSEMI=m
3388 +CONFIG_NS83820=m
3389 +CONFIG_NET_VENDOR_NETERION=y
3390 +CONFIG_S2IO=m
3391 +CONFIG_VXGE=m
3392 +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
3393 +CONFIG_NET_VENDOR_NETRONOME=y
3394 +CONFIG_NFP=m
3395 +CONFIG_NFP_APP_FLOWER=y
3396 +CONFIG_NFP_APP_ABM_NIC=y
3397 +# CONFIG_NFP_DEBUG is not set
3398 +CONFIG_NET_VENDOR_NI=y
3399 +CONFIG_NI_XGE_MANAGEMENT_ENET=m
3400 +CONFIG_NET_VENDOR_8390=y
3401 +CONFIG_PCMCIA_AXNET=m
3402 +CONFIG_NE2K_PCI=m
3403 +CONFIG_PCMCIA_PCNET=m
3404 +CONFIG_NET_VENDOR_NVIDIA=y
3405 +CONFIG_FORCEDETH=m
3406 +CONFIG_NET_VENDOR_OKI=y
3407 +CONFIG_ETHOC=m
3408 +CONFIG_NET_VENDOR_PACKET_ENGINES=y
3409 +CONFIG_HAMACHI=m
3410 +CONFIG_YELLOWFIN=m
3411 +CONFIG_NET_VENDOR_PENSANDO=y
3412 +CONFIG_IONIC=m
3413 +CONFIG_NET_VENDOR_QLOGIC=y
3414 +CONFIG_QLA3XXX=m
3415 +CONFIG_QLCNIC=m
3416 +CONFIG_QLCNIC_SRIOV=y
3417 +CONFIG_QLCNIC_DCB=y
3418 +CONFIG_QLCNIC_HWMON=y
3419 +CONFIG_NETXEN_NIC=m
3420 +CONFIG_QED=m
3421 +CONFIG_QED_LL2=y
3422 +CONFIG_QED_SRIOV=y
3423 +CONFIG_QEDE=m
3424 +CONFIG_QED_RDMA=y
3425 +CONFIG_QED_ISCSI=y
3426 +CONFIG_QED_FCOE=y
3427 +CONFIG_QED_OOO=y
3428 +CONFIG_NET_VENDOR_QUALCOMM=y
3429 +CONFIG_QCOM_EMAC=m
3430 +CONFIG_RMNET=m
3431 +CONFIG_NET_VENDOR_RDC=y
3432 +CONFIG_R6040=m
3433 +CONFIG_NET_VENDOR_REALTEK=y
3434 +CONFIG_ATP=m
3435 +CONFIG_8139CP=m
3436 +CONFIG_8139TOO=m
3437 +CONFIG_8139TOO_PIO=y
3438 +# CONFIG_8139TOO_TUNE_TWISTER is not set
3439 +CONFIG_8139TOO_8129=y
3440 +# CONFIG_8139_OLD_RX_RESET is not set
3441 +CONFIG_R8169=m
3442 +CONFIG_NET_VENDOR_RENESAS=y
3443 +CONFIG_NET_VENDOR_ROCKER=y
3444 +CONFIG_ROCKER=m
3445 +CONFIG_NET_VENDOR_SAMSUNG=y
3446 +CONFIG_SXGBE_ETH=m
3447 +CONFIG_NET_VENDOR_SEEQ=y
3448 +CONFIG_NET_VENDOR_SOLARFLARE=y
3449 +CONFIG_SFC=m
3450 +CONFIG_SFC_MTD=y
3451 +CONFIG_SFC_MCDI_MON=y
3452 +CONFIG_SFC_SRIOV=y
3453 +CONFIG_SFC_MCDI_LOGGING=y
3454 +CONFIG_SFC_FALCON=m
3455 +CONFIG_SFC_FALCON_MTD=y
3456 +CONFIG_NET_VENDOR_SILAN=y
3457 +CONFIG_SC92031=m
3458 +CONFIG_NET_VENDOR_SIS=y
3459 +CONFIG_SIS900=m
3460 +CONFIG_SIS190=m
3461 +CONFIG_NET_VENDOR_SMSC=y
3462 +CONFIG_PCMCIA_SMC91C92=m
3463 +CONFIG_EPIC100=m
3464 +CONFIG_SMSC911X=m
3465 +CONFIG_SMSC9420=m
3466 +CONFIG_NET_VENDOR_SOCIONEXT=y
3467 +CONFIG_NET_VENDOR_STMICRO=y
3468 +CONFIG_STMMAC_ETH=m
3469 +# CONFIG_STMMAC_SELFTESTS is not set
3470 +CONFIG_STMMAC_PLATFORM=m
3471 +CONFIG_DWMAC_GENERIC=m
3472 +CONFIG_DWMAC_INTEL=m
3473 +CONFIG_STMMAC_PCI=m
3474 +CONFIG_NET_VENDOR_SUN=y
3475 +CONFIG_HAPPYMEAL=m
3476 +CONFIG_SUNGEM=m
3477 +CONFIG_CASSINI=m
3478 +CONFIG_NIU=m
3479 +CONFIG_NET_VENDOR_SYNOPSYS=y
3480 +CONFIG_DWC_XLGMAC=m
3481 +CONFIG_DWC_XLGMAC_PCI=m
3482 +CONFIG_NET_VENDOR_TEHUTI=y
3483 +CONFIG_TEHUTI=m
3484 +CONFIG_NET_VENDOR_TI=y
3485 +# CONFIG_TI_CPSW_PHY_SEL is not set
3486 +CONFIG_TLAN=m
3487 +CONFIG_NET_VENDOR_VIA=y
3488 +CONFIG_VIA_RHINE=m
3489 +CONFIG_VIA_RHINE_MMIO=y
3490 +CONFIG_VIA_VELOCITY=m
3491 +CONFIG_NET_VENDOR_WIZNET=y
3492 +CONFIG_WIZNET_W5100=m
3493 +CONFIG_WIZNET_W5300=m
3494 +# CONFIG_WIZNET_BUS_DIRECT is not set
3495 +# CONFIG_WIZNET_BUS_INDIRECT is not set
3496 +CONFIG_WIZNET_BUS_ANY=y
3497 +CONFIG_WIZNET_W5100_SPI=m
3498 +CONFIG_NET_VENDOR_XILINX=y
3499 +CONFIG_XILINX_EMACLITE=m
3500 +CONFIG_XILINX_AXI_EMAC=m
3501 +CONFIG_XILINX_LL_TEMAC=m
3502 +CONFIG_NET_VENDOR_XIRCOM=y
3503 +CONFIG_PCMCIA_XIRC2PS=m
3504 +CONFIG_FDDI=y
3505 +CONFIG_DEFXX=m
3506 +# CONFIG_DEFXX_MMIO is not set
3507 +CONFIG_SKFP=m
3508 +# CONFIG_HIPPI is not set
3509 +CONFIG_NET_SB1000=m
3510 +CONFIG_PHYLINK=m
3511 +CONFIG_PHYLIB=m
3512 +CONFIG_SWPHY=y
3513 +CONFIG_LED_TRIGGER_PHY=y
3514 +CONFIG_FIXED_PHY=m
3515 +CONFIG_SFP=m
3518 +# MII PHY device drivers
3520 +CONFIG_AMD_PHY=m
3521 +CONFIG_ADIN_PHY=m
3522 +CONFIG_AQUANTIA_PHY=m
3523 +CONFIG_AX88796B_PHY=m
3524 +CONFIG_BROADCOM_PHY=m
3525 +CONFIG_BCM54140_PHY=m
3526 +CONFIG_BCM7XXX_PHY=m
3527 +CONFIG_BCM84881_PHY=m
3528 +CONFIG_BCM87XX_PHY=m
3529 +CONFIG_BCM_NET_PHYLIB=m
3530 +CONFIG_CICADA_PHY=m
3531 +CONFIG_CORTINA_PHY=m
3532 +CONFIG_DAVICOM_PHY=m
3533 +CONFIG_ICPLUS_PHY=m
3534 +CONFIG_LXT_PHY=m
3535 +CONFIG_INTEL_XWAY_PHY=m
3536 +CONFIG_LSI_ET1011C_PHY=m
3537 +CONFIG_MARVELL_PHY=m
3538 +CONFIG_MARVELL_10G_PHY=m
3539 +CONFIG_MICREL_PHY=m
3540 +CONFIG_MICROCHIP_PHY=m
3541 +CONFIG_MICROCHIP_T1_PHY=m
3542 +CONFIG_MICROSEMI_PHY=m
3543 +CONFIG_NATIONAL_PHY=m
3544 +CONFIG_NXP_TJA11XX_PHY=m
3545 +CONFIG_AT803X_PHY=m
3546 +CONFIG_QSEMI_PHY=m
3547 +CONFIG_REALTEK_PHY=m
3548 +CONFIG_RENESAS_PHY=m
3549 +CONFIG_ROCKCHIP_PHY=m
3550 +CONFIG_SMSC_PHY=m
3551 +CONFIG_STE10XP=m
3552 +CONFIG_TERANETICS_PHY=m
3553 +CONFIG_DP83822_PHY=m
3554 +CONFIG_DP83TC811_PHY=m
3555 +CONFIG_DP83848_PHY=m
3556 +CONFIG_DP83867_PHY=m
3557 +CONFIG_DP83869_PHY=m
3558 +CONFIG_VITESSE_PHY=m
3559 +CONFIG_XILINX_GMII2RGMII=m
3560 +CONFIG_MICREL_KS8995MA=m
3561 +CONFIG_MDIO_DEVICE=m
3562 +CONFIG_MDIO_BUS=m
3563 +CONFIG_MDIO_DEVRES=m
3564 +CONFIG_MDIO_BITBANG=m
3565 +CONFIG_MDIO_BCM_UNIMAC=m
3566 +CONFIG_MDIO_CAVIUM=m
3567 +CONFIG_MDIO_GPIO=m
3568 +CONFIG_MDIO_I2C=m
3569 +CONFIG_MDIO_MVUSB=m
3570 +CONFIG_MDIO_MSCC_MIIM=m
3571 +CONFIG_MDIO_THUNDER=m
3574 +# MDIO Multiplexers
3578 +# PCS device drivers
3580 +CONFIG_PCS_XPCS=m
3581 +CONFIG_PCS_LYNX=m
3582 +# end of PCS device drivers
3584 +CONFIG_PLIP=m
3585 +CONFIG_PPP=y
3586 +CONFIG_PPP_BSDCOMP=m
3587 +CONFIG_PPP_DEFLATE=m
3588 +CONFIG_PPP_FILTER=y
3589 +CONFIG_PPP_MPPE=m
3590 +CONFIG_PPP_MULTILINK=y
3591 +CONFIG_PPPOATM=m
3592 +CONFIG_PPPOE=m
3593 +CONFIG_PPTP=m
3594 +CONFIG_PPPOL2TP=m
3595 +CONFIG_PPP_ASYNC=m
3596 +CONFIG_PPP_SYNC_TTY=m
3597 +CONFIG_SLIP=m
3598 +CONFIG_SLHC=y
3599 +CONFIG_SLIP_COMPRESSED=y
3600 +CONFIG_SLIP_SMART=y
3601 +CONFIG_SLIP_MODE_SLIP6=y
3602 +CONFIG_USB_NET_DRIVERS=m
3603 +CONFIG_USB_CATC=m
3604 +CONFIG_USB_KAWETH=m
3605 +CONFIG_USB_PEGASUS=m
3606 +CONFIG_USB_RTL8150=m
3607 +CONFIG_USB_RTL8152=m
3608 +CONFIG_USB_LAN78XX=m
3609 +CONFIG_USB_USBNET=m
3610 +CONFIG_USB_NET_AX8817X=m
3611 +CONFIG_USB_NET_AX88179_178A=m
3612 +CONFIG_USB_NET_CDCETHER=m
3613 +CONFIG_USB_NET_CDC_EEM=m
3614 +CONFIG_USB_NET_CDC_NCM=m
3615 +CONFIG_USB_NET_HUAWEI_CDC_NCM=m
3616 +CONFIG_USB_NET_CDC_MBIM=m
3617 +CONFIG_USB_NET_DM9601=m
3618 +CONFIG_USB_NET_SR9700=m
3619 +CONFIG_USB_NET_SR9800=m
3620 +CONFIG_USB_NET_SMSC75XX=m
3621 +CONFIG_USB_NET_SMSC95XX=m
3622 +CONFIG_USB_NET_GL620A=m
3623 +CONFIG_USB_NET_NET1080=m
3624 +CONFIG_USB_NET_PLUSB=m
3625 +CONFIG_USB_NET_MCS7830=m
3626 +CONFIG_USB_NET_RNDIS_HOST=m
3627 +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
3628 +CONFIG_USB_NET_CDC_SUBSET=m
3629 +CONFIG_USB_ALI_M5632=y
3630 +CONFIG_USB_AN2720=y
3631 +CONFIG_USB_BELKIN=y
3632 +CONFIG_USB_ARMLINUX=y
3633 +CONFIG_USB_EPSON2888=y
3634 +CONFIG_USB_KC2190=y
3635 +CONFIG_USB_NET_ZAURUS=m
3636 +CONFIG_USB_NET_CX82310_ETH=m
3637 +CONFIG_USB_NET_KALMIA=m
3638 +CONFIG_USB_NET_QMI_WWAN=m
3639 +CONFIG_USB_HSO=m
3640 +CONFIG_USB_NET_INT51X1=m
3641 +CONFIG_USB_CDC_PHONET=m
3642 +CONFIG_USB_IPHETH=m
3643 +CONFIG_USB_SIERRA_NET=m
3644 +CONFIG_USB_VL600=m
3645 +CONFIG_USB_NET_CH9200=m
3646 +CONFIG_USB_NET_AQC111=m
3647 +CONFIG_USB_RTL8153_ECM=m
3648 +CONFIG_WLAN=y
3649 +CONFIG_WLAN_VENDOR_ADMTEK=y
3650 +CONFIG_ADM8211=m
3651 +CONFIG_ATH_COMMON=m
3652 +CONFIG_WLAN_VENDOR_ATH=y
3653 +# CONFIG_ATH_DEBUG is not set
3654 +CONFIG_ATH5K=m
3655 +# CONFIG_ATH5K_DEBUG is not set
3656 +CONFIG_ATH5K_PCI=y
3657 +CONFIG_ATH9K_HW=m
3658 +CONFIG_ATH9K_COMMON=m
3659 +CONFIG_ATH9K_COMMON_DEBUG=y
3660 +CONFIG_ATH9K_BTCOEX_SUPPORT=y
3661 +CONFIG_ATH9K=m
3662 +CONFIG_ATH9K_PCI=y
3663 +CONFIG_ATH9K_AHB=y
3664 +CONFIG_ATH9K_DEBUGFS=y
3665 +CONFIG_ATH9K_STATION_STATISTICS=y
3666 +# CONFIG_ATH9K_DYNACK is not set
3667 +CONFIG_ATH9K_WOW=y
3668 +CONFIG_ATH9K_RFKILL=y
3669 +CONFIG_ATH9K_CHANNEL_CONTEXT=y
3670 +CONFIG_ATH9K_PCOEM=y
3671 +CONFIG_ATH9K_PCI_NO_EEPROM=m
3672 +CONFIG_ATH9K_HTC=m
3673 +CONFIG_ATH9K_HTC_DEBUGFS=y
3674 +CONFIG_ATH9K_HWRNG=y
3675 +CONFIG_ATH9K_COMMON_SPECTRAL=y
3676 +CONFIG_CARL9170=m
3677 +CONFIG_CARL9170_LEDS=y
3678 +# CONFIG_CARL9170_DEBUGFS is not set
3679 +CONFIG_CARL9170_WPC=y
3680 +CONFIG_CARL9170_HWRNG=y
3681 +CONFIG_ATH6KL=m
3682 +CONFIG_ATH6KL_SDIO=m
3683 +CONFIG_ATH6KL_USB=m
3684 +# CONFIG_ATH6KL_DEBUG is not set
3685 +CONFIG_AR5523=m
3686 +CONFIG_WIL6210=m
3687 +CONFIG_WIL6210_ISR_COR=y
3688 +CONFIG_WIL6210_DEBUGFS=y
3689 +CONFIG_ATH10K=m
3690 +CONFIG_ATH10K_CE=y
3691 +CONFIG_ATH10K_PCI=m
3692 +CONFIG_ATH10K_SDIO=m
3693 +CONFIG_ATH10K_USB=m
3694 +# CONFIG_ATH10K_DEBUG is not set
3695 +CONFIG_ATH10K_DEBUGFS=y
3696 +CONFIG_ATH10K_SPECTRAL=y
3697 +CONFIG_WCN36XX=m
3698 +# CONFIG_WCN36XX_DEBUGFS is not set
3699 +CONFIG_ATH11K=m
3700 +CONFIG_ATH11K_AHB=m
3701 +CONFIG_ATH11K_PCI=m
3702 +# CONFIG_ATH11K_DEBUG is not set
3703 +CONFIG_ATH11K_DEBUGFS=y
3704 +CONFIG_ATH11K_SPECTRAL=y
3705 +CONFIG_WLAN_VENDOR_ATMEL=y
3706 +CONFIG_ATMEL=m
3707 +CONFIG_PCI_ATMEL=m
3708 +CONFIG_PCMCIA_ATMEL=m
3709 +CONFIG_AT76C50X_USB=m
3710 +CONFIG_WLAN_VENDOR_BROADCOM=y
3711 +CONFIG_B43=m
3712 +CONFIG_B43_BCMA=y
3713 +CONFIG_B43_SSB=y
3714 +CONFIG_B43_BUSES_BCMA_AND_SSB=y
3715 +# CONFIG_B43_BUSES_BCMA is not set
3716 +# CONFIG_B43_BUSES_SSB is not set
3717 +CONFIG_B43_PCI_AUTOSELECT=y
3718 +CONFIG_B43_PCICORE_AUTOSELECT=y
3719 +# CONFIG_B43_SDIO is not set
3720 +CONFIG_B43_BCMA_PIO=y
3721 +CONFIG_B43_PIO=y
3722 +CONFIG_B43_PHY_G=y
3723 +CONFIG_B43_PHY_N=y
3724 +CONFIG_B43_PHY_LP=y
3725 +CONFIG_B43_PHY_HT=y
3726 +CONFIG_B43_LEDS=y
3727 +CONFIG_B43_HWRNG=y
3728 +# CONFIG_B43_DEBUG is not set
3729 +CONFIG_B43LEGACY=m
3730 +CONFIG_B43LEGACY_PCI_AUTOSELECT=y
3731 +CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
3732 +CONFIG_B43LEGACY_LEDS=y
3733 +CONFIG_B43LEGACY_HWRNG=y
3734 +# CONFIG_B43LEGACY_DEBUG is not set
3735 +CONFIG_B43LEGACY_DMA=y
3736 +CONFIG_B43LEGACY_PIO=y
3737 +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
3738 +# CONFIG_B43LEGACY_DMA_MODE is not set
3739 +# CONFIG_B43LEGACY_PIO_MODE is not set
3740 +CONFIG_BRCMUTIL=m
3741 +CONFIG_BRCMSMAC=m
3742 +CONFIG_BRCMFMAC=m
3743 +CONFIG_BRCMFMAC_PROTO_BCDC=y
3744 +CONFIG_BRCMFMAC_PROTO_MSGBUF=y
3745 +CONFIG_BRCMFMAC_SDIO=y
3746 +CONFIG_BRCMFMAC_USB=y
3747 +CONFIG_BRCMFMAC_PCIE=y
3748 +CONFIG_BRCM_TRACING=y
3749 +# CONFIG_BRCMDBG is not set
3750 +CONFIG_WLAN_VENDOR_CISCO=y
3751 +CONFIG_AIRO=m
3752 +CONFIG_AIRO_CS=m
3753 +CONFIG_WLAN_VENDOR_INTEL=y
3754 +CONFIG_IPW2100=m
3755 +CONFIG_IPW2100_MONITOR=y
3756 +# CONFIG_IPW2100_DEBUG is not set
3757 +CONFIG_IPW2200=m
3758 +CONFIG_IPW2200_MONITOR=y
3759 +CONFIG_IPW2200_RADIOTAP=y
3760 +CONFIG_IPW2200_PROMISCUOUS=y
3761 +CONFIG_IPW2200_QOS=y
3762 +# CONFIG_IPW2200_DEBUG is not set
3763 +CONFIG_LIBIPW=m
3764 +# CONFIG_LIBIPW_DEBUG is not set
3765 +CONFIG_IWLEGACY=m
3766 +CONFIG_IWL4965=m
3767 +CONFIG_IWL3945=m
3770 +# iwl3945 / iwl4965 Debugging Options
3772 +# CONFIG_IWLEGACY_DEBUG is not set
3773 +CONFIG_IWLEGACY_DEBUGFS=y
3774 +# end of iwl3945 / iwl4965 Debugging Options
3776 +CONFIG_IWLWIFI=m
3777 +CONFIG_IWLWIFI_LEDS=y
3778 +CONFIG_IWLDVM=m
3779 +CONFIG_IWLMVM=m
3780 +CONFIG_IWLWIFI_OPMODE_MODULAR=y
3781 +# CONFIG_IWLWIFI_BCAST_FILTERING is not set
3784 +# Debugging Options
3786 +# CONFIG_IWLWIFI_DEBUG is not set
3787 +CONFIG_IWLWIFI_DEBUGFS=y
3788 +# end of Debugging Options
3790 +CONFIG_WLAN_VENDOR_INTERSIL=y
3791 +CONFIG_HOSTAP=m
3792 +CONFIG_HOSTAP_FIRMWARE=y
3793 +CONFIG_HOSTAP_FIRMWARE_NVRAM=y
3794 +CONFIG_HOSTAP_PLX=m
3795 +CONFIG_HOSTAP_PCI=m
3796 +CONFIG_HOSTAP_CS=m
3797 +CONFIG_HERMES=m
3798 +# CONFIG_HERMES_PRISM is not set
3799 +CONFIG_HERMES_CACHE_FW_ON_INIT=y
3800 +CONFIG_PLX_HERMES=m
3801 +CONFIG_TMD_HERMES=m
3802 +CONFIG_NORTEL_HERMES=m
3803 +CONFIG_PCMCIA_HERMES=m
3804 +CONFIG_PCMCIA_SPECTRUM=m
3805 +CONFIG_ORINOCO_USB=m
3806 +CONFIG_P54_COMMON=m
3807 +CONFIG_P54_USB=m
3808 +CONFIG_P54_PCI=m
3809 +CONFIG_P54_SPI=m
3810 +# CONFIG_P54_SPI_DEFAULT_EEPROM is not set
3811 +CONFIG_P54_LEDS=y
3812 +# CONFIG_PRISM54 is not set
3813 +CONFIG_WLAN_VENDOR_MARVELL=y
3814 +CONFIG_LIBERTAS=m
3815 +CONFIG_LIBERTAS_USB=m
3816 +CONFIG_LIBERTAS_CS=m
3817 +CONFIG_LIBERTAS_SDIO=m
3818 +CONFIG_LIBERTAS_SPI=m
3819 +# CONFIG_LIBERTAS_DEBUG is not set
3820 +CONFIG_LIBERTAS_MESH=y
3821 +CONFIG_LIBERTAS_THINFIRM=m
3822 +# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
3823 +CONFIG_LIBERTAS_THINFIRM_USB=m
3824 +CONFIG_MWIFIEX=m
3825 +CONFIG_MWIFIEX_SDIO=m
3826 +CONFIG_MWIFIEX_PCIE=m
3827 +CONFIG_MWIFIEX_USB=m
3828 +CONFIG_MWL8K=m
3829 +CONFIG_WLAN_VENDOR_MEDIATEK=y
3830 +CONFIG_MT7601U=m
3831 +CONFIG_MT76_CORE=m
3832 +CONFIG_MT76_LEDS=y
3833 +CONFIG_MT76_USB=m
3834 +CONFIG_MT76_SDIO=m
3835 +CONFIG_MT76x02_LIB=m
3836 +CONFIG_MT76x02_USB=m
3837 +CONFIG_MT76_CONNAC_LIB=m
3838 +CONFIG_MT76x0_COMMON=m
3839 +CONFIG_MT76x0U=m
3840 +CONFIG_MT76x0E=m
3841 +CONFIG_MT76x2_COMMON=m
3842 +CONFIG_MT76x2E=m
3843 +CONFIG_MT76x2U=m
3844 +CONFIG_MT7603E=m
3845 +CONFIG_MT7615_COMMON=m
3846 +CONFIG_MT7615E=m
3847 +CONFIG_MT7663_USB_SDIO_COMMON=m
3848 +CONFIG_MT7663U=m
3849 +CONFIG_MT7663S=m
3850 +CONFIG_MT7915E=m
3851 +CONFIG_MT7921E=m
3852 +CONFIG_WLAN_VENDOR_MICROCHIP=y
3853 +CONFIG_WILC1000=m
3854 +CONFIG_WILC1000_SDIO=m
3855 +CONFIG_WILC1000_SPI=m
3856 +CONFIG_WILC1000_HW_OOB_INTR=y
3857 +CONFIG_WLAN_VENDOR_RALINK=y
3858 +CONFIG_RT2X00=m
3859 +CONFIG_RT2400PCI=m
3860 +CONFIG_RT2500PCI=m
3861 +CONFIG_RT61PCI=m
3862 +CONFIG_RT2800PCI=m
3863 +CONFIG_RT2800PCI_RT33XX=y
3864 +CONFIG_RT2800PCI_RT35XX=y
3865 +CONFIG_RT2800PCI_RT53XX=y
3866 +CONFIG_RT2800PCI_RT3290=y
3867 +CONFIG_RT2500USB=m
3868 +CONFIG_RT73USB=m
3869 +CONFIG_RT2800USB=m
3870 +CONFIG_RT2800USB_RT33XX=y
3871 +CONFIG_RT2800USB_RT35XX=y
3872 +CONFIG_RT2800USB_RT3573=y
3873 +CONFIG_RT2800USB_RT53XX=y
3874 +CONFIG_RT2800USB_RT55XX=y
3875 +CONFIG_RT2800USB_UNKNOWN=y
3876 +CONFIG_RT2800_LIB=m
3877 +CONFIG_RT2800_LIB_MMIO=m
3878 +CONFIG_RT2X00_LIB_MMIO=m
3879 +CONFIG_RT2X00_LIB_PCI=m
3880 +CONFIG_RT2X00_LIB_USB=m
3881 +CONFIG_RT2X00_LIB=m
3882 +CONFIG_RT2X00_LIB_FIRMWARE=y
3883 +CONFIG_RT2X00_LIB_CRYPTO=y
3884 +CONFIG_RT2X00_LIB_LEDS=y
3885 +# CONFIG_RT2X00_LIB_DEBUGFS is not set
3886 +# CONFIG_RT2X00_DEBUG is not set
3887 +CONFIG_WLAN_VENDOR_REALTEK=y
3888 +CONFIG_RTL8180=m
3889 +CONFIG_RTL8187=m
3890 +CONFIG_RTL8187_LEDS=y
3891 +CONFIG_RTL_CARDS=m
3892 +CONFIG_RTL8192CE=m
3893 +CONFIG_RTL8192SE=m
3894 +CONFIG_RTL8192DE=m
3895 +CONFIG_RTL8723AE=m
3896 +CONFIG_RTL8723BE=m
3897 +CONFIG_RTL8188EE=m
3898 +CONFIG_RTL8192EE=m
3899 +CONFIG_RTL8821AE=m
3900 +CONFIG_RTL8192CU=m
3901 +CONFIG_RTLWIFI=m
3902 +CONFIG_RTLWIFI_PCI=m
3903 +CONFIG_RTLWIFI_USB=m
3904 +# CONFIG_RTLWIFI_DEBUG is not set
3905 +CONFIG_RTL8192C_COMMON=m
3906 +CONFIG_RTL8723_COMMON=m
3907 +CONFIG_RTLBTCOEXIST=m
3908 +CONFIG_RTL8XXXU=m
3909 +CONFIG_RTL8XXXU_UNTESTED=y
3910 +CONFIG_RTW88=m
3911 +CONFIG_RTW88_CORE=m
3912 +CONFIG_RTW88_PCI=m
3913 +CONFIG_RTW88_8822B=m
3914 +CONFIG_RTW88_8822C=m
3915 +CONFIG_RTW88_8723D=m
3916 +CONFIG_RTW88_8821C=m
3917 +CONFIG_RTW88_8822BE=m
3918 +CONFIG_RTW88_8822CE=m
3919 +CONFIG_RTW88_8723DE=m
3920 +CONFIG_RTW88_8821CE=m
3921 +CONFIG_RTW88_DEBUG=y
3922 +CONFIG_RTW88_DEBUGFS=y
3923 +CONFIG_WLAN_VENDOR_RSI=y
3924 +CONFIG_RSI_91X=m
3925 +# CONFIG_RSI_DEBUGFS is not set
3926 +CONFIG_RSI_SDIO=m
3927 +CONFIG_RSI_USB=m
3928 +CONFIG_RSI_COEX=y
3929 +CONFIG_WLAN_VENDOR_ST=y
3930 +CONFIG_CW1200=m
3931 +CONFIG_CW1200_WLAN_SDIO=m
3932 +CONFIG_CW1200_WLAN_SPI=m
3933 +CONFIG_WLAN_VENDOR_TI=y
3934 +CONFIG_WL1251=m
3935 +CONFIG_WL1251_SPI=m
3936 +CONFIG_WL1251_SDIO=m
3937 +CONFIG_WL12XX=m
3938 +CONFIG_WL18XX=m
3939 +CONFIG_WLCORE=m
3940 +CONFIG_WLCORE_SDIO=m
3941 +CONFIG_WILINK_PLATFORM_DATA=y
3942 +CONFIG_WLAN_VENDOR_ZYDAS=y
3943 +CONFIG_USB_ZD1201=m
3944 +CONFIG_ZD1211RW=m
3945 +# CONFIG_ZD1211RW_DEBUG is not set
3946 +CONFIG_WLAN_VENDOR_QUANTENNA=y
3947 +CONFIG_QTNFMAC=m
3948 +CONFIG_QTNFMAC_PCIE=m
3949 +CONFIG_PCMCIA_RAYCS=m
3950 +CONFIG_PCMCIA_WL3501=m
3951 +CONFIG_MAC80211_HWSIM=m
3952 +CONFIG_USB_NET_RNDIS_WLAN=m
3953 +CONFIG_VIRT_WIFI=m
3954 +CONFIG_WAN=y
3955 +CONFIG_LANMEDIA=m
3956 +CONFIG_HDLC=m
3957 +CONFIG_HDLC_RAW=m
3958 +CONFIG_HDLC_RAW_ETH=m
3959 +CONFIG_HDLC_CISCO=m
3960 +CONFIG_HDLC_FR=m
3961 +CONFIG_HDLC_PPP=m
3962 +CONFIG_HDLC_X25=m
3963 +CONFIG_PCI200SYN=m
3964 +CONFIG_WANXL=m
3965 +CONFIG_PC300TOO=m
3966 +CONFIG_FARSYNC=m
3967 +CONFIG_LAPBETHER=m
3968 +CONFIG_SBNI=m
3969 +# CONFIG_SBNI_MULTILINE is not set
3970 +CONFIG_IEEE802154_DRIVERS=m
3971 +CONFIG_IEEE802154_FAKELB=m
3972 +CONFIG_IEEE802154_AT86RF230=m
3973 +CONFIG_IEEE802154_AT86RF230_DEBUGFS=y
3974 +CONFIG_IEEE802154_MRF24J40=m
3975 +CONFIG_IEEE802154_CC2520=m
3976 +CONFIG_IEEE802154_ATUSB=m
3977 +CONFIG_IEEE802154_ADF7242=m
3978 +CONFIG_IEEE802154_CA8210=m
3979 +CONFIG_IEEE802154_CA8210_DEBUGFS=y
3980 +CONFIG_IEEE802154_MCR20A=m
3981 +CONFIG_IEEE802154_HWSIM=m
3982 +CONFIG_XEN_NETDEV_FRONTEND=y
3983 +CONFIG_XEN_NETDEV_BACKEND=m
3984 +CONFIG_VMXNET3=m
3985 +CONFIG_FUJITSU_ES=m
3986 +CONFIG_USB4_NET=m
3987 +CONFIG_HYPERV_NET=m
3988 +CONFIG_NETDEVSIM=m
3989 +CONFIG_NET_FAILOVER=m
3990 +CONFIG_ISDN=y
3991 +CONFIG_ISDN_CAPI=y
3992 +CONFIG_CAPI_TRACE=y
3993 +CONFIG_ISDN_CAPI_MIDDLEWARE=y
3994 +CONFIG_MISDN=m
3995 +CONFIG_MISDN_DSP=m
3996 +CONFIG_MISDN_L1OIP=m
3999 +# mISDN hardware drivers
4001 +CONFIG_MISDN_HFCPCI=m
4002 +CONFIG_MISDN_HFCMULTI=m
4003 +CONFIG_MISDN_HFCUSB=m
4004 +CONFIG_MISDN_AVMFRITZ=m
4005 +CONFIG_MISDN_SPEEDFAX=m
4006 +CONFIG_MISDN_INFINEON=m
4007 +CONFIG_MISDN_W6692=m
4008 +CONFIG_MISDN_NETJET=m
4009 +CONFIG_MISDN_HDLC=m
4010 +CONFIG_MISDN_IPAC=m
4011 +CONFIG_MISDN_ISAR=m
4012 +CONFIG_NVM=y
4013 +CONFIG_NVM_PBLK=m
4014 +# CONFIG_NVM_PBLK_DEBUG is not set
4017 +# Input device support
4019 +CONFIG_INPUT=y
4020 +CONFIG_INPUT_LEDS=m
4021 +CONFIG_INPUT_FF_MEMLESS=m
4022 +CONFIG_INPUT_SPARSEKMAP=m
4023 +CONFIG_INPUT_MATRIXKMAP=m
4026 +# Userland interfaces
4028 +CONFIG_INPUT_MOUSEDEV=y
4029 +CONFIG_INPUT_MOUSEDEV_PSAUX=y
4030 +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
4031 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
4032 +CONFIG_INPUT_JOYDEV=m
4033 +CONFIG_INPUT_EVDEV=y
4034 +# CONFIG_INPUT_EVBUG is not set
4037 +# Input Device Drivers
4039 +CONFIG_INPUT_KEYBOARD=y
4040 +CONFIG_KEYBOARD_ADC=m
4041 +CONFIG_KEYBOARD_ADP5520=m
4042 +CONFIG_KEYBOARD_ADP5588=m
4043 +CONFIG_KEYBOARD_ADP5589=m
4044 +CONFIG_KEYBOARD_APPLESPI=m
4045 +CONFIG_KEYBOARD_ATKBD=y
4046 +CONFIG_KEYBOARD_QT1050=m
4047 +CONFIG_KEYBOARD_QT1070=m
4048 +CONFIG_KEYBOARD_QT2160=m
4049 +CONFIG_KEYBOARD_DLINK_DIR685=m
4050 +CONFIG_KEYBOARD_LKKBD=m
4051 +CONFIG_KEYBOARD_GPIO=m
4052 +CONFIG_KEYBOARD_GPIO_POLLED=m
4053 +CONFIG_KEYBOARD_TCA6416=m
4054 +CONFIG_KEYBOARD_TCA8418=m
4055 +CONFIG_KEYBOARD_MATRIX=m
4056 +CONFIG_KEYBOARD_LM8323=m
4057 +CONFIG_KEYBOARD_LM8333=m
4058 +CONFIG_KEYBOARD_MAX7359=m
4059 +CONFIG_KEYBOARD_MCS=m
4060 +CONFIG_KEYBOARD_MPR121=m
4061 +CONFIG_KEYBOARD_NEWTON=m
4062 +CONFIG_KEYBOARD_OPENCORES=m
4063 +CONFIG_KEYBOARD_SAMSUNG=m
4064 +CONFIG_KEYBOARD_STOWAWAY=m
4065 +CONFIG_KEYBOARD_SUNKBD=m
4066 +CONFIG_KEYBOARD_IQS62X=m
4067 +CONFIG_KEYBOARD_TM2_TOUCHKEY=m
4068 +CONFIG_KEYBOARD_TWL4030=m
4069 +CONFIG_KEYBOARD_XTKBD=m
4070 +CONFIG_KEYBOARD_CROS_EC=m
4071 +CONFIG_KEYBOARD_MTK_PMIC=m
4072 +CONFIG_INPUT_MOUSE=y
4073 +CONFIG_MOUSE_PS2=m
4074 +CONFIG_MOUSE_PS2_ALPS=y
4075 +CONFIG_MOUSE_PS2_BYD=y
4076 +CONFIG_MOUSE_PS2_LOGIPS2PP=y
4077 +CONFIG_MOUSE_PS2_SYNAPTICS=y
4078 +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
4079 +CONFIG_MOUSE_PS2_CYPRESS=y
4080 +CONFIG_MOUSE_PS2_LIFEBOOK=y
4081 +CONFIG_MOUSE_PS2_TRACKPOINT=y
4082 +CONFIG_MOUSE_PS2_ELANTECH=y
4083 +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y
4084 +CONFIG_MOUSE_PS2_SENTELIC=y
4085 +CONFIG_MOUSE_PS2_TOUCHKIT=y
4086 +CONFIG_MOUSE_PS2_FOCALTECH=y
4087 +CONFIG_MOUSE_PS2_VMMOUSE=y
4088 +CONFIG_MOUSE_PS2_SMBUS=y
4089 +CONFIG_MOUSE_SERIAL=m
4090 +CONFIG_MOUSE_APPLETOUCH=m
4091 +CONFIG_MOUSE_BCM5974=m
4092 +CONFIG_MOUSE_CYAPA=m
4093 +CONFIG_MOUSE_ELAN_I2C=m
4094 +CONFIG_MOUSE_ELAN_I2C_I2C=y
4095 +CONFIG_MOUSE_ELAN_I2C_SMBUS=y
4096 +CONFIG_MOUSE_VSXXXAA=m
4097 +CONFIG_MOUSE_GPIO=m
4098 +CONFIG_MOUSE_SYNAPTICS_I2C=m
4099 +CONFIG_MOUSE_SYNAPTICS_USB=m
4100 +CONFIG_INPUT_JOYSTICK=y
4101 +CONFIG_JOYSTICK_ANALOG=m
4102 +CONFIG_JOYSTICK_A3D=m
4103 +CONFIG_JOYSTICK_ADC=m
4104 +CONFIG_JOYSTICK_ADI=m
4105 +CONFIG_JOYSTICK_COBRA=m
4106 +CONFIG_JOYSTICK_GF2K=m
4107 +CONFIG_JOYSTICK_GRIP=m
4108 +CONFIG_JOYSTICK_GRIP_MP=m
4109 +CONFIG_JOYSTICK_GUILLEMOT=m
4110 +CONFIG_JOYSTICK_INTERACT=m
4111 +CONFIG_JOYSTICK_SIDEWINDER=m
4112 +CONFIG_JOYSTICK_TMDC=m
4113 +CONFIG_JOYSTICK_IFORCE=m
4114 +CONFIG_JOYSTICK_IFORCE_USB=m
4115 +CONFIG_JOYSTICK_IFORCE_232=m
4116 +CONFIG_JOYSTICK_WARRIOR=m
4117 +CONFIG_JOYSTICK_MAGELLAN=m
4118 +CONFIG_JOYSTICK_SPACEORB=m
4119 +CONFIG_JOYSTICK_SPACEBALL=m
4120 +CONFIG_JOYSTICK_STINGER=m
4121 +CONFIG_JOYSTICK_TWIDJOY=m
4122 +CONFIG_JOYSTICK_ZHENHUA=m
4123 +CONFIG_JOYSTICK_DB9=m
4124 +CONFIG_JOYSTICK_GAMECON=m
4125 +CONFIG_JOYSTICK_TURBOGRAFX=m
4126 +CONFIG_JOYSTICK_AS5011=m
4127 +CONFIG_JOYSTICK_JOYDUMP=m
4128 +CONFIG_JOYSTICK_XPAD=m
4129 +CONFIG_JOYSTICK_XPAD_FF=y
4130 +CONFIG_JOYSTICK_XPAD_LEDS=y
4131 +CONFIG_JOYSTICK_WALKERA0701=m
4132 +CONFIG_JOYSTICK_PSXPAD_SPI=m
4133 +CONFIG_JOYSTICK_PSXPAD_SPI_FF=y
4134 +CONFIG_JOYSTICK_PXRC=m
4135 +CONFIG_JOYSTICK_FSIA6B=m
4136 +CONFIG_INPUT_TABLET=y
4137 +CONFIG_TABLET_USB_ACECAD=m
4138 +CONFIG_TABLET_USB_AIPTEK=m
4139 +CONFIG_TABLET_USB_HANWANG=m
4140 +CONFIG_TABLET_USB_KBTAB=m
4141 +CONFIG_TABLET_USB_PEGASUS=m
4142 +CONFIG_TABLET_SERIAL_WACOM4=m
4143 +CONFIG_INPUT_TOUCHSCREEN=y
4144 +CONFIG_TOUCHSCREEN_PROPERTIES=y
4145 +CONFIG_TOUCHSCREEN_88PM860X=m
4146 +CONFIG_TOUCHSCREEN_ADS7846=m
4147 +CONFIG_TOUCHSCREEN_AD7877=m
4148 +CONFIG_TOUCHSCREEN_AD7879=m
4149 +CONFIG_TOUCHSCREEN_AD7879_I2C=m
4150 +CONFIG_TOUCHSCREEN_AD7879_SPI=m
4151 +CONFIG_TOUCHSCREEN_ADC=m
4152 +CONFIG_TOUCHSCREEN_ATMEL_MXT=m
4153 +CONFIG_TOUCHSCREEN_ATMEL_MXT_T37=y
4154 +CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
4155 +CONFIG_TOUCHSCREEN_BU21013=m
4156 +CONFIG_TOUCHSCREEN_BU21029=m
4157 +CONFIG_TOUCHSCREEN_CHIPONE_ICN8505=m
4158 +CONFIG_TOUCHSCREEN_CY8CTMA140=m
4159 +CONFIG_TOUCHSCREEN_CY8CTMG110=m
4160 +CONFIG_TOUCHSCREEN_CYTTSP_CORE=m
4161 +CONFIG_TOUCHSCREEN_CYTTSP_I2C=m
4162 +CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
4163 +CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
4164 +CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
4165 +CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
4166 +CONFIG_TOUCHSCREEN_DA9034=m
4167 +CONFIG_TOUCHSCREEN_DA9052=m
4168 +CONFIG_TOUCHSCREEN_DYNAPRO=m
4169 +CONFIG_TOUCHSCREEN_HAMPSHIRE=m
4170 +CONFIG_TOUCHSCREEN_EETI=m
4171 +CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m
4172 +CONFIG_TOUCHSCREEN_EXC3000=m
4173 +CONFIG_TOUCHSCREEN_FUJITSU=m
4174 +CONFIG_TOUCHSCREEN_GOODIX=m
4175 +CONFIG_TOUCHSCREEN_HIDEEP=m
4176 +CONFIG_TOUCHSCREEN_ILI210X=m
4177 +CONFIG_TOUCHSCREEN_S6SY761=m
4178 +CONFIG_TOUCHSCREEN_GUNZE=m
4179 +CONFIG_TOUCHSCREEN_EKTF2127=m
4180 +CONFIG_TOUCHSCREEN_ELAN=y
4181 +CONFIG_TOUCHSCREEN_ELO=m
4182 +CONFIG_TOUCHSCREEN_WACOM_W8001=m
4183 +CONFIG_TOUCHSCREEN_WACOM_I2C=m
4184 +CONFIG_TOUCHSCREEN_MAX11801=m
4185 +CONFIG_TOUCHSCREEN_MCS5000=m
4186 +CONFIG_TOUCHSCREEN_MMS114=m
4187 +CONFIG_TOUCHSCREEN_MELFAS_MIP4=m
4188 +CONFIG_TOUCHSCREEN_MTOUCH=m
4189 +CONFIG_TOUCHSCREEN_INEXIO=m
4190 +CONFIG_TOUCHSCREEN_MK712=m
4191 +CONFIG_TOUCHSCREEN_PENMOUNT=m
4192 +CONFIG_TOUCHSCREEN_EDT_FT5X06=m
4193 +CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
4194 +CONFIG_TOUCHSCREEN_TOUCHWIN=m
4195 +CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
4196 +CONFIG_TOUCHSCREEN_UCB1400=m
4197 +CONFIG_TOUCHSCREEN_PIXCIR=m
4198 +CONFIG_TOUCHSCREEN_WDT87XX_I2C=m
4199 +CONFIG_TOUCHSCREEN_WM831X=m
4200 +CONFIG_TOUCHSCREEN_WM97XX=m
4201 +CONFIG_TOUCHSCREEN_WM9705=y
4202 +CONFIG_TOUCHSCREEN_WM9712=y
4203 +CONFIG_TOUCHSCREEN_WM9713=y
4204 +CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
4205 +CONFIG_TOUCHSCREEN_MC13783=m
4206 +CONFIG_TOUCHSCREEN_USB_EGALAX=y
4207 +CONFIG_TOUCHSCREEN_USB_PANJIT=y
4208 +CONFIG_TOUCHSCREEN_USB_3M=y
4209 +CONFIG_TOUCHSCREEN_USB_ITM=y
4210 +CONFIG_TOUCHSCREEN_USB_ETURBO=y
4211 +CONFIG_TOUCHSCREEN_USB_GUNZE=y
4212 +CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
4213 +CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
4214 +CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
4215 +CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
4216 +CONFIG_TOUCHSCREEN_USB_GOTOP=y
4217 +CONFIG_TOUCHSCREEN_USB_JASTEC=y
4218 +CONFIG_TOUCHSCREEN_USB_ELO=y
4219 +CONFIG_TOUCHSCREEN_USB_E2I=y
4220 +CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
4221 +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
4222 +CONFIG_TOUCHSCREEN_USB_NEXIO=y
4223 +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y
4224 +CONFIG_TOUCHSCREEN_TOUCHIT213=m
4225 +CONFIG_TOUCHSCREEN_TSC_SERIO=m
4226 +CONFIG_TOUCHSCREEN_TSC200X_CORE=m
4227 +CONFIG_TOUCHSCREEN_TSC2004=m
4228 +CONFIG_TOUCHSCREEN_TSC2005=m
4229 +CONFIG_TOUCHSCREEN_TSC2007=m
4230 +CONFIG_TOUCHSCREEN_TSC2007_IIO=y
4231 +CONFIG_TOUCHSCREEN_PCAP=m
4232 +CONFIG_TOUCHSCREEN_RM_TS=m
4233 +CONFIG_TOUCHSCREEN_SILEAD=m
4234 +CONFIG_TOUCHSCREEN_SIS_I2C=m
4235 +CONFIG_TOUCHSCREEN_ST1232=m
4236 +CONFIG_TOUCHSCREEN_STMFTS=m
4237 +CONFIG_TOUCHSCREEN_SUR40=m
4238 +CONFIG_TOUCHSCREEN_SURFACE3_SPI=m
4239 +CONFIG_TOUCHSCREEN_SX8654=m
4240 +CONFIG_TOUCHSCREEN_TPS6507X=m
4241 +CONFIG_TOUCHSCREEN_ZET6223=m
4242 +CONFIG_TOUCHSCREEN_ZFORCE=m
4243 +CONFIG_TOUCHSCREEN_ROHM_BU21023=m
4244 +CONFIG_TOUCHSCREEN_IQS5XX=m
4245 +CONFIG_TOUCHSCREEN_ZINITIX=m
4246 +CONFIG_INPUT_MISC=y
4247 +CONFIG_INPUT_88PM860X_ONKEY=m
4248 +CONFIG_INPUT_88PM80X_ONKEY=m
4249 +CONFIG_INPUT_AD714X=m
4250 +CONFIG_INPUT_AD714X_I2C=m
4251 +CONFIG_INPUT_AD714X_SPI=m
4252 +CONFIG_INPUT_ARIZONA_HAPTICS=m
4253 +CONFIG_INPUT_BMA150=m
4254 +CONFIG_INPUT_E3X0_BUTTON=m
4255 +CONFIG_INPUT_PCSPKR=m
4256 +CONFIG_INPUT_MAX77693_HAPTIC=m
4257 +CONFIG_INPUT_MAX8925_ONKEY=m
4258 +CONFIG_INPUT_MAX8997_HAPTIC=m
4259 +CONFIG_INPUT_MC13783_PWRBUTTON=m
4260 +CONFIG_INPUT_MMA8450=m
4261 +CONFIG_INPUT_APANEL=m
4262 +CONFIG_INPUT_GPIO_BEEPER=m
4263 +CONFIG_INPUT_GPIO_DECODER=m
4264 +CONFIG_INPUT_GPIO_VIBRA=m
4265 +CONFIG_INPUT_ATLAS_BTNS=m
4266 +CONFIG_INPUT_ATI_REMOTE2=m
4267 +CONFIG_INPUT_KEYSPAN_REMOTE=m
4268 +CONFIG_INPUT_KXTJ9=m
4269 +CONFIG_INPUT_POWERMATE=m
4270 +CONFIG_INPUT_YEALINK=m
4271 +CONFIG_INPUT_CM109=m
4272 +CONFIG_INPUT_REGULATOR_HAPTIC=m
4273 +CONFIG_INPUT_RETU_PWRBUTTON=m
4274 +CONFIG_INPUT_AXP20X_PEK=m
4275 +CONFIG_INPUT_TWL4030_PWRBUTTON=m
4276 +CONFIG_INPUT_TWL4030_VIBRA=m
4277 +CONFIG_INPUT_TWL6040_VIBRA=m
4278 +CONFIG_INPUT_UINPUT=y
4279 +CONFIG_INPUT_PALMAS_PWRBUTTON=m
4280 +CONFIG_INPUT_PCF50633_PMU=m
4281 +CONFIG_INPUT_PCF8574=m
4282 +CONFIG_INPUT_PWM_BEEPER=m
4283 +CONFIG_INPUT_PWM_VIBRA=m
4284 +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
4285 +CONFIG_INPUT_DA7280_HAPTICS=m
4286 +CONFIG_INPUT_DA9052_ONKEY=m
4287 +CONFIG_INPUT_DA9055_ONKEY=m
4288 +CONFIG_INPUT_DA9063_ONKEY=m
4289 +CONFIG_INPUT_WM831X_ON=m
4290 +CONFIG_INPUT_PCAP=m
4291 +CONFIG_INPUT_ADXL34X=m
4292 +CONFIG_INPUT_ADXL34X_I2C=m
4293 +CONFIG_INPUT_ADXL34X_SPI=m
4294 +CONFIG_INPUT_IMS_PCU=m
4295 +CONFIG_INPUT_IQS269A=m
4296 +CONFIG_INPUT_CMA3000=m
4297 +CONFIG_INPUT_CMA3000_I2C=m
4298 +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
4299 +CONFIG_INPUT_IDEAPAD_SLIDEBAR=m
4300 +CONFIG_INPUT_SOC_BUTTON_ARRAY=m
4301 +CONFIG_INPUT_DRV260X_HAPTICS=m
4302 +CONFIG_INPUT_DRV2665_HAPTICS=m
4303 +CONFIG_INPUT_DRV2667_HAPTICS=m
4304 +CONFIG_INPUT_RAVE_SP_PWRBUTTON=m
4305 +CONFIG_RMI4_CORE=m
4306 +CONFIG_RMI4_I2C=m
4307 +CONFIG_RMI4_SPI=m
4308 +CONFIG_RMI4_SMB=m
4309 +CONFIG_RMI4_F03=y
4310 +CONFIG_RMI4_F03_SERIO=m
4311 +CONFIG_RMI4_2D_SENSOR=y
4312 +CONFIG_RMI4_F11=y
4313 +CONFIG_RMI4_F12=y
4314 +CONFIG_RMI4_F30=y
4315 +CONFIG_RMI4_F34=y
4316 +CONFIG_RMI4_F3A=y
4317 +CONFIG_RMI4_F54=y
4318 +CONFIG_RMI4_F55=y
4321 +# Hardware I/O ports
4323 +CONFIG_SERIO=y
4324 +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
4325 +CONFIG_SERIO_I8042=y
4326 +CONFIG_SERIO_SERPORT=m
4327 +CONFIG_SERIO_CT82C710=m
4328 +CONFIG_SERIO_PARKBD=m
4329 +CONFIG_SERIO_PCIPS2=m
4330 +CONFIG_SERIO_LIBPS2=y
4331 +CONFIG_SERIO_RAW=m
4332 +CONFIG_SERIO_ALTERA_PS2=m
4333 +CONFIG_SERIO_PS2MULT=m
4334 +CONFIG_SERIO_ARC_PS2=m
4335 +CONFIG_HYPERV_KEYBOARD=m
4336 +CONFIG_SERIO_GPIO_PS2=m
4337 +CONFIG_USERIO=m
4338 +CONFIG_GAMEPORT=m
4339 +CONFIG_GAMEPORT_NS558=m
4340 +CONFIG_GAMEPORT_L4=m
4341 +CONFIG_GAMEPORT_EMU10K1=m
4342 +CONFIG_GAMEPORT_FM801=m
4343 +# end of Hardware I/O ports
4344 +# end of Input device support
4347 +# Character devices
4349 +CONFIG_TTY=y
4350 +CONFIG_VT=y
4351 +CONFIG_CONSOLE_TRANSLATIONS=y
4352 +CONFIG_VT_CONSOLE=y
4353 +CONFIG_VT_CONSOLE_SLEEP=y
4354 +CONFIG_HW_CONSOLE=y
4355 +CONFIG_VT_HW_CONSOLE_BINDING=y
4356 +CONFIG_UNIX98_PTYS=y
4357 +CONFIG_LEGACY_PTYS=y
4358 +CONFIG_LEGACY_PTY_COUNT=0
4359 +CONFIG_LDISC_AUTOLOAD=y
4362 +# Serial drivers
4364 +CONFIG_SERIAL_EARLYCON=y
4365 +CONFIG_SERIAL_8250=y
4366 +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
4367 +CONFIG_SERIAL_8250_PNP=y
4368 +CONFIG_SERIAL_8250_16550A_VARIANTS=y
4369 +CONFIG_SERIAL_8250_FINTEK=y
4370 +CONFIG_SERIAL_8250_CONSOLE=y
4371 +CONFIG_SERIAL_8250_DMA=y
4372 +CONFIG_SERIAL_8250_PCI=y
4373 +CONFIG_SERIAL_8250_EXAR=m
4374 +CONFIG_SERIAL_8250_CS=m
4375 +CONFIG_SERIAL_8250_MEN_MCB=m
4376 +CONFIG_SERIAL_8250_NR_UARTS=48
4377 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32
4378 +CONFIG_SERIAL_8250_EXTENDED=y
4379 +CONFIG_SERIAL_8250_MANY_PORTS=y
4380 +CONFIG_SERIAL_8250_SHARE_IRQ=y
4381 +# CONFIG_SERIAL_8250_DETECT_IRQ is not set
4382 +CONFIG_SERIAL_8250_RSA=y
4383 +CONFIG_SERIAL_8250_DWLIB=y
4384 +CONFIG_SERIAL_8250_DW=m
4385 +CONFIG_SERIAL_8250_RT288X=y
4386 +CONFIG_SERIAL_8250_LPSS=m
4387 +CONFIG_SERIAL_8250_MID=m
4390 +# Non-8250 serial port support
4392 +CONFIG_SERIAL_KGDB_NMI=y
4393 +CONFIG_SERIAL_MAX3100=m
4394 +CONFIG_SERIAL_MAX310X=y
4395 +CONFIG_SERIAL_UARTLITE=m
4396 +CONFIG_SERIAL_UARTLITE_NR_UARTS=1
4397 +CONFIG_SERIAL_CORE=y
4398 +CONFIG_SERIAL_CORE_CONSOLE=y
4399 +CONFIG_CONSOLE_POLL=y
4400 +CONFIG_SERIAL_JSM=m
4401 +CONFIG_SERIAL_LANTIQ=m
4402 +CONFIG_SERIAL_SCCNXP=y
4403 +CONFIG_SERIAL_SCCNXP_CONSOLE=y
4404 +CONFIG_SERIAL_SC16IS7XX_CORE=m
4405 +CONFIG_SERIAL_SC16IS7XX=m
4406 +CONFIG_SERIAL_SC16IS7XX_I2C=y
4407 +CONFIG_SERIAL_SC16IS7XX_SPI=y
4408 +CONFIG_SERIAL_BCM63XX=m
4409 +CONFIG_SERIAL_ALTERA_JTAGUART=m
4410 +CONFIG_SERIAL_ALTERA_UART=m
4411 +CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
4412 +CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
4413 +CONFIG_SERIAL_ARC=m
4414 +CONFIG_SERIAL_ARC_NR_PORTS=1
4415 +CONFIG_SERIAL_RP2=m
4416 +CONFIG_SERIAL_RP2_NR_UARTS=32
4417 +CONFIG_SERIAL_FSL_LPUART=m
4418 +CONFIG_SERIAL_FSL_LINFLEXUART=m
4419 +CONFIG_SERIAL_MEN_Z135=m
4420 +CONFIG_SERIAL_SPRD=m
4421 +# end of Serial drivers
4423 +CONFIG_SERIAL_MCTRL_GPIO=y
4424 +CONFIG_SERIAL_NONSTANDARD=y
4425 +CONFIG_ROCKETPORT=m
4426 +CONFIG_CYCLADES=m
4427 +# CONFIG_CYZ_INTR is not set
4428 +CONFIG_MOXA_INTELLIO=m
4429 +CONFIG_MOXA_SMARTIO=m
4430 +CONFIG_SYNCLINK_GT=m
4431 +CONFIG_ISI=m
4432 +CONFIG_N_HDLC=m
4433 +CONFIG_N_GSM=m
4434 +CONFIG_NOZOMI=m
4435 +CONFIG_NULL_TTY=m
4436 +CONFIG_TRACE_ROUTER=m
4437 +CONFIG_TRACE_SINK=m
4438 +CONFIG_HVC_DRIVER=y
4439 +CONFIG_HVC_IRQ=y
4440 +CONFIG_HVC_XEN=y
4441 +CONFIG_HVC_XEN_FRONTEND=y
4442 +CONFIG_SERIAL_DEV_BUS=y
4443 +CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
4444 +CONFIG_TTY_PRINTK=y
4445 +CONFIG_TTY_PRINTK_LEVEL=6
4446 +CONFIG_PRINTER=m
4447 +# CONFIG_LP_CONSOLE is not set
4448 +CONFIG_PPDEV=m
4449 +CONFIG_VIRTIO_CONSOLE=y
4450 +CONFIG_IPMI_HANDLER=m
4451 +CONFIG_IPMI_DMI_DECODE=y
4452 +CONFIG_IPMI_PLAT_DATA=y
4453 +# CONFIG_IPMI_PANIC_EVENT is not set
4454 +CONFIG_IPMI_DEVICE_INTERFACE=m
4455 +CONFIG_IPMI_SI=m
4456 +CONFIG_IPMI_SSIF=m
4457 +CONFIG_IPMI_WATCHDOG=m
4458 +CONFIG_IPMI_POWEROFF=m
4459 +CONFIG_HW_RANDOM=y
4460 +CONFIG_HW_RANDOM_TIMERIOMEM=m
4461 +CONFIG_HW_RANDOM_INTEL=m
4462 +CONFIG_HW_RANDOM_AMD=m
4463 +CONFIG_HW_RANDOM_BA431=m
4464 +CONFIG_HW_RANDOM_VIA=m
4465 +CONFIG_HW_RANDOM_VIRTIO=m
4466 +CONFIG_HW_RANDOM_XIPHERA=m
4467 +CONFIG_APPLICOM=m
4470 +# PCMCIA character devices
4472 +CONFIG_SYNCLINK_CS=m
4473 +CONFIG_CARDMAN_4000=m
4474 +CONFIG_CARDMAN_4040=m
4475 +CONFIG_SCR24X=m
4476 +CONFIG_IPWIRELESS=m
4477 +# end of PCMCIA character devices
4479 +CONFIG_MWAVE=m
4480 +CONFIG_DEVMEM=y
4481 +# CONFIG_DEVKMEM is not set
4482 +CONFIG_NVRAM=m
4483 +CONFIG_RAW_DRIVER=m
4484 +CONFIG_MAX_RAW_DEVS=256
4485 +CONFIG_DEVPORT=y
4486 +CONFIG_HPET=y
4487 +CONFIG_HPET_MMAP=y
4488 +CONFIG_HPET_MMAP_DEFAULT=y
4489 +CONFIG_HANGCHECK_TIMER=m
4490 +CONFIG_UV_MMTIMER=m
4491 +CONFIG_TCG_TPM=y
4492 +CONFIG_HW_RANDOM_TPM=y
4493 +CONFIG_TCG_TIS_CORE=y
4494 +CONFIG_TCG_TIS=y
4495 +CONFIG_TCG_TIS_SPI=m
4496 +CONFIG_TCG_TIS_SPI_CR50=y
4497 +CONFIG_TCG_TIS_I2C_CR50=m
4498 +CONFIG_TCG_TIS_I2C_ATMEL=m
4499 +CONFIG_TCG_TIS_I2C_INFINEON=m
4500 +CONFIG_TCG_TIS_I2C_NUVOTON=m
4501 +CONFIG_TCG_NSC=m
4502 +CONFIG_TCG_ATMEL=m
4503 +CONFIG_TCG_INFINEON=m
4504 +CONFIG_TCG_XEN=m
4505 +CONFIG_TCG_CRB=y
4506 +CONFIG_TCG_VTPM_PROXY=m
4507 +CONFIG_TCG_TIS_ST33ZP24=m
4508 +CONFIG_TCG_TIS_ST33ZP24_I2C=m
4509 +CONFIG_TCG_TIS_ST33ZP24_SPI=m
4510 +CONFIG_TELCLOCK=m
4511 +CONFIG_XILLYBUS=m
4512 +CONFIG_XILLYBUS_PCIE=m
4513 +# end of Character devices
4515 +CONFIG_RANDOM_TRUST_CPU=y
4516 +CONFIG_RANDOM_TRUST_BOOTLOADER=y
4519 +# I2C support
4521 +CONFIG_I2C=y
4522 +CONFIG_ACPI_I2C_OPREGION=y
4523 +CONFIG_I2C_BOARDINFO=y
4524 +CONFIG_I2C_COMPAT=y
4525 +CONFIG_I2C_CHARDEV=y
4526 +CONFIG_I2C_MUX=m
4529 +# Multiplexer I2C Chip support
4531 +CONFIG_I2C_MUX_GPIO=m
4532 +CONFIG_I2C_MUX_LTC4306=m
4533 +CONFIG_I2C_MUX_PCA9541=m
4534 +CONFIG_I2C_MUX_PCA954x=m
4535 +CONFIG_I2C_MUX_REG=m
4536 +CONFIG_I2C_MUX_MLXCPLD=m
4537 +# end of Multiplexer I2C Chip support
4539 +CONFIG_I2C_HELPER_AUTO=y
4540 +CONFIG_I2C_SMBUS=m
4541 +CONFIG_I2C_ALGOBIT=m
4542 +CONFIG_I2C_ALGOPCA=m
4545 +# I2C Hardware Bus support
4549 +# PC SMBus host controller drivers
4551 +CONFIG_I2C_ALI1535=m
4552 +CONFIG_I2C_ALI1563=m
4553 +CONFIG_I2C_ALI15X3=m
4554 +CONFIG_I2C_AMD756=m
4555 +CONFIG_I2C_AMD756_S4882=m
4556 +CONFIG_I2C_AMD8111=m
4557 +CONFIG_I2C_AMD_MP2=m
4558 +CONFIG_I2C_I801=m
4559 +CONFIG_I2C_ISCH=m
4560 +CONFIG_I2C_ISMT=m
4561 +CONFIG_I2C_PIIX4=m
4562 +CONFIG_I2C_CHT_WC=m
4563 +CONFIG_I2C_NFORCE2=m
4564 +CONFIG_I2C_NFORCE2_S4985=m
4565 +CONFIG_I2C_NVIDIA_GPU=m
4566 +CONFIG_I2C_SIS5595=m
4567 +CONFIG_I2C_SIS630=m
4568 +CONFIG_I2C_SIS96X=m
4569 +CONFIG_I2C_VIA=m
4570 +CONFIG_I2C_VIAPRO=m
4573 +# ACPI drivers
4575 +CONFIG_I2C_SCMI=m
4578 +# I2C system bus drivers (mostly embedded / system-on-chip)
4580 +CONFIG_I2C_CBUS_GPIO=m
4581 +CONFIG_I2C_DESIGNWARE_CORE=y
4582 +# CONFIG_I2C_DESIGNWARE_SLAVE is not set
4583 +CONFIG_I2C_DESIGNWARE_PLATFORM=y
4584 +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
4585 +CONFIG_I2C_DESIGNWARE_PCI=m
4586 +# CONFIG_I2C_EMEV2 is not set
4587 +CONFIG_I2C_GPIO=m
4588 +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
4589 +CONFIG_I2C_KEMPLD=m
4590 +CONFIG_I2C_OCORES=m
4591 +CONFIG_I2C_PCA_PLATFORM=m
4592 +CONFIG_I2C_SIMTEC=m
4593 +CONFIG_I2C_XILINX=m
4596 +# External I2C/SMBus adapter drivers
4598 +CONFIG_I2C_DIOLAN_U2C=m
4599 +CONFIG_I2C_DLN2=m
4600 +CONFIG_I2C_PARPORT=m
4601 +CONFIG_I2C_ROBOTFUZZ_OSIF=m
4602 +CONFIG_I2C_TAOS_EVM=m
4603 +CONFIG_I2C_TINY_USB=m
4604 +CONFIG_I2C_VIPERBOARD=m
4607 +# Other I2C/SMBus bus drivers
4609 +CONFIG_I2C_MLXCPLD=m
4610 +CONFIG_I2C_CROS_EC_TUNNEL=m
4611 +# end of I2C Hardware Bus support
4613 +CONFIG_I2C_STUB=m
4614 +# CONFIG_I2C_SLAVE is not set
4615 +# CONFIG_I2C_DEBUG_CORE is not set
4616 +# CONFIG_I2C_DEBUG_ALGO is not set
4617 +# CONFIG_I2C_DEBUG_BUS is not set
4618 +# end of I2C support
4620 +CONFIG_I3C=m
4621 +CONFIG_CDNS_I3C_MASTER=m
4622 +CONFIG_DW_I3C_MASTER=m
4623 +CONFIG_SVC_I3C_MASTER=m
4624 +CONFIG_MIPI_I3C_HCI=m
4625 +CONFIG_SPI=y
4626 +# CONFIG_SPI_DEBUG is not set
4627 +CONFIG_SPI_MASTER=y
4628 +CONFIG_SPI_MEM=y
4631 +# SPI Master Controller Drivers
4633 +CONFIG_SPI_ALTERA=m
4634 +CONFIG_SPI_AXI_SPI_ENGINE=m
4635 +CONFIG_SPI_BITBANG=m
4636 +CONFIG_SPI_BUTTERFLY=m
4637 +CONFIG_SPI_CADENCE=m
4638 +CONFIG_SPI_DESIGNWARE=m
4639 +CONFIG_SPI_DW_DMA=y
4640 +CONFIG_SPI_DW_PCI=m
4641 +CONFIG_SPI_DW_MMIO=m
4642 +CONFIG_SPI_DLN2=m
4643 +CONFIG_SPI_NXP_FLEXSPI=m
4644 +CONFIG_SPI_GPIO=m
4645 +CONFIG_SPI_LM70_LLP=m
4646 +CONFIG_SPI_LANTIQ_SSC=m
4647 +CONFIG_SPI_OC_TINY=m
4648 +CONFIG_SPI_PXA2XX=m
4649 +CONFIG_SPI_PXA2XX_PCI=m
4650 +# CONFIG_SPI_ROCKCHIP is not set
4651 +CONFIG_SPI_SC18IS602=m
4652 +CONFIG_SPI_SIFIVE=m
4653 +CONFIG_SPI_MXIC=m
4654 +CONFIG_SPI_XCOMM=m
4655 +# CONFIG_SPI_XILINX is not set
4656 +CONFIG_SPI_ZYNQMP_GQSPI=m
4657 +CONFIG_SPI_AMD=m
4660 +# SPI Multiplexer support
4662 +CONFIG_SPI_MUX=m
4665 +# SPI Protocol Masters
4667 +CONFIG_SPI_SPIDEV=m
4668 +CONFIG_SPI_LOOPBACK_TEST=m
4669 +CONFIG_SPI_TLE62X0=m
4670 +CONFIG_SPI_SLAVE=y
4671 +CONFIG_SPI_SLAVE_TIME=m
4672 +CONFIG_SPI_SLAVE_SYSTEM_CONTROL=m
4673 +CONFIG_SPI_DYNAMIC=y
4674 +CONFIG_SPMI=m
4675 +CONFIG_HSI=m
4676 +CONFIG_HSI_BOARDINFO=y
4679 +# HSI controllers
4683 +# HSI clients
4685 +CONFIG_HSI_CHAR=m
4686 +CONFIG_PPS=y
4687 +# CONFIG_PPS_DEBUG is not set
4690 +# PPS clients support
4692 +# CONFIG_PPS_CLIENT_KTIMER is not set
4693 +CONFIG_PPS_CLIENT_LDISC=m
4694 +CONFIG_PPS_CLIENT_PARPORT=m
4695 +CONFIG_PPS_CLIENT_GPIO=m
4698 +# PPS generators support
4702 +# PTP clock support
4704 +CONFIG_PTP_1588_CLOCK=y
4705 +CONFIG_DP83640_PHY=m
4706 +CONFIG_PTP_1588_CLOCK_INES=m
4707 +CONFIG_PTP_1588_CLOCK_KVM=m
4708 +CONFIG_PTP_1588_CLOCK_IDT82P33=m
4709 +CONFIG_PTP_1588_CLOCK_IDTCM=m
4710 +CONFIG_PTP_1588_CLOCK_VMW=m
4711 +CONFIG_PTP_1588_CLOCK_OCP=m
4712 +# end of PTP clock support
4714 +CONFIG_PINCTRL=y
4715 +CONFIG_PINMUX=y
4716 +CONFIG_PINCONF=y
4717 +CONFIG_GENERIC_PINCONF=y
4718 +# CONFIG_DEBUG_PINCTRL is not set
4719 +CONFIG_PINCTRL_AMD=y
4720 +CONFIG_PINCTRL_DA9062=m
4721 +CONFIG_PINCTRL_MCP23S08_I2C=m
4722 +CONFIG_PINCTRL_MCP23S08_SPI=m
4723 +CONFIG_PINCTRL_MCP23S08=m
4724 +CONFIG_PINCTRL_SX150X=y
4725 +CONFIG_PINCTRL_BAYTRAIL=y
4726 +CONFIG_PINCTRL_CHERRYVIEW=y
4727 +CONFIG_PINCTRL_LYNXPOINT=m
4728 +CONFIG_PINCTRL_INTEL=y
4729 +CONFIG_PINCTRL_ALDERLAKE=m
4730 +CONFIG_PINCTRL_BROXTON=m
4731 +CONFIG_PINCTRL_CANNONLAKE=m
4732 +CONFIG_PINCTRL_CEDARFORK=m
4733 +CONFIG_PINCTRL_DENVERTON=m
4734 +CONFIG_PINCTRL_ELKHARTLAKE=m
4735 +CONFIG_PINCTRL_EMMITSBURG=m
4736 +CONFIG_PINCTRL_GEMINILAKE=m
4737 +CONFIG_PINCTRL_ICELAKE=m
4738 +CONFIG_PINCTRL_JASPERLAKE=m
4739 +CONFIG_PINCTRL_LAKEFIELD=m
4740 +CONFIG_PINCTRL_LEWISBURG=m
4741 +CONFIG_PINCTRL_SUNRISEPOINT=m
4742 +CONFIG_PINCTRL_TIGERLAKE=m
4745 +# Renesas pinctrl drivers
4747 +# end of Renesas pinctrl drivers
4749 +CONFIG_PINCTRL_MADERA=m
4750 +CONFIG_PINCTRL_CS47L15=y
4751 +CONFIG_PINCTRL_CS47L35=y
4752 +CONFIG_PINCTRL_CS47L85=y
4753 +CONFIG_PINCTRL_CS47L90=y
4754 +CONFIG_PINCTRL_CS47L92=y
4755 +CONFIG_GPIOLIB=y
4756 +CONFIG_GPIOLIB_FASTPATH_LIMIT=512
4757 +CONFIG_GPIO_ACPI=y
4758 +CONFIG_GPIOLIB_IRQCHIP=y
4759 +# CONFIG_DEBUG_GPIO is not set
4760 +CONFIG_GPIO_SYSFS=y
4761 +CONFIG_GPIO_CDEV=y
4762 +# CONFIG_GPIO_CDEV_V1 is not set
4763 +CONFIG_GPIO_GENERIC=m
4764 +CONFIG_GPIO_MAX730X=m
4767 +# Memory mapped GPIO drivers
4769 +CONFIG_GPIO_AMDPT=m
4770 +CONFIG_GPIO_DWAPB=m
4771 +CONFIG_GPIO_EXAR=m
4772 +CONFIG_GPIO_GENERIC_PLATFORM=m
4773 +CONFIG_GPIO_ICH=m
4774 +CONFIG_GPIO_MB86S7X=m
4775 +CONFIG_GPIO_MENZ127=m
4776 +CONFIG_GPIO_SIOX=m
4777 +CONFIG_GPIO_VX855=m
4778 +CONFIG_GPIO_AMD_FCH=m
4779 +# end of Memory mapped GPIO drivers
4782 +# Port-mapped I/O GPIO drivers
4784 +CONFIG_GPIO_104_DIO_48E=m
4785 +CONFIG_GPIO_104_IDIO_16=m
4786 +CONFIG_GPIO_104_IDI_48=m
4787 +CONFIG_GPIO_F7188X=m
4788 +CONFIG_GPIO_GPIO_MM=m
4789 +CONFIG_GPIO_IT87=m
4790 +CONFIG_GPIO_SCH=m
4791 +CONFIG_GPIO_SCH311X=m
4792 +CONFIG_GPIO_WINBOND=m
4793 +CONFIG_GPIO_WS16C48=m
4794 +# end of Port-mapped I/O GPIO drivers
4797 +# I2C GPIO expanders
4799 +CONFIG_GPIO_ADP5588=m
4800 +CONFIG_GPIO_MAX7300=m
4801 +CONFIG_GPIO_MAX732X=m
4802 +CONFIG_GPIO_PCA953X=m
4803 +CONFIG_GPIO_PCA953X_IRQ=y
4804 +CONFIG_GPIO_PCA9570=m
4805 +CONFIG_GPIO_PCF857X=m
4806 +CONFIG_GPIO_TPIC2810=m
4807 +# end of I2C GPIO expanders
4810 +# MFD GPIO expanders
4812 +CONFIG_GPIO_ADP5520=m
4813 +CONFIG_GPIO_ARIZONA=m
4814 +CONFIG_GPIO_BD9571MWV=m
4815 +CONFIG_GPIO_CRYSTAL_COVE=y
4816 +CONFIG_GPIO_DA9052=m
4817 +CONFIG_GPIO_DA9055=m
4818 +CONFIG_GPIO_DLN2=m
4819 +CONFIG_GPIO_JANZ_TTL=m
4820 +CONFIG_GPIO_KEMPLD=m
4821 +CONFIG_GPIO_LP3943=m
4822 +CONFIG_GPIO_LP873X=m
4823 +CONFIG_GPIO_MADERA=m
4824 +CONFIG_GPIO_PALMAS=y
4825 +CONFIG_GPIO_RC5T583=y
4826 +CONFIG_GPIO_TPS65086=m
4827 +CONFIG_GPIO_TPS6586X=y
4828 +CONFIG_GPIO_TPS65910=y
4829 +CONFIG_GPIO_TPS65912=m
4830 +CONFIG_GPIO_TPS68470=y
4831 +CONFIG_GPIO_TQMX86=m
4832 +CONFIG_GPIO_TWL4030=m
4833 +CONFIG_GPIO_TWL6040=m
4834 +CONFIG_GPIO_UCB1400=m
4835 +CONFIG_GPIO_WHISKEY_COVE=m
4836 +CONFIG_GPIO_WM831X=m
4837 +CONFIG_GPIO_WM8350=m
4838 +CONFIG_GPIO_WM8994=m
4839 +# end of MFD GPIO expanders
4842 +# PCI GPIO expanders
4844 +CONFIG_GPIO_AMD8111=m
4845 +CONFIG_GPIO_ML_IOH=m
4846 +CONFIG_GPIO_PCI_IDIO_16=m
4847 +CONFIG_GPIO_PCIE_IDIO_24=m
4848 +CONFIG_GPIO_RDC321X=m
4849 +# end of PCI GPIO expanders
4852 +# SPI GPIO expanders
4854 +CONFIG_GPIO_MAX3191X=m
4855 +CONFIG_GPIO_MAX7301=m
4856 +CONFIG_GPIO_MC33880=m
4857 +CONFIG_GPIO_PISOSR=m
4858 +CONFIG_GPIO_XRA1403=m
4859 +# end of SPI GPIO expanders
4862 +# USB GPIO expanders
4864 +CONFIG_GPIO_VIPERBOARD=m
4865 +# end of USB GPIO expanders
4868 +# Virtual GPIO drivers
4870 +CONFIG_GPIO_AGGREGATOR=m
4871 +# CONFIG_GPIO_MOCKUP is not set
4872 +# end of Virtual GPIO drivers
4874 +CONFIG_W1=m
4875 +CONFIG_W1_CON=y
4878 +# 1-wire Bus Masters
4880 +CONFIG_W1_MASTER_MATROX=m
4881 +CONFIG_W1_MASTER_DS2490=m
4882 +CONFIG_W1_MASTER_DS2482=m
4883 +CONFIG_W1_MASTER_DS1WM=m
4884 +CONFIG_W1_MASTER_GPIO=m
4885 +CONFIG_W1_MASTER_SGI=m
4886 +# end of 1-wire Bus Masters
4889 +# 1-wire Slaves
4891 +CONFIG_W1_SLAVE_THERM=m
4892 +CONFIG_W1_SLAVE_SMEM=m
4893 +CONFIG_W1_SLAVE_DS2405=m
4894 +CONFIG_W1_SLAVE_DS2408=m
4895 +CONFIG_W1_SLAVE_DS2408_READBACK=y
4896 +CONFIG_W1_SLAVE_DS2413=m
4897 +CONFIG_W1_SLAVE_DS2406=m
4898 +CONFIG_W1_SLAVE_DS2423=m
4899 +CONFIG_W1_SLAVE_DS2805=m
4900 +CONFIG_W1_SLAVE_DS2430=m
4901 +CONFIG_W1_SLAVE_DS2431=m
4902 +CONFIG_W1_SLAVE_DS2433=m
4903 +# CONFIG_W1_SLAVE_DS2433_CRC is not set
4904 +CONFIG_W1_SLAVE_DS2438=m
4905 +CONFIG_W1_SLAVE_DS250X=m
4906 +CONFIG_W1_SLAVE_DS2780=m
4907 +CONFIG_W1_SLAVE_DS2781=m
4908 +CONFIG_W1_SLAVE_DS28E04=m
4909 +CONFIG_W1_SLAVE_DS28E17=m
4910 +# end of 1-wire Slaves
4912 +CONFIG_POWER_RESET=y
4913 +CONFIG_POWER_RESET_MT6323=y
4914 +CONFIG_POWER_RESET_RESTART=y
4915 +CONFIG_POWER_SUPPLY=y
4916 +# CONFIG_POWER_SUPPLY_DEBUG is not set
4917 +CONFIG_POWER_SUPPLY_HWMON=y
4918 +CONFIG_PDA_POWER=m
4919 +CONFIG_GENERIC_ADC_BATTERY=m
4920 +CONFIG_MAX8925_POWER=m
4921 +CONFIG_WM831X_BACKUP=m
4922 +CONFIG_WM831X_POWER=m
4923 +CONFIG_WM8350_POWER=m
4924 +CONFIG_TEST_POWER=m
4925 +CONFIG_BATTERY_88PM860X=m
4926 +CONFIG_CHARGER_ADP5061=m
4927 +CONFIG_BATTERY_CW2015=m
4928 +CONFIG_BATTERY_DS2760=m
4929 +CONFIG_BATTERY_DS2780=m
4930 +CONFIG_BATTERY_DS2781=m
4931 +CONFIG_BATTERY_DS2782=m
4932 +CONFIG_BATTERY_SBS=m
4933 +CONFIG_CHARGER_SBS=m
4934 +CONFIG_MANAGER_SBS=m
4935 +CONFIG_BATTERY_BQ27XXX=m
4936 +CONFIG_BATTERY_BQ27XXX_I2C=m
4937 +CONFIG_BATTERY_BQ27XXX_HDQ=m
4938 +# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set
4939 +CONFIG_BATTERY_DA9030=m
4940 +CONFIG_BATTERY_DA9052=m
4941 +CONFIG_CHARGER_DA9150=m
4942 +CONFIG_BATTERY_DA9150=m
4943 +CONFIG_CHARGER_AXP20X=m
4944 +CONFIG_BATTERY_AXP20X=m
4945 +CONFIG_AXP20X_POWER=m
4946 +CONFIG_AXP288_CHARGER=m
4947 +CONFIG_AXP288_FUEL_GAUGE=m
4948 +CONFIG_BATTERY_MAX17040=m
4949 +CONFIG_BATTERY_MAX17042=m
4950 +CONFIG_BATTERY_MAX1721X=m
4951 +CONFIG_BATTERY_TWL4030_MADC=m
4952 +CONFIG_CHARGER_88PM860X=m
4953 +CONFIG_CHARGER_PCF50633=m
4954 +CONFIG_BATTERY_RX51=m
4955 +CONFIG_CHARGER_ISP1704=m
4956 +CONFIG_CHARGER_MAX8903=m
4957 +CONFIG_CHARGER_TWL4030=m
4958 +CONFIG_CHARGER_LP8727=m
4959 +CONFIG_CHARGER_LP8788=m
4960 +CONFIG_CHARGER_GPIO=m
4961 +CONFIG_CHARGER_MANAGER=y
4962 +CONFIG_CHARGER_LT3651=m
4963 +CONFIG_CHARGER_LTC4162L=m
4964 +CONFIG_CHARGER_MAX14577=m
4965 +CONFIG_CHARGER_MAX77693=m
4966 +CONFIG_CHARGER_MAX8997=m
4967 +CONFIG_CHARGER_MAX8998=m
4968 +CONFIG_CHARGER_MP2629=m
4969 +CONFIG_CHARGER_BQ2415X=m
4970 +CONFIG_CHARGER_BQ24190=m
4971 +CONFIG_CHARGER_BQ24257=m
4972 +CONFIG_CHARGER_BQ24735=m
4973 +CONFIG_CHARGER_BQ2515X=m
4974 +CONFIG_CHARGER_BQ25890=m
4975 +CONFIG_CHARGER_BQ25980=m
4976 +CONFIG_CHARGER_BQ256XX=m
4977 +CONFIG_CHARGER_SMB347=m
4978 +CONFIG_CHARGER_TPS65090=m
4979 +CONFIG_BATTERY_GAUGE_LTC2941=m
4980 +CONFIG_BATTERY_RT5033=m
4981 +CONFIG_CHARGER_RT9455=m
4982 +CONFIG_CHARGER_CROS_USBPD=m
4983 +CONFIG_CHARGER_BD99954=m
4984 +CONFIG_CHARGER_WILCO=m
4985 +CONFIG_HWMON=y
4986 +CONFIG_HWMON_VID=m
4987 +# CONFIG_HWMON_DEBUG_CHIP is not set
4990 +# Native drivers
4992 +CONFIG_SENSORS_ABITUGURU=m
4993 +CONFIG_SENSORS_ABITUGURU3=m
4994 +CONFIG_SENSORS_AD7314=m
4995 +CONFIG_SENSORS_AD7414=m
4996 +CONFIG_SENSORS_AD7418=m
4997 +CONFIG_SENSORS_ADM1021=m
4998 +CONFIG_SENSORS_ADM1025=m
4999 +CONFIG_SENSORS_ADM1026=m
5000 +CONFIG_SENSORS_ADM1029=m
5001 +CONFIG_SENSORS_ADM1031=m
5002 +CONFIG_SENSORS_ADM1177=m
5003 +CONFIG_SENSORS_ADM9240=m
5004 +CONFIG_SENSORS_ADT7X10=m
5005 +CONFIG_SENSORS_ADT7310=m
5006 +CONFIG_SENSORS_ADT7410=m
5007 +CONFIG_SENSORS_ADT7411=m
5008 +CONFIG_SENSORS_ADT7462=m
5009 +CONFIG_SENSORS_ADT7470=m
5010 +CONFIG_SENSORS_ADT7475=m
5011 +CONFIG_SENSORS_AHT10=m
5012 +CONFIG_SENSORS_AS370=m
5013 +CONFIG_SENSORS_ASC7621=m
5014 +CONFIG_SENSORS_AXI_FAN_CONTROL=m
5015 +CONFIG_SENSORS_K8TEMP=m
5016 +CONFIG_SENSORS_K10TEMP=m
5017 +CONFIG_SENSORS_FAM15H_POWER=m
5018 +CONFIG_SENSORS_AMD_ENERGY=m
5019 +CONFIG_SENSORS_APPLESMC=m
5020 +CONFIG_SENSORS_ASB100=m
5021 +CONFIG_SENSORS_ASPEED=m
5022 +CONFIG_SENSORS_ATXP1=m
5023 +CONFIG_SENSORS_CORSAIR_CPRO=m
5024 +CONFIG_SENSORS_CORSAIR_PSU=m
5025 +CONFIG_SENSORS_DRIVETEMP=m
5026 +CONFIG_SENSORS_DS620=m
5027 +CONFIG_SENSORS_DS1621=m
5028 +CONFIG_SENSORS_DELL_SMM=m
5029 +CONFIG_SENSORS_DA9052_ADC=m
5030 +CONFIG_SENSORS_DA9055=m
5031 +CONFIG_SENSORS_I5K_AMB=m
5032 +CONFIG_SENSORS_F71805F=m
5033 +CONFIG_SENSORS_F71882FG=m
5034 +CONFIG_SENSORS_F75375S=m
5035 +CONFIG_SENSORS_MC13783_ADC=m
5036 +CONFIG_SENSORS_FSCHMD=m
5037 +CONFIG_SENSORS_FTSTEUTATES=m
5038 +CONFIG_SENSORS_GL518SM=m
5039 +CONFIG_SENSORS_GL520SM=m
5040 +CONFIG_SENSORS_G760A=m
5041 +CONFIG_SENSORS_G762=m
5042 +CONFIG_SENSORS_HIH6130=m
5043 +CONFIG_SENSORS_IBMAEM=m
5044 +CONFIG_SENSORS_IBMPEX=m
5045 +CONFIG_SENSORS_IIO_HWMON=m
5046 +CONFIG_SENSORS_I5500=m
5047 +CONFIG_SENSORS_CORETEMP=m
5048 +CONFIG_SENSORS_IT87=m
5049 +CONFIG_SENSORS_JC42=m
5050 +CONFIG_SENSORS_POWR1220=m
5051 +CONFIG_SENSORS_LINEAGE=m
5052 +CONFIG_SENSORS_LTC2945=m
5053 +CONFIG_SENSORS_LTC2947=m
5054 +CONFIG_SENSORS_LTC2947_I2C=m
5055 +CONFIG_SENSORS_LTC2947_SPI=m
5056 +CONFIG_SENSORS_LTC2990=m
5057 +CONFIG_SENSORS_LTC2992=m
5058 +CONFIG_SENSORS_LTC4151=m
5059 +CONFIG_SENSORS_LTC4215=m
5060 +CONFIG_SENSORS_LTC4222=m
5061 +CONFIG_SENSORS_LTC4245=m
5062 +CONFIG_SENSORS_LTC4260=m
5063 +CONFIG_SENSORS_LTC4261=m
5064 +CONFIG_SENSORS_MAX1111=m
5065 +CONFIG_SENSORS_MAX127=m
5066 +CONFIG_SENSORS_MAX16065=m
5067 +CONFIG_SENSORS_MAX1619=m
5068 +CONFIG_SENSORS_MAX1668=m
5069 +CONFIG_SENSORS_MAX197=m
5070 +CONFIG_SENSORS_MAX31722=m
5071 +CONFIG_SENSORS_MAX31730=m
5072 +CONFIG_SENSORS_MAX6621=m
5073 +CONFIG_SENSORS_MAX6639=m
5074 +CONFIG_SENSORS_MAX6642=m
5075 +CONFIG_SENSORS_MAX6650=m
5076 +CONFIG_SENSORS_MAX6697=m
5077 +CONFIG_SENSORS_MAX31790=m
5078 +CONFIG_SENSORS_MCP3021=m
5079 +CONFIG_SENSORS_MLXREG_FAN=m
5080 +CONFIG_SENSORS_TC654=m
5081 +CONFIG_SENSORS_TPS23861=m
5082 +CONFIG_SENSORS_MENF21BMC_HWMON=m
5083 +CONFIG_SENSORS_MR75203=m
5084 +CONFIG_SENSORS_ADCXX=m
5085 +CONFIG_SENSORS_LM63=m
5086 +CONFIG_SENSORS_LM70=m
5087 +CONFIG_SENSORS_LM73=m
5088 +CONFIG_SENSORS_LM75=m
5089 +CONFIG_SENSORS_LM77=m
5090 +CONFIG_SENSORS_LM78=m
5091 +CONFIG_SENSORS_LM80=m
5092 +CONFIG_SENSORS_LM83=m
5093 +CONFIG_SENSORS_LM85=m
5094 +CONFIG_SENSORS_LM87=m
5095 +CONFIG_SENSORS_LM90=m
5096 +CONFIG_SENSORS_LM92=m
5097 +CONFIG_SENSORS_LM93=m
5098 +CONFIG_SENSORS_LM95234=m
5099 +CONFIG_SENSORS_LM95241=m
5100 +CONFIG_SENSORS_LM95245=m
5101 +CONFIG_SENSORS_PC87360=m
5102 +CONFIG_SENSORS_PC87427=m
5103 +CONFIG_SENSORS_NTC_THERMISTOR=m
5104 +CONFIG_SENSORS_NCT6683=m
5105 +CONFIG_SENSORS_NCT6775=m
5106 +CONFIG_SENSORS_NCT7802=m
5107 +CONFIG_SENSORS_NCT7904=m
5108 +CONFIG_SENSORS_NPCM7XX=m
5109 +CONFIG_SENSORS_PCF8591=m
5110 +CONFIG_PMBUS=m
5111 +CONFIG_SENSORS_PMBUS=m
5112 +CONFIG_SENSORS_ADM1266=m
5113 +CONFIG_SENSORS_ADM1275=m
5114 +CONFIG_SENSORS_BEL_PFE=m
5115 +CONFIG_SENSORS_IBM_CFFPS=m
5116 +CONFIG_SENSORS_INSPUR_IPSPS=m
5117 +CONFIG_SENSORS_IR35221=m
5118 +CONFIG_SENSORS_IR38064=m
5119 +CONFIG_SENSORS_IRPS5401=m
5120 +CONFIG_SENSORS_ISL68137=m
5121 +CONFIG_SENSORS_LM25066=m
5122 +CONFIG_SENSORS_LTC2978=m
5123 +CONFIG_SENSORS_LTC2978_REGULATOR=y
5124 +CONFIG_SENSORS_LTC3815=m
5125 +CONFIG_SENSORS_MAX16064=m
5126 +CONFIG_SENSORS_MAX16601=m
5127 +CONFIG_SENSORS_MAX20730=m
5128 +CONFIG_SENSORS_MAX20751=m
5129 +CONFIG_SENSORS_MAX31785=m
5130 +CONFIG_SENSORS_MAX34440=m
5131 +CONFIG_SENSORS_MAX8688=m
5132 +CONFIG_SENSORS_MP2975=m
5133 +CONFIG_SENSORS_PM6764TR=m
5134 +CONFIG_SENSORS_PXE1610=m
5135 +CONFIG_SENSORS_Q54SJ108A2=m
5136 +CONFIG_SENSORS_TPS40422=m
5137 +CONFIG_SENSORS_TPS53679=m
5138 +CONFIG_SENSORS_UCD9000=m
5139 +CONFIG_SENSORS_UCD9200=m
5140 +CONFIG_SENSORS_XDPE122=m
5141 +CONFIG_SENSORS_ZL6100=m
5142 +CONFIG_SENSORS_SBTSI=m
5143 +CONFIG_SENSORS_SHT15=m
5144 +CONFIG_SENSORS_SHT21=m
5145 +CONFIG_SENSORS_SHT3x=m
5146 +CONFIG_SENSORS_SHTC1=m
5147 +CONFIG_SENSORS_SIS5595=m
5148 +CONFIG_SENSORS_DME1737=m
5149 +CONFIG_SENSORS_EMC1403=m
5150 +CONFIG_SENSORS_EMC2103=m
5151 +CONFIG_SENSORS_EMC6W201=m
5152 +CONFIG_SENSORS_SMSC47M1=m
5153 +CONFIG_SENSORS_SMSC47M192=m
5154 +CONFIG_SENSORS_SMSC47B397=m
5155 +CONFIG_SENSORS_SCH56XX_COMMON=m
5156 +CONFIG_SENSORS_SCH5627=m
5157 +CONFIG_SENSORS_SCH5636=m
5158 +CONFIG_SENSORS_STTS751=m
5159 +CONFIG_SENSORS_SMM665=m
5160 +CONFIG_SENSORS_ADC128D818=m
5161 +CONFIG_SENSORS_ADS7828=m
5162 +CONFIG_SENSORS_ADS7871=m
5163 +CONFIG_SENSORS_AMC6821=m
5164 +CONFIG_SENSORS_INA209=m
5165 +CONFIG_SENSORS_INA2XX=m
5166 +CONFIG_SENSORS_INA3221=m
5167 +CONFIG_SENSORS_TC74=m
5168 +CONFIG_SENSORS_THMC50=m
5169 +CONFIG_SENSORS_TMP102=m
5170 +CONFIG_SENSORS_TMP103=m
5171 +CONFIG_SENSORS_TMP108=m
5172 +CONFIG_SENSORS_TMP401=m
5173 +CONFIG_SENSORS_TMP421=m
5174 +CONFIG_SENSORS_TMP513=m
5175 +CONFIG_SENSORS_VIA_CPUTEMP=m
5176 +CONFIG_SENSORS_VIA686A=m
5177 +CONFIG_SENSORS_VT1211=m
5178 +CONFIG_SENSORS_VT8231=m
5179 +CONFIG_SENSORS_W83773G=m
5180 +CONFIG_SENSORS_W83781D=m
5181 +CONFIG_SENSORS_W83791D=m
5182 +CONFIG_SENSORS_W83792D=m
5183 +CONFIG_SENSORS_W83793=m
5184 +CONFIG_SENSORS_W83795=m
5185 +# CONFIG_SENSORS_W83795_FANCTRL is not set
5186 +CONFIG_SENSORS_W83L785TS=m
5187 +CONFIG_SENSORS_W83L786NG=m
5188 +CONFIG_SENSORS_W83627HF=m
5189 +CONFIG_SENSORS_W83627EHF=m
5190 +CONFIG_SENSORS_WM831X=m
5191 +CONFIG_SENSORS_WM8350=m
5192 +CONFIG_SENSORS_XGENE=m
5193 +CONFIG_SENSORS_INTEL_M10_BMC_HWMON=m
5196 +# ACPI drivers
5198 +CONFIG_SENSORS_ACPI_POWER=m
5199 +CONFIG_SENSORS_ATK0110=m
5200 +CONFIG_THERMAL=y
5201 +CONFIG_THERMAL_NETLINK=y
5202 +CONFIG_THERMAL_STATISTICS=y
5203 +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
5204 +CONFIG_THERMAL_HWMON=y
5205 +CONFIG_THERMAL_WRITABLE_TRIPS=y
5206 +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
5207 +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
5208 +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
5209 +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
5210 +CONFIG_THERMAL_GOV_FAIR_SHARE=y
5211 +CONFIG_THERMAL_GOV_STEP_WISE=y
5212 +CONFIG_THERMAL_GOV_BANG_BANG=y
5213 +CONFIG_THERMAL_GOV_USER_SPACE=y
5214 +CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
5215 +CONFIG_DEVFREQ_THERMAL=y
5216 +CONFIG_THERMAL_EMULATION=y
5219 +# Intel thermal drivers
5221 +CONFIG_INTEL_POWERCLAMP=m
5222 +CONFIG_X86_THERMAL_VECTOR=y
5223 +CONFIG_X86_PKG_TEMP_THERMAL=m
5224 +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
5225 +CONFIG_INTEL_SOC_DTS_THERMAL=m
5228 +# ACPI INT340X thermal drivers
5230 +CONFIG_INT340X_THERMAL=m
5231 +CONFIG_ACPI_THERMAL_REL=m
5232 +CONFIG_INT3406_THERMAL=m
5233 +CONFIG_PROC_THERMAL_MMIO_RAPL=m
5234 +# end of ACPI INT340X thermal drivers
5236 +CONFIG_INTEL_BXT_PMIC_THERMAL=m
5237 +CONFIG_INTEL_PCH_THERMAL=m
5238 +# end of Intel thermal drivers
5240 +CONFIG_GENERIC_ADC_THERMAL=m
5241 +CONFIG_WATCHDOG=y
5242 +CONFIG_WATCHDOG_CORE=y
5243 +# CONFIG_WATCHDOG_NOWAYOUT is not set
5244 +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
5245 +CONFIG_WATCHDOG_OPEN_TIMEOUT=0
5246 +CONFIG_WATCHDOG_SYSFS=y
5249 +# Watchdog Pretimeout Governors
5251 +CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
5252 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_SEL=m
5253 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=y
5254 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=m
5255 +CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y
5256 +# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC is not set
5259 +# Watchdog Device Drivers
5261 +CONFIG_SOFT_WATCHDOG=m
5262 +CONFIG_SOFT_WATCHDOG_PRETIMEOUT=y
5263 +CONFIG_DA9052_WATCHDOG=m
5264 +CONFIG_DA9055_WATCHDOG=m
5265 +CONFIG_DA9063_WATCHDOG=m
5266 +CONFIG_DA9062_WATCHDOG=m
5267 +CONFIG_MENF21BMC_WATCHDOG=m
5268 +CONFIG_MENZ069_WATCHDOG=m
5269 +CONFIG_WDAT_WDT=m
5270 +CONFIG_WM831X_WATCHDOG=m
5271 +CONFIG_WM8350_WATCHDOG=m
5272 +CONFIG_XILINX_WATCHDOG=m
5273 +CONFIG_ZIIRAVE_WATCHDOG=m
5274 +CONFIG_RAVE_SP_WATCHDOG=m
5275 +CONFIG_MLX_WDT=m
5276 +CONFIG_CADENCE_WATCHDOG=m
5277 +CONFIG_DW_WATCHDOG=m
5278 +CONFIG_TWL4030_WATCHDOG=m
5279 +CONFIG_MAX63XX_WATCHDOG=m
5280 +CONFIG_RETU_WATCHDOG=m
5281 +CONFIG_ACQUIRE_WDT=m
5282 +CONFIG_ADVANTECH_WDT=m
5283 +CONFIG_ALIM1535_WDT=m
5284 +CONFIG_ALIM7101_WDT=m
5285 +CONFIG_EBC_C384_WDT=m
5286 +CONFIG_F71808E_WDT=m
5287 +CONFIG_SP5100_TCO=m
5288 +CONFIG_SBC_FITPC2_WATCHDOG=m
5289 +CONFIG_EUROTECH_WDT=m
5290 +CONFIG_IB700_WDT=m
5291 +CONFIG_IBMASR=m
5292 +CONFIG_WAFER_WDT=m
5293 +CONFIG_I6300ESB_WDT=m
5294 +CONFIG_IE6XX_WDT=m
5295 +CONFIG_ITCO_WDT=m
5296 +CONFIG_ITCO_VENDOR_SUPPORT=y
5297 +CONFIG_IT8712F_WDT=m
5298 +CONFIG_IT87_WDT=m
5299 +CONFIG_HP_WATCHDOG=m
5300 +CONFIG_HPWDT_NMI_DECODING=y
5301 +CONFIG_KEMPLD_WDT=m
5302 +CONFIG_SC1200_WDT=m
5303 +CONFIG_PC87413_WDT=m
5304 +CONFIG_NV_TCO=m
5305 +CONFIG_60XX_WDT=m
5306 +CONFIG_CPU5_WDT=m
5307 +CONFIG_SMSC_SCH311X_WDT=m
5308 +CONFIG_SMSC37B787_WDT=m
5309 +CONFIG_TQMX86_WDT=m
5310 +CONFIG_VIA_WDT=m
5311 +CONFIG_W83627HF_WDT=m
5312 +CONFIG_W83877F_WDT=m
5313 +CONFIG_W83977F_WDT=m
5314 +CONFIG_MACHZ_WDT=m
5315 +CONFIG_SBC_EPX_C3_WATCHDOG=m
5316 +CONFIG_INTEL_MEI_WDT=m
5317 +CONFIG_NI903X_WDT=m
5318 +CONFIG_NIC7018_WDT=m
5319 +CONFIG_MEN_A21_WDT=m
5320 +CONFIG_XEN_WDT=m
5323 +# PCI-based Watchdog Cards
5325 +CONFIG_PCIPCWATCHDOG=m
5326 +CONFIG_WDTPCI=m
5329 +# USB-based Watchdog Cards
5331 +CONFIG_USBPCWATCHDOG=m
5332 +CONFIG_SSB_POSSIBLE=y
5333 +CONFIG_SSB=m
5334 +CONFIG_SSB_SPROM=y
5335 +CONFIG_SSB_BLOCKIO=y
5336 +CONFIG_SSB_PCIHOST_POSSIBLE=y
5337 +CONFIG_SSB_PCIHOST=y
5338 +CONFIG_SSB_B43_PCI_BRIDGE=y
5339 +CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
5340 +# CONFIG_SSB_PCMCIAHOST is not set
5341 +CONFIG_SSB_SDIOHOST_POSSIBLE=y
5342 +CONFIG_SSB_SDIOHOST=y
5343 +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
5344 +CONFIG_SSB_DRIVER_PCICORE=y
5345 +CONFIG_SSB_DRIVER_GPIO=y
5346 +CONFIG_BCMA_POSSIBLE=y
5347 +CONFIG_BCMA=m
5348 +CONFIG_BCMA_BLOCKIO=y
5349 +CONFIG_BCMA_HOST_PCI_POSSIBLE=y
5350 +CONFIG_BCMA_HOST_PCI=y
5351 +CONFIG_BCMA_HOST_SOC=y
5352 +CONFIG_BCMA_DRIVER_PCI=y
5353 +CONFIG_BCMA_SFLASH=y
5354 +CONFIG_BCMA_DRIVER_GMAC_CMN=y
5355 +CONFIG_BCMA_DRIVER_GPIO=y
5356 +# CONFIG_BCMA_DEBUG is not set
5359 +# Multifunction device drivers
5361 +CONFIG_MFD_CORE=y
5362 +CONFIG_MFD_AS3711=y
5363 +CONFIG_PMIC_ADP5520=y
5364 +CONFIG_MFD_AAT2870_CORE=y
5365 +CONFIG_MFD_BCM590XX=m
5366 +CONFIG_MFD_BD9571MWV=m
5367 +CONFIG_MFD_AXP20X=m
5368 +CONFIG_MFD_AXP20X_I2C=m
5369 +CONFIG_MFD_CROS_EC_DEV=m
5370 +CONFIG_MFD_MADERA=m
5371 +CONFIG_MFD_MADERA_I2C=m
5372 +CONFIG_MFD_MADERA_SPI=m
5373 +CONFIG_MFD_CS47L15=y
5374 +CONFIG_MFD_CS47L35=y
5375 +CONFIG_MFD_CS47L85=y
5376 +CONFIG_MFD_CS47L90=y
5377 +CONFIG_MFD_CS47L92=y
5378 +CONFIG_PMIC_DA903X=y
5379 +CONFIG_PMIC_DA9052=y
5380 +CONFIG_MFD_DA9052_SPI=y
5381 +CONFIG_MFD_DA9052_I2C=y
5382 +CONFIG_MFD_DA9055=y
5383 +CONFIG_MFD_DA9062=m
5384 +CONFIG_MFD_DA9063=y
5385 +CONFIG_MFD_DA9150=m
5386 +CONFIG_MFD_DLN2=m
5387 +CONFIG_MFD_MC13XXX=m
5388 +CONFIG_MFD_MC13XXX_SPI=m
5389 +CONFIG_MFD_MC13XXX_I2C=m
5390 +CONFIG_MFD_MP2629=m
5391 +CONFIG_HTC_PASIC3=m
5392 +CONFIG_HTC_I2CPLD=y
5393 +CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m
5394 +CONFIG_LPC_ICH=m
5395 +CONFIG_LPC_SCH=m
5396 +CONFIG_INTEL_SOC_PMIC=y
5397 +CONFIG_INTEL_SOC_PMIC_BXTWC=m
5398 +CONFIG_INTEL_SOC_PMIC_CHTWC=y
5399 +CONFIG_INTEL_SOC_PMIC_CHTDC_TI=m
5400 +CONFIG_INTEL_SOC_PMIC_MRFLD=m
5401 +CONFIG_MFD_INTEL_LPSS=m
5402 +CONFIG_MFD_INTEL_LPSS_ACPI=m
5403 +CONFIG_MFD_INTEL_LPSS_PCI=m
5404 +CONFIG_MFD_INTEL_PMC_BXT=m
5405 +CONFIG_MFD_INTEL_PMT=m
5406 +CONFIG_MFD_IQS62X=m
5407 +CONFIG_MFD_JANZ_CMODIO=m
5408 +CONFIG_MFD_KEMPLD=m
5409 +CONFIG_MFD_88PM800=m
5410 +CONFIG_MFD_88PM805=m
5411 +CONFIG_MFD_88PM860X=y
5412 +CONFIG_MFD_MAX14577=y
5413 +CONFIG_MFD_MAX77693=y
5414 +CONFIG_MFD_MAX77843=y
5415 +CONFIG_MFD_MAX8907=m
5416 +CONFIG_MFD_MAX8925=y
5417 +CONFIG_MFD_MAX8997=y
5418 +CONFIG_MFD_MAX8998=y
5419 +CONFIG_MFD_MT6360=m
5420 +CONFIG_MFD_MT6397=m
5421 +CONFIG_MFD_MENF21BMC=m
5422 +CONFIG_EZX_PCAP=y
5423 +CONFIG_MFD_VIPERBOARD=m
5424 +CONFIG_MFD_RETU=m
5425 +CONFIG_MFD_PCF50633=m
5426 +CONFIG_PCF50633_ADC=m
5427 +CONFIG_PCF50633_GPIO=m
5428 +CONFIG_UCB1400_CORE=m
5429 +CONFIG_MFD_RDC321X=m
5430 +CONFIG_MFD_RT5033=m
5431 +CONFIG_MFD_RC5T583=y
5432 +CONFIG_MFD_SEC_CORE=y
5433 +CONFIG_MFD_SI476X_CORE=m
5434 +CONFIG_MFD_SM501=m
5435 +CONFIG_MFD_SM501_GPIO=y
5436 +CONFIG_MFD_SKY81452=m
5437 +CONFIG_ABX500_CORE=y
5438 +CONFIG_AB3100_CORE=y
5439 +CONFIG_AB3100_OTP=m
5440 +CONFIG_MFD_SYSCON=y
5441 +CONFIG_MFD_TI_AM335X_TSCADC=m
5442 +CONFIG_MFD_LP3943=m
5443 +CONFIG_MFD_LP8788=y
5444 +CONFIG_MFD_TI_LMU=m
5445 +CONFIG_MFD_PALMAS=y
5446 +CONFIG_TPS6105X=m
5447 +CONFIG_TPS65010=m
5448 +CONFIG_TPS6507X=m
5449 +CONFIG_MFD_TPS65086=m
5450 +CONFIG_MFD_TPS65090=y
5451 +CONFIG_MFD_TPS68470=y
5452 +CONFIG_MFD_TI_LP873X=m
5453 +CONFIG_MFD_TPS6586X=y
5454 +CONFIG_MFD_TPS65910=y
5455 +CONFIG_MFD_TPS65912=y
5456 +CONFIG_MFD_TPS65912_I2C=y
5457 +CONFIG_MFD_TPS65912_SPI=y
5458 +CONFIG_MFD_TPS80031=y
5459 +CONFIG_TWL4030_CORE=y
5460 +CONFIG_MFD_TWL4030_AUDIO=y
5461 +CONFIG_TWL6040_CORE=y
5462 +CONFIG_MFD_WL1273_CORE=m
5463 +CONFIG_MFD_LM3533=m
5464 +CONFIG_MFD_TQMX86=m
5465 +CONFIG_MFD_VX855=m
5466 +CONFIG_MFD_ARIZONA=y
5467 +CONFIG_MFD_ARIZONA_I2C=m
5468 +CONFIG_MFD_ARIZONA_SPI=m
5469 +CONFIG_MFD_CS47L24=y
5470 +CONFIG_MFD_WM5102=y
5471 +CONFIG_MFD_WM5110=y
5472 +CONFIG_MFD_WM8997=y
5473 +CONFIG_MFD_WM8998=y
5474 +CONFIG_MFD_WM8400=y
5475 +CONFIG_MFD_WM831X=y
5476 +CONFIG_MFD_WM831X_I2C=y
5477 +CONFIG_MFD_WM831X_SPI=y
5478 +CONFIG_MFD_WM8350=y
5479 +CONFIG_MFD_WM8350_I2C=y
5480 +CONFIG_MFD_WM8994=m
5481 +CONFIG_MFD_WCD934X=m
5482 +CONFIG_RAVE_SP_CORE=m
5483 +CONFIG_MFD_INTEL_M10_BMC=m
5484 +# end of Multifunction device drivers
5486 +CONFIG_REGULATOR=y
5487 +# CONFIG_REGULATOR_DEBUG is not set
5488 +CONFIG_REGULATOR_FIXED_VOLTAGE=m
5489 +CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
5490 +CONFIG_REGULATOR_USERSPACE_CONSUMER=m
5491 +CONFIG_REGULATOR_88PG86X=m
5492 +CONFIG_REGULATOR_88PM800=m
5493 +CONFIG_REGULATOR_88PM8607=m
5494 +CONFIG_REGULATOR_ACT8865=m
5495 +CONFIG_REGULATOR_AD5398=m
5496 +CONFIG_REGULATOR_AAT2870=m
5497 +CONFIG_REGULATOR_ARIZONA_LDO1=m
5498 +CONFIG_REGULATOR_ARIZONA_MICSUPP=m
5499 +CONFIG_REGULATOR_AS3711=m
5500 +CONFIG_REGULATOR_AXP20X=m
5501 +CONFIG_REGULATOR_BCM590XX=m
5502 +CONFIG_REGULATOR_BD9571MWV=m
5503 +CONFIG_REGULATOR_DA903X=m
5504 +CONFIG_REGULATOR_DA9052=m
5505 +CONFIG_REGULATOR_DA9055=m
5506 +CONFIG_REGULATOR_DA9062=m
5507 +CONFIG_REGULATOR_DA9210=m
5508 +CONFIG_REGULATOR_DA9211=m
5509 +CONFIG_REGULATOR_FAN53555=m
5510 +CONFIG_REGULATOR_GPIO=m
5511 +CONFIG_REGULATOR_ISL9305=m
5512 +CONFIG_REGULATOR_ISL6271A=m
5513 +CONFIG_REGULATOR_LM363X=m
5514 +CONFIG_REGULATOR_LP3971=m
5515 +CONFIG_REGULATOR_LP3972=m
5516 +CONFIG_REGULATOR_LP872X=m
5517 +CONFIG_REGULATOR_LP8755=m
5518 +CONFIG_REGULATOR_LP8788=m
5519 +CONFIG_REGULATOR_LTC3589=m
5520 +CONFIG_REGULATOR_LTC3676=m
5521 +CONFIG_REGULATOR_MAX14577=m
5522 +CONFIG_REGULATOR_MAX1586=m
5523 +CONFIG_REGULATOR_MAX8649=m
5524 +CONFIG_REGULATOR_MAX8660=m
5525 +CONFIG_REGULATOR_MAX8907=m
5526 +CONFIG_REGULATOR_MAX8925=m
5527 +CONFIG_REGULATOR_MAX8952=m
5528 +CONFIG_REGULATOR_MAX8997=m
5529 +CONFIG_REGULATOR_MAX8998=m
5530 +CONFIG_REGULATOR_MAX77693=m
5531 +CONFIG_REGULATOR_MAX77826=m
5532 +CONFIG_REGULATOR_MC13XXX_CORE=m
5533 +CONFIG_REGULATOR_MC13783=m
5534 +CONFIG_REGULATOR_MC13892=m
5535 +CONFIG_REGULATOR_MP8859=m
5536 +CONFIG_REGULATOR_MT6311=m
5537 +CONFIG_REGULATOR_MT6315=m
5538 +CONFIG_REGULATOR_MT6323=m
5539 +CONFIG_REGULATOR_MT6358=m
5540 +CONFIG_REGULATOR_MT6360=m
5541 +CONFIG_REGULATOR_MT6397=m
5542 +CONFIG_REGULATOR_PALMAS=m
5543 +CONFIG_REGULATOR_PCA9450=m
5544 +CONFIG_REGULATOR_PCAP=m
5545 +CONFIG_REGULATOR_PCF50633=m
5546 +CONFIG_REGULATOR_PV88060=m
5547 +CONFIG_REGULATOR_PV88080=m
5548 +CONFIG_REGULATOR_PV88090=m
5549 +CONFIG_REGULATOR_PWM=m
5550 +CONFIG_REGULATOR_QCOM_SPMI=m
5551 +CONFIG_REGULATOR_QCOM_USB_VBUS=m
5552 +CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY=m
5553 +CONFIG_REGULATOR_RC5T583=m
5554 +CONFIG_REGULATOR_RT4801=m
5555 +CONFIG_REGULATOR_RT5033=m
5556 +CONFIG_REGULATOR_RTMV20=m
5557 +CONFIG_REGULATOR_S2MPA01=m
5558 +CONFIG_REGULATOR_S2MPS11=m
5559 +CONFIG_REGULATOR_S5M8767=m
5560 +CONFIG_REGULATOR_SKY81452=m
5561 +CONFIG_REGULATOR_SLG51000=m
5562 +CONFIG_REGULATOR_TPS51632=m
5563 +CONFIG_REGULATOR_TPS6105X=m
5564 +CONFIG_REGULATOR_TPS62360=m
5565 +CONFIG_REGULATOR_TPS65023=m
5566 +CONFIG_REGULATOR_TPS6507X=m
5567 +CONFIG_REGULATOR_TPS65086=m
5568 +CONFIG_REGULATOR_TPS65090=m
5569 +CONFIG_REGULATOR_TPS65132=m
5570 +CONFIG_REGULATOR_TPS6524X=m
5571 +CONFIG_REGULATOR_TPS6586X=m
5572 +CONFIG_REGULATOR_TPS65910=m
5573 +CONFIG_REGULATOR_TPS65912=m
5574 +CONFIG_REGULATOR_TPS80031=m
5575 +CONFIG_REGULATOR_TWL4030=m
5576 +CONFIG_REGULATOR_WM831X=m
5577 +CONFIG_REGULATOR_WM8350=m
5578 +CONFIG_REGULATOR_WM8400=m
5579 +CONFIG_REGULATOR_WM8994=m
5580 +CONFIG_REGULATOR_QCOM_LABIBB=m
5581 +CONFIG_RC_CORE=m
5582 +CONFIG_RC_MAP=m
5583 +CONFIG_LIRC=y
5584 +CONFIG_RC_DECODERS=y
5585 +CONFIG_IR_NEC_DECODER=m
5586 +CONFIG_IR_RC5_DECODER=m
5587 +CONFIG_IR_RC6_DECODER=m
5588 +CONFIG_IR_JVC_DECODER=m
5589 +CONFIG_IR_SONY_DECODER=m
5590 +CONFIG_IR_SANYO_DECODER=m
5591 +CONFIG_IR_SHARP_DECODER=m
5592 +CONFIG_IR_MCE_KBD_DECODER=m
5593 +CONFIG_IR_XMP_DECODER=m
5594 +CONFIG_IR_IMON_DECODER=m
5595 +CONFIG_IR_RCMM_DECODER=m
5596 +CONFIG_RC_DEVICES=y
5597 +CONFIG_RC_ATI_REMOTE=m
5598 +CONFIG_IR_ENE=m
5599 +CONFIG_IR_IMON=m
5600 +CONFIG_IR_IMON_RAW=m
5601 +CONFIG_IR_MCEUSB=m
5602 +CONFIG_IR_ITE_CIR=m
5603 +CONFIG_IR_FINTEK=m
5604 +CONFIG_IR_NUVOTON=m
5605 +CONFIG_IR_REDRAT3=m
5606 +CONFIG_IR_STREAMZAP=m
5607 +CONFIG_IR_WINBOND_CIR=m
5608 +CONFIG_IR_IGORPLUGUSB=m
5609 +CONFIG_IR_IGUANA=m
5610 +CONFIG_IR_TTUSBIR=m
5611 +CONFIG_RC_LOOPBACK=m
5612 +CONFIG_IR_SERIAL=m
5613 +CONFIG_IR_SERIAL_TRANSMITTER=y
5614 +CONFIG_IR_SIR=m
5615 +CONFIG_RC_XBOX_DVD=m
5616 +CONFIG_IR_TOY=m
5617 +CONFIG_CEC_CORE=m
5618 +CONFIG_CEC_NOTIFIER=y
5619 +CONFIG_CEC_PIN=y
5620 +CONFIG_MEDIA_CEC_RC=y
5621 +# CONFIG_CEC_PIN_ERROR_INJ is not set
5622 +CONFIG_MEDIA_CEC_SUPPORT=y
5623 +CONFIG_CEC_CH7322=m
5624 +CONFIG_CEC_CROS_EC=m
5625 +CONFIG_CEC_GPIO=m
5626 +CONFIG_CEC_SECO=m
5627 +CONFIG_CEC_SECO_RC=y
5628 +CONFIG_USB_PULSE8_CEC=m
5629 +CONFIG_USB_RAINSHADOW_CEC=m
5630 +CONFIG_MEDIA_SUPPORT=m
5631 +CONFIG_MEDIA_SUPPORT_FILTER=y
5632 +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
5635 +# Media device types
5637 +CONFIG_MEDIA_CAMERA_SUPPORT=y
5638 +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
5639 +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
5640 +CONFIG_MEDIA_RADIO_SUPPORT=y
5641 +CONFIG_MEDIA_SDR_SUPPORT=y
5642 +CONFIG_MEDIA_PLATFORM_SUPPORT=y
5643 +CONFIG_MEDIA_TEST_SUPPORT=y
5644 +# end of Media device types
5646 +CONFIG_VIDEO_DEV=m
5647 +CONFIG_MEDIA_CONTROLLER=y
5648 +CONFIG_DVB_CORE=m
5651 +# Video4Linux options
5653 +CONFIG_VIDEO_V4L2=m
5654 +CONFIG_VIDEO_V4L2_I2C=y
5655 +CONFIG_VIDEO_V4L2_SUBDEV_API=y
5656 +# CONFIG_VIDEO_ADV_DEBUG is not set
5657 +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
5658 +CONFIG_VIDEO_TUNER=m
5659 +CONFIG_V4L2_MEM2MEM_DEV=m
5660 +CONFIG_V4L2_FLASH_LED_CLASS=m
5661 +CONFIG_V4L2_FWNODE=m
5662 +CONFIG_VIDEOBUF_GEN=m
5663 +CONFIG_VIDEOBUF_DMA_SG=m
5664 +CONFIG_VIDEOBUF_VMALLOC=m
5665 +# end of Video4Linux options
5668 +# Media controller options
5670 +CONFIG_MEDIA_CONTROLLER_DVB=y
5671 +CONFIG_MEDIA_CONTROLLER_REQUEST_API=y
5674 +# Please notice that the enabled Media controller Request API is EXPERIMENTAL
5676 +# end of Media controller options
5679 +# Digital TV options
5681 +# CONFIG_DVB_MMAP is not set
5682 +CONFIG_DVB_NET=y
5683 +CONFIG_DVB_MAX_ADAPTERS=8
5684 +CONFIG_DVB_DYNAMIC_MINORS=y
5685 +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
5686 +# CONFIG_DVB_ULE_DEBUG is not set
5687 +# end of Digital TV options
5690 +# Media drivers
5694 +# Drivers filtered as selected at 'Filter media drivers'
5696 +CONFIG_TTPCI_EEPROM=m
5697 +CONFIG_MEDIA_USB_SUPPORT=y
5700 +# Webcam devices
5702 +CONFIG_USB_VIDEO_CLASS=m
5703 +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
5704 +CONFIG_USB_GSPCA=m
5705 +CONFIG_USB_M5602=m
5706 +CONFIG_USB_STV06XX=m
5707 +CONFIG_USB_GL860=m
5708 +CONFIG_USB_GSPCA_BENQ=m
5709 +CONFIG_USB_GSPCA_CONEX=m
5710 +CONFIG_USB_GSPCA_CPIA1=m
5711 +CONFIG_USB_GSPCA_DTCS033=m
5712 +CONFIG_USB_GSPCA_ETOMS=m
5713 +CONFIG_USB_GSPCA_FINEPIX=m
5714 +CONFIG_USB_GSPCA_JEILINJ=m
5715 +CONFIG_USB_GSPCA_JL2005BCD=m
5716 +CONFIG_USB_GSPCA_KINECT=m
5717 +CONFIG_USB_GSPCA_KONICA=m
5718 +CONFIG_USB_GSPCA_MARS=m
5719 +CONFIG_USB_GSPCA_MR97310A=m
5720 +CONFIG_USB_GSPCA_NW80X=m
5721 +CONFIG_USB_GSPCA_OV519=m
5722 +CONFIG_USB_GSPCA_OV534=m
5723 +CONFIG_USB_GSPCA_OV534_9=m
5724 +CONFIG_USB_GSPCA_PAC207=m
5725 +CONFIG_USB_GSPCA_PAC7302=m
5726 +CONFIG_USB_GSPCA_PAC7311=m
5727 +CONFIG_USB_GSPCA_SE401=m
5728 +CONFIG_USB_GSPCA_SN9C2028=m
5729 +CONFIG_USB_GSPCA_SN9C20X=m
5730 +CONFIG_USB_GSPCA_SONIXB=m
5731 +CONFIG_USB_GSPCA_SONIXJ=m
5732 +CONFIG_USB_GSPCA_SPCA500=m
5733 +CONFIG_USB_GSPCA_SPCA501=m
5734 +CONFIG_USB_GSPCA_SPCA505=m
5735 +CONFIG_USB_GSPCA_SPCA506=m
5736 +CONFIG_USB_GSPCA_SPCA508=m
5737 +CONFIG_USB_GSPCA_SPCA561=m
5738 +CONFIG_USB_GSPCA_SPCA1528=m
5739 +CONFIG_USB_GSPCA_SQ905=m
5740 +CONFIG_USB_GSPCA_SQ905C=m
5741 +CONFIG_USB_GSPCA_SQ930X=m
5742 +CONFIG_USB_GSPCA_STK014=m
5743 +CONFIG_USB_GSPCA_STK1135=m
5744 +CONFIG_USB_GSPCA_STV0680=m
5745 +CONFIG_USB_GSPCA_SUNPLUS=m
5746 +CONFIG_USB_GSPCA_T613=m
5747 +CONFIG_USB_GSPCA_TOPRO=m
5748 +CONFIG_USB_GSPCA_TOUPTEK=m
5749 +CONFIG_USB_GSPCA_TV8532=m
5750 +CONFIG_USB_GSPCA_VC032X=m
5751 +CONFIG_USB_GSPCA_VICAM=m
5752 +CONFIG_USB_GSPCA_XIRLINK_CIT=m
5753 +CONFIG_USB_GSPCA_ZC3XX=m
5754 +CONFIG_USB_PWC=m
5755 +# CONFIG_USB_PWC_DEBUG is not set
5756 +CONFIG_USB_PWC_INPUT_EVDEV=y
5757 +CONFIG_VIDEO_CPIA2=m
5758 +CONFIG_USB_ZR364XX=m
5759 +CONFIG_USB_STKWEBCAM=m
5760 +CONFIG_USB_S2255=m
5761 +CONFIG_VIDEO_USBTV=m
5764 +# Analog TV USB devices
5766 +CONFIG_VIDEO_PVRUSB2=m
5767 +CONFIG_VIDEO_PVRUSB2_SYSFS=y
5768 +CONFIG_VIDEO_PVRUSB2_DVB=y
5769 +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
5770 +CONFIG_VIDEO_HDPVR=m
5771 +CONFIG_VIDEO_STK1160_COMMON=m
5772 +CONFIG_VIDEO_STK1160=m
5773 +CONFIG_VIDEO_GO7007=m
5774 +CONFIG_VIDEO_GO7007_USB=m
5775 +CONFIG_VIDEO_GO7007_LOADER=m
5776 +CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m
5779 +# Analog/digital TV USB devices
5781 +CONFIG_VIDEO_AU0828=m
5782 +CONFIG_VIDEO_AU0828_V4L2=y
5783 +CONFIG_VIDEO_AU0828_RC=y
5784 +CONFIG_VIDEO_CX231XX=m
5785 +CONFIG_VIDEO_CX231XX_RC=y
5786 +CONFIG_VIDEO_CX231XX_ALSA=m
5787 +CONFIG_VIDEO_CX231XX_DVB=m
5788 +CONFIG_VIDEO_TM6000=m
5789 +CONFIG_VIDEO_TM6000_ALSA=m
5790 +CONFIG_VIDEO_TM6000_DVB=m
5793 +# Digital TV USB devices
5795 +CONFIG_DVB_USB=m
5796 +# CONFIG_DVB_USB_DEBUG is not set
5797 +CONFIG_DVB_USB_DIB3000MC=m
5798 +CONFIG_DVB_USB_A800=m
5799 +CONFIG_DVB_USB_DIBUSB_MB=m
5800 +# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
5801 +CONFIG_DVB_USB_DIBUSB_MC=m
5802 +CONFIG_DVB_USB_DIB0700=m
5803 +CONFIG_DVB_USB_UMT_010=m
5804 +CONFIG_DVB_USB_CXUSB=m
5805 +CONFIG_DVB_USB_CXUSB_ANALOG=y
5806 +CONFIG_DVB_USB_M920X=m
5807 +CONFIG_DVB_USB_DIGITV=m
5808 +CONFIG_DVB_USB_VP7045=m
5809 +CONFIG_DVB_USB_VP702X=m
5810 +CONFIG_DVB_USB_GP8PSK=m
5811 +CONFIG_DVB_USB_NOVA_T_USB2=m
5812 +CONFIG_DVB_USB_TTUSB2=m
5813 +CONFIG_DVB_USB_DTT200U=m
5814 +CONFIG_DVB_USB_OPERA1=m
5815 +CONFIG_DVB_USB_AF9005=m
5816 +CONFIG_DVB_USB_AF9005_REMOTE=m
5817 +CONFIG_DVB_USB_PCTV452E=m
5818 +CONFIG_DVB_USB_DW2102=m
5819 +CONFIG_DVB_USB_CINERGY_T2=m
5820 +CONFIG_DVB_USB_DTV5100=m
5821 +CONFIG_DVB_USB_AZ6027=m
5822 +CONFIG_DVB_USB_TECHNISAT_USB2=m
5823 +CONFIG_DVB_USB_V2=m
5824 +CONFIG_DVB_USB_AF9015=m
5825 +CONFIG_DVB_USB_AF9035=m
5826 +CONFIG_DVB_USB_ANYSEE=m
5827 +CONFIG_DVB_USB_AU6610=m
5828 +CONFIG_DVB_USB_AZ6007=m
5829 +CONFIG_DVB_USB_CE6230=m
5830 +CONFIG_DVB_USB_EC168=m
5831 +CONFIG_DVB_USB_GL861=m
5832 +CONFIG_DVB_USB_LME2510=m
5833 +CONFIG_DVB_USB_MXL111SF=m
5834 +CONFIG_DVB_USB_RTL28XXU=m
5835 +CONFIG_DVB_USB_DVBSKY=m
5836 +CONFIG_DVB_USB_ZD1301=m
5837 +CONFIG_DVB_TTUSB_BUDGET=m
5838 +CONFIG_DVB_TTUSB_DEC=m
5839 +CONFIG_SMS_USB_DRV=m
5840 +CONFIG_DVB_B2C2_FLEXCOP_USB=m
5841 +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
5842 +CONFIG_DVB_AS102=m
5845 +# Webcam, TV (analog/digital) USB devices
5847 +CONFIG_VIDEO_EM28XX=m
5848 +CONFIG_VIDEO_EM28XX_V4L2=m
5849 +CONFIG_VIDEO_EM28XX_ALSA=m
5850 +CONFIG_VIDEO_EM28XX_DVB=m
5851 +CONFIG_VIDEO_EM28XX_RC=m
5854 +# Software defined radio USB devices
5856 +CONFIG_USB_AIRSPY=m
5857 +CONFIG_USB_HACKRF=m
5858 +CONFIG_USB_MSI2500=m
5859 +CONFIG_MEDIA_PCI_SUPPORT=y
5862 +# Media capture support
5864 +CONFIG_VIDEO_MEYE=m
5865 +CONFIG_VIDEO_SOLO6X10=m
5866 +CONFIG_VIDEO_TW5864=m
5867 +CONFIG_VIDEO_TW68=m
5868 +CONFIG_VIDEO_TW686X=m
5871 +# Media capture/analog TV support
5873 +CONFIG_VIDEO_IVTV=m
5874 +# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set
5875 +CONFIG_VIDEO_IVTV_ALSA=m
5876 +CONFIG_VIDEO_FB_IVTV=m
5877 +CONFIG_VIDEO_FB_IVTV_FORCE_PAT=y
5878 +CONFIG_VIDEO_HEXIUM_GEMINI=m
5879 +CONFIG_VIDEO_HEXIUM_ORION=m
5880 +CONFIG_VIDEO_MXB=m
5881 +CONFIG_VIDEO_DT3155=m
5884 +# Media capture/analog/hybrid TV support
5886 +CONFIG_VIDEO_CX18=m
5887 +CONFIG_VIDEO_CX18_ALSA=m
5888 +CONFIG_VIDEO_CX23885=m
5889 +CONFIG_MEDIA_ALTERA_CI=m
5890 +CONFIG_VIDEO_CX25821=m
5891 +CONFIG_VIDEO_CX25821_ALSA=m
5892 +CONFIG_VIDEO_CX88=m
5893 +CONFIG_VIDEO_CX88_ALSA=m
5894 +CONFIG_VIDEO_CX88_BLACKBIRD=m
5895 +CONFIG_VIDEO_CX88_DVB=m
5896 +CONFIG_VIDEO_CX88_ENABLE_VP3054=y
5897 +CONFIG_VIDEO_CX88_VP3054=m
5898 +CONFIG_VIDEO_CX88_MPEG=m
5899 +CONFIG_VIDEO_BT848=m
5900 +CONFIG_DVB_BT8XX=m
5901 +CONFIG_VIDEO_SAA7134=m
5902 +CONFIG_VIDEO_SAA7134_ALSA=m
5903 +CONFIG_VIDEO_SAA7134_RC=y
5904 +CONFIG_VIDEO_SAA7134_DVB=m
5905 +CONFIG_VIDEO_SAA7134_GO7007=m
5906 +CONFIG_VIDEO_SAA7164=m
5907 +CONFIG_VIDEO_COBALT=m
5910 +# Media digital TV PCI Adapters
5912 +CONFIG_DVB_AV7110_IR=y
5913 +CONFIG_DVB_AV7110=m
5914 +CONFIG_DVB_AV7110_OSD=y
5915 +CONFIG_DVB_BUDGET_CORE=m
5916 +CONFIG_DVB_BUDGET=m
5917 +CONFIG_DVB_BUDGET_CI=m
5918 +CONFIG_DVB_BUDGET_AV=m
5919 +CONFIG_DVB_BUDGET_PATCH=m
5920 +CONFIG_DVB_B2C2_FLEXCOP_PCI=m
5921 +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
5922 +CONFIG_DVB_PLUTO2=m
5923 +CONFIG_DVB_DM1105=m
5924 +CONFIG_DVB_PT1=m
5925 +CONFIG_DVB_PT3=m
5926 +CONFIG_MANTIS_CORE=m
5927 +CONFIG_DVB_MANTIS=m
5928 +CONFIG_DVB_HOPPER=m
5929 +CONFIG_DVB_NGENE=m
5930 +CONFIG_DVB_DDBRIDGE=m
5931 +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set
5932 +CONFIG_DVB_SMIPCIE=m
5933 +CONFIG_DVB_NETUP_UNIDVB=m
5934 +CONFIG_VIDEO_IPU3_CIO2=m
5935 +CONFIG_CIO2_BRIDGE=y
5936 +# CONFIG_VIDEO_PCI_SKELETON is not set
5937 +CONFIG_RADIO_ADAPTERS=y
5938 +CONFIG_RADIO_TEA575X=m
5939 +CONFIG_RADIO_SI470X=m
5940 +CONFIG_USB_SI470X=m
5941 +CONFIG_I2C_SI470X=m
5942 +CONFIG_RADIO_SI4713=m
5943 +CONFIG_USB_SI4713=m
5944 +CONFIG_PLATFORM_SI4713=m
5945 +CONFIG_I2C_SI4713=m
5946 +CONFIG_RADIO_SI476X=m
5947 +CONFIG_USB_MR800=m
5948 +CONFIG_USB_DSBR=m
5949 +CONFIG_RADIO_MAXIRADIO=m
5950 +CONFIG_RADIO_SHARK=m
5951 +CONFIG_RADIO_SHARK2=m
5952 +CONFIG_USB_KEENE=m
5953 +CONFIG_USB_RAREMONO=m
5954 +CONFIG_USB_MA901=m
5955 +CONFIG_RADIO_TEA5764=m
5956 +CONFIG_RADIO_SAA7706H=m
5957 +CONFIG_RADIO_TEF6862=m
5958 +CONFIG_RADIO_WL1273=m
5959 +CONFIG_RADIO_WL128X=m
5960 +CONFIG_MEDIA_COMMON_OPTIONS=y
5963 +# common driver options
5965 +CONFIG_VIDEO_CX2341X=m
5966 +CONFIG_VIDEO_TVEEPROM=m
5967 +CONFIG_CYPRESS_FIRMWARE=m
5968 +CONFIG_VIDEOBUF2_CORE=m
5969 +CONFIG_VIDEOBUF2_V4L2=m
5970 +CONFIG_VIDEOBUF2_MEMOPS=m
5971 +CONFIG_VIDEOBUF2_DMA_CONTIG=m
5972 +CONFIG_VIDEOBUF2_VMALLOC=m
5973 +CONFIG_VIDEOBUF2_DMA_SG=m
5974 +CONFIG_VIDEOBUF2_DVB=m
5975 +CONFIG_DVB_B2C2_FLEXCOP=m
5976 +CONFIG_VIDEO_SAA7146=m
5977 +CONFIG_VIDEO_SAA7146_VV=m
5978 +CONFIG_SMS_SIANO_MDTV=m
5979 +CONFIG_SMS_SIANO_RC=y
5980 +CONFIG_SMS_SIANO_DEBUGFS=y
5981 +CONFIG_VIDEO_V4L2_TPG=m
5982 +CONFIG_V4L_PLATFORM_DRIVERS=y
5983 +CONFIG_VIDEO_CAFE_CCIC=m
5984 +CONFIG_VIDEO_VIA_CAMERA=m
5985 +CONFIG_VIDEO_CADENCE=y
5986 +CONFIG_VIDEO_CADENCE_CSI2RX=m
5987 +CONFIG_VIDEO_CADENCE_CSI2TX=m
5988 +CONFIG_VIDEO_ASPEED=m
5989 +CONFIG_V4L_MEM2MEM_DRIVERS=y
5990 +CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
5991 +CONFIG_DVB_PLATFORM_DRIVERS=y
5992 +CONFIG_SDR_PLATFORM_DRIVERS=y
5995 +# MMC/SDIO DVB adapters
5997 +CONFIG_SMS_SDIO_DRV=m
5998 +CONFIG_V4L_TEST_DRIVERS=y
5999 +CONFIG_VIDEO_VIMC=m
6000 +CONFIG_VIDEO_VIVID=m
6001 +CONFIG_VIDEO_VIVID_CEC=y
6002 +CONFIG_VIDEO_VIVID_MAX_DEVS=64
6003 +CONFIG_VIDEO_VIM2M=m
6004 +CONFIG_VIDEO_VICODEC=m
6005 +# CONFIG_DVB_TEST_DRIVERS is not set
6008 +# FireWire (IEEE 1394) Adapters
6010 +CONFIG_DVB_FIREDTV=m
6011 +CONFIG_DVB_FIREDTV_INPUT=y
6012 +# end of Media drivers
6015 +# Media ancillary drivers
6017 +CONFIG_MEDIA_ATTACH=y
6020 +# IR I2C driver auto-selected by 'Autoselect ancillary drivers'
6022 +CONFIG_VIDEO_IR_I2C=m
6025 +# Audio decoders, processors and mixers
6027 +CONFIG_VIDEO_TVAUDIO=m
6028 +CONFIG_VIDEO_TDA7432=m
6029 +CONFIG_VIDEO_TDA9840=m
6030 +CONFIG_VIDEO_TDA1997X=m
6031 +CONFIG_VIDEO_TEA6415C=m
6032 +CONFIG_VIDEO_TEA6420=m
6033 +CONFIG_VIDEO_MSP3400=m
6034 +CONFIG_VIDEO_CS3308=m
6035 +CONFIG_VIDEO_CS5345=m
6036 +CONFIG_VIDEO_CS53L32A=m
6037 +CONFIG_VIDEO_TLV320AIC23B=m
6038 +CONFIG_VIDEO_UDA1342=m
6039 +CONFIG_VIDEO_WM8775=m
6040 +CONFIG_VIDEO_WM8739=m
6041 +CONFIG_VIDEO_VP27SMPX=m
6042 +CONFIG_VIDEO_SONY_BTF_MPX=m
6043 +# end of Audio decoders, processors and mixers
6046 +# RDS decoders
6048 +CONFIG_VIDEO_SAA6588=m
6049 +# end of RDS decoders
6052 +# Video decoders
6054 +CONFIG_VIDEO_ADV7180=m
6055 +CONFIG_VIDEO_ADV7183=m
6056 +CONFIG_VIDEO_ADV7604=m
6057 +CONFIG_VIDEO_ADV7604_CEC=y
6058 +CONFIG_VIDEO_ADV7842=m
6059 +CONFIG_VIDEO_ADV7842_CEC=y
6060 +CONFIG_VIDEO_BT819=m
6061 +CONFIG_VIDEO_BT856=m
6062 +CONFIG_VIDEO_BT866=m
6063 +CONFIG_VIDEO_KS0127=m
6064 +CONFIG_VIDEO_ML86V7667=m
6065 +CONFIG_VIDEO_SAA7110=m
6066 +CONFIG_VIDEO_SAA711X=m
6067 +CONFIG_VIDEO_TC358743=m
6068 +CONFIG_VIDEO_TC358743_CEC=y
6069 +CONFIG_VIDEO_TVP514X=m
6070 +CONFIG_VIDEO_TVP5150=m
6071 +CONFIG_VIDEO_TVP7002=m
6072 +CONFIG_VIDEO_TW2804=m
6073 +CONFIG_VIDEO_TW9903=m
6074 +CONFIG_VIDEO_TW9906=m
6075 +CONFIG_VIDEO_TW9910=m
6076 +CONFIG_VIDEO_VPX3220=m
6079 +# Video and audio decoders
6081 +CONFIG_VIDEO_SAA717X=m
6082 +CONFIG_VIDEO_CX25840=m
6083 +# end of Video decoders
6086 +# Video encoders
6088 +CONFIG_VIDEO_SAA7127=m
6089 +CONFIG_VIDEO_SAA7185=m
6090 +CONFIG_VIDEO_ADV7170=m
6091 +CONFIG_VIDEO_ADV7175=m
6092 +CONFIG_VIDEO_ADV7343=m
6093 +CONFIG_VIDEO_ADV7393=m
6094 +CONFIG_VIDEO_ADV7511=m
6095 +CONFIG_VIDEO_ADV7511_CEC=y
6096 +CONFIG_VIDEO_AD9389B=m
6097 +CONFIG_VIDEO_AK881X=m
6098 +CONFIG_VIDEO_THS8200=m
6099 +# end of Video encoders
6102 +# Video improvement chips
6104 +CONFIG_VIDEO_UPD64031A=m
6105 +CONFIG_VIDEO_UPD64083=m
6106 +# end of Video improvement chips
6109 +# Audio/Video compression chips
6111 +CONFIG_VIDEO_SAA6752HS=m
6112 +# end of Audio/Video compression chips
6115 +# SDR tuner chips
6117 +CONFIG_SDR_MAX2175=m
6118 +# end of SDR tuner chips
6121 +# Miscellaneous helper chips
6123 +CONFIG_VIDEO_THS7303=m
6124 +CONFIG_VIDEO_M52790=m
6125 +CONFIG_VIDEO_I2C=m
6126 +CONFIG_VIDEO_ST_MIPID02=m
6127 +# end of Miscellaneous helper chips
6130 +# Camera sensor devices
6132 +CONFIG_VIDEO_APTINA_PLL=m
6133 +CONFIG_VIDEO_CCS_PLL=m
6134 +CONFIG_VIDEO_HI556=m
6135 +CONFIG_VIDEO_IMX214=m
6136 +CONFIG_VIDEO_IMX219=m
6137 +CONFIG_VIDEO_IMX258=m
6138 +CONFIG_VIDEO_IMX274=m
6139 +CONFIG_VIDEO_IMX290=m
6140 +CONFIG_VIDEO_IMX319=m
6141 +CONFIG_VIDEO_IMX355=m
6142 +CONFIG_VIDEO_OV02A10=m
6143 +CONFIG_VIDEO_OV2640=m
6144 +CONFIG_VIDEO_OV2659=m
6145 +CONFIG_VIDEO_OV2680=m
6146 +CONFIG_VIDEO_OV2685=m
6147 +CONFIG_VIDEO_OV2740=m
6148 +CONFIG_VIDEO_OV5647=m
6149 +CONFIG_VIDEO_OV5648=m
6150 +CONFIG_VIDEO_OV6650=m
6151 +CONFIG_VIDEO_OV5670=m
6152 +CONFIG_VIDEO_OV5675=m
6153 +CONFIG_VIDEO_OV5695=m
6154 +CONFIG_VIDEO_OV7251=m
6155 +CONFIG_VIDEO_OV772X=m
6156 +CONFIG_VIDEO_OV7640=m
6157 +CONFIG_VIDEO_OV7670=m
6158 +CONFIG_VIDEO_OV7740=m
6159 +CONFIG_VIDEO_OV8856=m
6160 +CONFIG_VIDEO_OV8865=m
6161 +CONFIG_VIDEO_OV9640=m
6162 +CONFIG_VIDEO_OV9650=m
6163 +CONFIG_VIDEO_OV9734=m
6164 +CONFIG_VIDEO_OV13858=m
6165 +CONFIG_VIDEO_VS6624=m
6166 +CONFIG_VIDEO_MT9M001=m
6167 +CONFIG_VIDEO_MT9M032=m
6168 +CONFIG_VIDEO_MT9M111=m
6169 +CONFIG_VIDEO_MT9P031=m
6170 +CONFIG_VIDEO_MT9T001=m
6171 +CONFIG_VIDEO_MT9T112=m
6172 +CONFIG_VIDEO_MT9V011=m
6173 +CONFIG_VIDEO_MT9V032=m
6174 +CONFIG_VIDEO_MT9V111=m
6175 +CONFIG_VIDEO_SR030PC30=m
6176 +CONFIG_VIDEO_NOON010PC30=m
6177 +CONFIG_VIDEO_M5MOLS=m
6178 +CONFIG_VIDEO_MAX9271_LIB=m
6179 +CONFIG_VIDEO_RDACM20=m
6180 +CONFIG_VIDEO_RDACM21=m
6181 +CONFIG_VIDEO_RJ54N1=m
6182 +CONFIG_VIDEO_S5K6AA=m
6183 +CONFIG_VIDEO_S5K6A3=m
6184 +CONFIG_VIDEO_S5K4ECGX=m
6185 +CONFIG_VIDEO_S5K5BAF=m
6186 +CONFIG_VIDEO_CCS=m
6187 +CONFIG_VIDEO_ET8EK8=m
6188 +CONFIG_VIDEO_S5C73M3=m
6189 +# end of Camera sensor devices
6192 +# Lens drivers
6194 +CONFIG_VIDEO_AD5820=m
6195 +CONFIG_VIDEO_AK7375=m
6196 +CONFIG_VIDEO_DW9714=m
6197 +CONFIG_VIDEO_DW9768=m
6198 +CONFIG_VIDEO_DW9807_VCM=m
6199 +# end of Lens drivers
6202 +# Flash devices
6204 +CONFIG_VIDEO_ADP1653=m
6205 +CONFIG_VIDEO_LM3560=m
6206 +CONFIG_VIDEO_LM3646=m
6207 +# end of Flash devices
6210 +# SPI helper chips
6212 +CONFIG_VIDEO_GS1662=m
6213 +# end of SPI helper chips
6216 +# Media SPI Adapters
6218 +CONFIG_CXD2880_SPI_DRV=m
6219 +# end of Media SPI Adapters
6221 +CONFIG_MEDIA_TUNER=m
6224 +# Customize TV tuners
6226 +CONFIG_MEDIA_TUNER_SIMPLE=m
6227 +CONFIG_MEDIA_TUNER_TDA18250=m
6228 +CONFIG_MEDIA_TUNER_TDA8290=m
6229 +CONFIG_MEDIA_TUNER_TDA827X=m
6230 +CONFIG_MEDIA_TUNER_TDA18271=m
6231 +CONFIG_MEDIA_TUNER_TDA9887=m
6232 +CONFIG_MEDIA_TUNER_TEA5761=m
6233 +CONFIG_MEDIA_TUNER_TEA5767=m
6234 +CONFIG_MEDIA_TUNER_MSI001=m
6235 +CONFIG_MEDIA_TUNER_MT20XX=m
6236 +CONFIG_MEDIA_TUNER_MT2060=m
6237 +CONFIG_MEDIA_TUNER_MT2063=m
6238 +CONFIG_MEDIA_TUNER_MT2266=m
6239 +CONFIG_MEDIA_TUNER_MT2131=m
6240 +CONFIG_MEDIA_TUNER_QT1010=m
6241 +CONFIG_MEDIA_TUNER_XC2028=m
6242 +CONFIG_MEDIA_TUNER_XC5000=m
6243 +CONFIG_MEDIA_TUNER_XC4000=m
6244 +CONFIG_MEDIA_TUNER_MXL5005S=m
6245 +CONFIG_MEDIA_TUNER_MXL5007T=m
6246 +CONFIG_MEDIA_TUNER_MC44S803=m
6247 +CONFIG_MEDIA_TUNER_MAX2165=m
6248 +CONFIG_MEDIA_TUNER_TDA18218=m
6249 +CONFIG_MEDIA_TUNER_FC0011=m
6250 +CONFIG_MEDIA_TUNER_FC0012=m
6251 +CONFIG_MEDIA_TUNER_FC0013=m
6252 +CONFIG_MEDIA_TUNER_TDA18212=m
6253 +CONFIG_MEDIA_TUNER_E4000=m
6254 +CONFIG_MEDIA_TUNER_FC2580=m
6255 +CONFIG_MEDIA_TUNER_M88RS6000T=m
6256 +CONFIG_MEDIA_TUNER_TUA9001=m
6257 +CONFIG_MEDIA_TUNER_SI2157=m
6258 +CONFIG_MEDIA_TUNER_IT913X=m
6259 +CONFIG_MEDIA_TUNER_R820T=m
6260 +CONFIG_MEDIA_TUNER_MXL301RF=m
6261 +CONFIG_MEDIA_TUNER_QM1D1C0042=m
6262 +CONFIG_MEDIA_TUNER_QM1D1B0004=m
6263 +# end of Customize TV tuners
6266 +# Customise DVB Frontends
6270 +# Multistandard (satellite) frontends
6272 +CONFIG_DVB_STB0899=m
6273 +CONFIG_DVB_STB6100=m
6274 +CONFIG_DVB_STV090x=m
6275 +CONFIG_DVB_STV0910=m
6276 +CONFIG_DVB_STV6110x=m
6277 +CONFIG_DVB_STV6111=m
6278 +CONFIG_DVB_MXL5XX=m
6279 +CONFIG_DVB_M88DS3103=m
6282 +# Multistandard (cable + terrestrial) frontends
6284 +CONFIG_DVB_DRXK=m
6285 +CONFIG_DVB_TDA18271C2DD=m
6286 +CONFIG_DVB_SI2165=m
6287 +CONFIG_DVB_MN88472=m
6288 +CONFIG_DVB_MN88473=m
6291 +# DVB-S (satellite) frontends
6293 +CONFIG_DVB_CX24110=m
6294 +CONFIG_DVB_CX24123=m
6295 +CONFIG_DVB_MT312=m
6296 +CONFIG_DVB_ZL10036=m
6297 +CONFIG_DVB_ZL10039=m
6298 +CONFIG_DVB_S5H1420=m
6299 +CONFIG_DVB_STV0288=m
6300 +CONFIG_DVB_STB6000=m
6301 +CONFIG_DVB_STV0299=m
6302 +CONFIG_DVB_STV6110=m
6303 +CONFIG_DVB_STV0900=m
6304 +CONFIG_DVB_TDA8083=m
6305 +CONFIG_DVB_TDA10086=m
6306 +CONFIG_DVB_TDA8261=m
6307 +CONFIG_DVB_VES1X93=m
6308 +CONFIG_DVB_TUNER_ITD1000=m
6309 +CONFIG_DVB_TUNER_CX24113=m
6310 +CONFIG_DVB_TDA826X=m
6311 +CONFIG_DVB_TUA6100=m
6312 +CONFIG_DVB_CX24116=m
6313 +CONFIG_DVB_CX24117=m
6314 +CONFIG_DVB_CX24120=m
6315 +CONFIG_DVB_SI21XX=m
6316 +CONFIG_DVB_TS2020=m
6317 +CONFIG_DVB_DS3000=m
6318 +CONFIG_DVB_MB86A16=m
6319 +CONFIG_DVB_TDA10071=m
6322 +# DVB-T (terrestrial) frontends
6324 +CONFIG_DVB_SP8870=m
6325 +CONFIG_DVB_SP887X=m
6326 +CONFIG_DVB_CX22700=m
6327 +CONFIG_DVB_CX22702=m
6328 +CONFIG_DVB_S5H1432=m
6329 +CONFIG_DVB_DRXD=m
6330 +CONFIG_DVB_L64781=m
6331 +CONFIG_DVB_TDA1004X=m
6332 +CONFIG_DVB_NXT6000=m
6333 +CONFIG_DVB_MT352=m
6334 +CONFIG_DVB_ZL10353=m
6335 +CONFIG_DVB_DIB3000MB=m
6336 +CONFIG_DVB_DIB3000MC=m
6337 +CONFIG_DVB_DIB7000M=m
6338 +CONFIG_DVB_DIB7000P=m
6339 +CONFIG_DVB_DIB9000=m
6340 +CONFIG_DVB_TDA10048=m
6341 +CONFIG_DVB_AF9013=m
6342 +CONFIG_DVB_EC100=m
6343 +CONFIG_DVB_STV0367=m
6344 +CONFIG_DVB_CXD2820R=m
6345 +CONFIG_DVB_CXD2841ER=m
6346 +CONFIG_DVB_RTL2830=m
6347 +CONFIG_DVB_RTL2832=m
6348 +CONFIG_DVB_RTL2832_SDR=m
6349 +CONFIG_DVB_SI2168=m
6350 +CONFIG_DVB_AS102_FE=m
6351 +CONFIG_DVB_ZD1301_DEMOD=m
6352 +CONFIG_DVB_GP8PSK_FE=m
6353 +CONFIG_DVB_CXD2880=m
6356 +# DVB-C (cable) frontends
6358 +CONFIG_DVB_VES1820=m
6359 +CONFIG_DVB_TDA10021=m
6360 +CONFIG_DVB_TDA10023=m
6361 +CONFIG_DVB_STV0297=m
6364 +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
6366 +CONFIG_DVB_NXT200X=m
6367 +CONFIG_DVB_OR51211=m
6368 +CONFIG_DVB_OR51132=m
6369 +CONFIG_DVB_BCM3510=m
6370 +CONFIG_DVB_LGDT330X=m
6371 +CONFIG_DVB_LGDT3305=m
6372 +CONFIG_DVB_LGDT3306A=m
6373 +CONFIG_DVB_LG2160=m
6374 +CONFIG_DVB_S5H1409=m
6375 +CONFIG_DVB_AU8522=m
6376 +CONFIG_DVB_AU8522_DTV=m
6377 +CONFIG_DVB_AU8522_V4L=m
6378 +CONFIG_DVB_S5H1411=m
6379 +CONFIG_DVB_MXL692=m
6382 +# ISDB-T (terrestrial) frontends
6384 +CONFIG_DVB_S921=m
6385 +CONFIG_DVB_DIB8000=m
6386 +CONFIG_DVB_MB86A20S=m
6389 +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
6391 +CONFIG_DVB_TC90522=m
6392 +CONFIG_DVB_MN88443X=m
6395 +# Digital terrestrial only tuners/PLL
6397 +CONFIG_DVB_PLL=m
6398 +CONFIG_DVB_TUNER_DIB0070=m
6399 +CONFIG_DVB_TUNER_DIB0090=m
6402 +# SEC control devices for DVB-S
6404 +CONFIG_DVB_DRX39XYJ=m
6405 +CONFIG_DVB_LNBH25=m
6406 +CONFIG_DVB_LNBH29=m
6407 +CONFIG_DVB_LNBP21=m
6408 +CONFIG_DVB_LNBP22=m
6409 +CONFIG_DVB_ISL6405=m
6410 +CONFIG_DVB_ISL6421=m
6411 +CONFIG_DVB_ISL6423=m
6412 +CONFIG_DVB_A8293=m
6413 +CONFIG_DVB_LGS8GL5=m
6414 +CONFIG_DVB_LGS8GXX=m
6415 +CONFIG_DVB_ATBM8830=m
6416 +CONFIG_DVB_TDA665x=m
6417 +CONFIG_DVB_IX2505V=m
6418 +CONFIG_DVB_M88RS2000=m
6419 +CONFIG_DVB_AF9033=m
6420 +CONFIG_DVB_HORUS3A=m
6421 +CONFIG_DVB_ASCOT2E=m
6422 +CONFIG_DVB_HELENE=m
6425 +# Common Interface (EN50221) controller drivers
6427 +CONFIG_DVB_CXD2099=m
6428 +CONFIG_DVB_SP2=m
6429 +# end of Customise DVB Frontends
6432 +# Tools to develop new frontends
6434 +CONFIG_DVB_DUMMY_FE=m
6435 +# end of Media ancillary drivers
6438 +# Graphics support
6440 +CONFIG_AGP=y
6441 +CONFIG_AGP_AMD64=y
6442 +CONFIG_AGP_INTEL=y
6443 +CONFIG_AGP_SIS=m
6444 +CONFIG_AGP_VIA=y
6445 +CONFIG_INTEL_GTT=y
6446 +CONFIG_VGA_ARB=y
6447 +CONFIG_VGA_ARB_MAX_GPUS=16
6448 +CONFIG_VGA_SWITCHEROO=y
6449 +CONFIG_DRM=m
6450 +CONFIG_DRM_MIPI_DBI=m
6451 +CONFIG_DRM_MIPI_DSI=y
6452 +CONFIG_DRM_DP_AUX_CHARDEV=y
6453 +# CONFIG_DRM_DEBUG_SELFTEST is not set
6454 +CONFIG_DRM_KMS_HELPER=m
6455 +CONFIG_DRM_KMS_FB_HELPER=y
6456 +# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set
6457 +CONFIG_DRM_FBDEV_EMULATION=y
6458 +CONFIG_DRM_FBDEV_OVERALLOC=100
6459 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
6460 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y
6461 +CONFIG_DRM_DP_CEC=y
6462 +CONFIG_DRM_TTM=m
6463 +CONFIG_DRM_VRAM_HELPER=m
6464 +CONFIG_DRM_TTM_HELPER=m
6465 +CONFIG_DRM_GEM_CMA_HELPER=y
6466 +CONFIG_DRM_KMS_CMA_HELPER=y
6467 +CONFIG_DRM_GEM_SHMEM_HELPER=y
6468 +CONFIG_DRM_SCHED=m
6471 +# I2C encoder or helper chips
6473 +CONFIG_DRM_I2C_CH7006=m
6474 +CONFIG_DRM_I2C_SIL164=m
6475 +CONFIG_DRM_I2C_NXP_TDA998X=m
6476 +CONFIG_DRM_I2C_NXP_TDA9950=m
6477 +# end of I2C encoder or helper chips
6480 +# ARM devices
6482 +# end of ARM devices
6484 +CONFIG_DRM_RADEON=m
6485 +# CONFIG_DRM_RADEON_USERPTR is not set
6486 +CONFIG_DRM_AMDGPU=m
6487 +CONFIG_DRM_AMDGPU_SI=y
6488 +CONFIG_DRM_AMDGPU_CIK=y
6489 +CONFIG_DRM_AMDGPU_USERPTR=y
6490 +# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
6493 +# ACP (Audio CoProcessor) Configuration
6495 +CONFIG_DRM_AMD_ACP=y
6496 +# end of ACP (Audio CoProcessor) Configuration
6499 +# Display Engine Configuration
6501 +CONFIG_DRM_AMD_DC=y
6502 +CONFIG_DRM_AMD_DC_DCN=y
6503 +CONFIG_DRM_AMD_DC_HDCP=y
6504 +CONFIG_DRM_AMD_DC_SI=y
6505 +# CONFIG_DEBUG_KERNEL_DC is not set
6506 +# end of Display Engine Configuration
6508 +CONFIG_HSA_AMD=y
6509 +CONFIG_DRM_NOUVEAU=m
6510 +# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set
6511 +CONFIG_NOUVEAU_DEBUG=5
6512 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3
6513 +# CONFIG_NOUVEAU_DEBUG_MMU is not set
6514 +# CONFIG_NOUVEAU_DEBUG_PUSH is not set
6515 +CONFIG_DRM_NOUVEAU_BACKLIGHT=y
6516 +# CONFIG_DRM_NOUVEAU_SVM is not set
6517 +CONFIG_DRM_I915=m
6518 +CONFIG_DRM_I915_FORCE_PROBE=""
6519 +CONFIG_DRM_I915_CAPTURE_ERROR=y
6520 +CONFIG_DRM_I915_COMPRESS_ERROR=y
6521 +CONFIG_DRM_I915_USERPTR=y
6522 +CONFIG_DRM_I915_GVT=y
6523 +CONFIG_DRM_I915_GVT_KVMGT=m
6526 +# drm/i915 Debugging
6528 +# CONFIG_DRM_I915_WERROR is not set
6529 +# CONFIG_DRM_I915_DEBUG is not set
6530 +# CONFIG_DRM_I915_DEBUG_MMIO is not set
6531 +# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set
6532 +# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set
6533 +# CONFIG_DRM_I915_DEBUG_GUC is not set
6534 +# CONFIG_DRM_I915_SELFTEST is not set
6535 +# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set
6536 +# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set
6537 +# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set
6538 +# end of drm/i915 Debugging
6541 +# drm/i915 Profile Guided Optimisation
6543 +CONFIG_DRM_I915_FENCE_TIMEOUT=10000
6544 +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250
6545 +CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500
6546 +CONFIG_DRM_I915_PREEMPT_TIMEOUT=640
6547 +CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000
6548 +CONFIG_DRM_I915_STOP_TIMEOUT=100
6549 +CONFIG_DRM_I915_TIMESLICE_DURATION=1
6550 +# end of drm/i915 Profile Guided Optimisation
6552 +CONFIG_DRM_VGEM=m
6553 +CONFIG_DRM_VKMS=m
6554 +CONFIG_DRM_VMWGFX=m
6555 +CONFIG_DRM_VMWGFX_FBCON=y
6556 +CONFIG_DRM_GMA500=m
6557 +CONFIG_DRM_GMA600=y
6558 +CONFIG_DRM_UDL=m
6559 +CONFIG_DRM_AST=m
6560 +CONFIG_DRM_MGAG200=m
6561 +CONFIG_DRM_QXL=m
6562 +CONFIG_DRM_BOCHS=m
6563 +CONFIG_DRM_VIRTIO_GPU=m
6564 +CONFIG_DRM_PANEL=y
6567 +# Display Panels
6569 +CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m
6570 +# end of Display Panels
6572 +CONFIG_DRM_BRIDGE=y
6573 +CONFIG_DRM_PANEL_BRIDGE=y
6576 +# Display Interface Bridges
6578 +CONFIG_DRM_ANALOGIX_ANX78XX=m
6579 +CONFIG_DRM_ANALOGIX_DP=m
6580 +# end of Display Interface Bridges
6582 +# CONFIG_DRM_ETNAVIV is not set
6583 +CONFIG_DRM_CIRRUS_QEMU=m
6584 +CONFIG_DRM_GM12U320=m
6585 +CONFIG_TINYDRM_HX8357D=m
6586 +CONFIG_TINYDRM_ILI9225=m
6587 +CONFIG_TINYDRM_ILI9341=m
6588 +CONFIG_TINYDRM_ILI9486=m
6589 +CONFIG_TINYDRM_MI0283QT=m
6590 +CONFIG_TINYDRM_REPAPER=m
6591 +CONFIG_TINYDRM_ST7586=m
6592 +CONFIG_TINYDRM_ST7735R=m
6593 +CONFIG_DRM_XEN=y
6594 +CONFIG_DRM_XEN_FRONTEND=m
6595 +CONFIG_DRM_VBOXVIDEO=m
6596 +# CONFIG_DRM_LEGACY is not set
6597 +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
6600 +# Frame buffer Devices
6602 +CONFIG_FB_CMDLINE=y
6603 +CONFIG_FB_NOTIFY=y
6604 +CONFIG_FB=y
6605 +CONFIG_FIRMWARE_EDID=y
6606 +CONFIG_FB_DDC=m
6607 +CONFIG_FB_BOOT_VESA_SUPPORT=y
6608 +CONFIG_FB_CFB_FILLRECT=y
6609 +CONFIG_FB_CFB_COPYAREA=y
6610 +CONFIG_FB_CFB_IMAGEBLIT=y
6611 +CONFIG_FB_SYS_FILLRECT=m
6612 +CONFIG_FB_SYS_COPYAREA=m
6613 +CONFIG_FB_SYS_IMAGEBLIT=m
6614 +# CONFIG_FB_FOREIGN_ENDIAN is not set
6615 +CONFIG_FB_SYS_FOPS=m
6616 +CONFIG_FB_DEFERRED_IO=y
6617 +CONFIG_FB_HECUBA=m
6618 +CONFIG_FB_SVGALIB=m
6619 +CONFIG_FB_BACKLIGHT=m
6620 +CONFIG_FB_MODE_HELPERS=y
6621 +CONFIG_FB_TILEBLITTING=y
6624 +# Frame buffer hardware drivers
6626 +CONFIG_FB_CIRRUS=m
6627 +CONFIG_FB_PM2=m
6628 +CONFIG_FB_PM2_FIFO_DISCONNECT=y
6629 +CONFIG_FB_CYBER2000=m
6630 +CONFIG_FB_CYBER2000_DDC=y
6631 +CONFIG_FB_ARC=m
6632 +CONFIG_FB_ASILIANT=y
6633 +CONFIG_FB_IMSTT=y
6634 +CONFIG_FB_VGA16=m
6635 +CONFIG_FB_UVESA=m
6636 +CONFIG_FB_VESA=y
6637 +CONFIG_FB_EFI=y
6638 +CONFIG_FB_N411=m
6639 +CONFIG_FB_HGA=m
6640 +CONFIG_FB_OPENCORES=m
6641 +CONFIG_FB_S1D13XXX=m
6642 +CONFIG_FB_NVIDIA=m
6643 +CONFIG_FB_NVIDIA_I2C=y
6644 +# CONFIG_FB_NVIDIA_DEBUG is not set
6645 +CONFIG_FB_NVIDIA_BACKLIGHT=y
6646 +CONFIG_FB_RIVA=m
6647 +CONFIG_FB_RIVA_I2C=y
6648 +# CONFIG_FB_RIVA_DEBUG is not set
6649 +CONFIG_FB_RIVA_BACKLIGHT=y
6650 +CONFIG_FB_I740=m
6651 +CONFIG_FB_LE80578=m
6652 +CONFIG_FB_CARILLO_RANCH=m
6653 +CONFIG_FB_INTEL=m
6654 +# CONFIG_FB_INTEL_DEBUG is not set
6655 +CONFIG_FB_INTEL_I2C=y
6656 +CONFIG_FB_MATROX=m
6657 +CONFIG_FB_MATROX_MILLENIUM=y
6658 +CONFIG_FB_MATROX_MYSTIQUE=y
6659 +CONFIG_FB_MATROX_G=y
6660 +CONFIG_FB_MATROX_I2C=m
6661 +CONFIG_FB_MATROX_MAVEN=m
6662 +CONFIG_FB_RADEON=m
6663 +CONFIG_FB_RADEON_I2C=y
6664 +CONFIG_FB_RADEON_BACKLIGHT=y
6665 +# CONFIG_FB_RADEON_DEBUG is not set
6666 +CONFIG_FB_ATY128=m
6667 +CONFIG_FB_ATY128_BACKLIGHT=y
6668 +CONFIG_FB_ATY=m
6669 +CONFIG_FB_ATY_CT=y
6670 +# CONFIG_FB_ATY_GENERIC_LCD is not set
6671 +CONFIG_FB_ATY_GX=y
6672 +CONFIG_FB_ATY_BACKLIGHT=y
6673 +CONFIG_FB_S3=m
6674 +CONFIG_FB_S3_DDC=y
6675 +CONFIG_FB_SAVAGE=m
6676 +CONFIG_FB_SAVAGE_I2C=y
6677 +# CONFIG_FB_SAVAGE_ACCEL is not set
6678 +CONFIG_FB_SIS=m
6679 +CONFIG_FB_SIS_300=y
6680 +CONFIG_FB_SIS_315=y
6681 +CONFIG_FB_VIA=m
6682 +# CONFIG_FB_VIA_DIRECT_PROCFS is not set
6683 +CONFIG_FB_VIA_X_COMPATIBILITY=y
6684 +CONFIG_FB_NEOMAGIC=m
6685 +CONFIG_FB_KYRO=m
6686 +CONFIG_FB_3DFX=m
6687 +# CONFIG_FB_3DFX_ACCEL is not set
6688 +# CONFIG_FB_3DFX_I2C is not set
6689 +CONFIG_FB_VOODOO1=m
6690 +CONFIG_FB_VT8623=m
6691 +CONFIG_FB_TRIDENT=m
6692 +CONFIG_FB_ARK=m
6693 +CONFIG_FB_PM3=m
6694 +CONFIG_FB_CARMINE=m
6695 +CONFIG_FB_CARMINE_DRAM_EVAL=y
6696 +# CONFIG_CARMINE_DRAM_CUSTOM is not set
6697 +CONFIG_FB_SM501=m
6698 +CONFIG_FB_SMSCUFX=m
6699 +CONFIG_FB_UDL=m
6700 +# CONFIG_FB_IBM_GXT4500 is not set
6701 +# CONFIG_FB_VIRTUAL is not set
6702 +CONFIG_XEN_FBDEV_FRONTEND=m
6703 +CONFIG_FB_METRONOME=m
6704 +CONFIG_FB_MB862XX=m
6705 +CONFIG_FB_MB862XX_PCI_GDC=y
6706 +CONFIG_FB_MB862XX_I2C=y
6707 +CONFIG_FB_HYPERV=m
6708 +CONFIG_FB_SIMPLE=y
6709 +CONFIG_FB_SM712=m
6710 +# end of Frame buffer Devices
6713 +# Backlight & LCD device support
6715 +CONFIG_LCD_CLASS_DEVICE=m
6716 +CONFIG_LCD_L4F00242T03=m
6717 +CONFIG_LCD_LMS283GF05=m
6718 +CONFIG_LCD_LTV350QV=m
6719 +CONFIG_LCD_ILI922X=m
6720 +CONFIG_LCD_ILI9320=m
6721 +CONFIG_LCD_TDO24M=m
6722 +CONFIG_LCD_VGG2432A4=m
6723 +CONFIG_LCD_PLATFORM=m
6724 +CONFIG_LCD_AMS369FG06=m
6725 +CONFIG_LCD_LMS501KF03=m
6726 +CONFIG_LCD_HX8357=m
6727 +CONFIG_LCD_OTM3225A=m
6728 +CONFIG_BACKLIGHT_CLASS_DEVICE=y
6729 +CONFIG_BACKLIGHT_KTD253=m
6730 +CONFIG_BACKLIGHT_LM3533=m
6731 +CONFIG_BACKLIGHT_CARILLO_RANCH=m
6732 +CONFIG_BACKLIGHT_PWM=m
6733 +CONFIG_BACKLIGHT_DA903X=m
6734 +CONFIG_BACKLIGHT_DA9052=m
6735 +CONFIG_BACKLIGHT_MAX8925=m
6736 +CONFIG_BACKLIGHT_APPLE=m
6737 +CONFIG_BACKLIGHT_QCOM_WLED=m
6738 +CONFIG_BACKLIGHT_SAHARA=m
6739 +CONFIG_BACKLIGHT_WM831X=m
6740 +CONFIG_BACKLIGHT_ADP5520=m
6741 +CONFIG_BACKLIGHT_ADP8860=m
6742 +CONFIG_BACKLIGHT_ADP8870=m
6743 +CONFIG_BACKLIGHT_88PM860X=m
6744 +CONFIG_BACKLIGHT_PCF50633=m
6745 +CONFIG_BACKLIGHT_AAT2870=m
6746 +CONFIG_BACKLIGHT_LM3630A=m
6747 +CONFIG_BACKLIGHT_LM3639=m
6748 +CONFIG_BACKLIGHT_LP855X=m
6749 +CONFIG_BACKLIGHT_LP8788=m
6750 +CONFIG_BACKLIGHT_PANDORA=m
6751 +CONFIG_BACKLIGHT_SKY81452=m
6752 +CONFIG_BACKLIGHT_AS3711=m
6753 +CONFIG_BACKLIGHT_GPIO=m
6754 +CONFIG_BACKLIGHT_LV5207LP=m
6755 +CONFIG_BACKLIGHT_BD6107=m
6756 +CONFIG_BACKLIGHT_ARCXCNN=m
6757 +CONFIG_BACKLIGHT_RAVE_SP=m
6758 +# end of Backlight & LCD device support
6760 +CONFIG_VGASTATE=m
6761 +CONFIG_VIDEOMODE_HELPERS=y
6762 +CONFIG_HDMI=y
6765 +# Console display driver support
6767 +CONFIG_VGA_CONSOLE=y
6768 +CONFIG_DUMMY_CONSOLE=y
6769 +CONFIG_DUMMY_CONSOLE_COLUMNS=80
6770 +CONFIG_DUMMY_CONSOLE_ROWS=25
6771 +CONFIG_FRAMEBUFFER_CONSOLE=y
6772 +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
6773 +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
6774 +CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y
6775 +# end of Console display driver support
6777 +# CONFIG_LOGO is not set
6778 +# end of Graphics support
6780 +CONFIG_SOUND=m
6781 +CONFIG_SOUND_OSS_CORE=y
6782 +# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
6783 +CONFIG_SND=m
6784 +CONFIG_SND_TIMER=m
6785 +CONFIG_SND_PCM=m
6786 +CONFIG_SND_PCM_ELD=y
6787 +CONFIG_SND_PCM_IEC958=y
6788 +CONFIG_SND_DMAENGINE_PCM=m
6789 +CONFIG_SND_HWDEP=m
6790 +CONFIG_SND_SEQ_DEVICE=m
6791 +CONFIG_SND_RAWMIDI=m
6792 +CONFIG_SND_COMPRESS_OFFLOAD=m
6793 +CONFIG_SND_JACK=y
6794 +CONFIG_SND_JACK_INPUT_DEV=y
6795 +CONFIG_SND_OSSEMUL=y
6796 +CONFIG_SND_MIXER_OSS=m
6797 +# CONFIG_SND_PCM_OSS is not set
6798 +CONFIG_SND_PCM_TIMER=y
6799 +CONFIG_SND_HRTIMER=m
6800 +CONFIG_SND_DYNAMIC_MINORS=y
6801 +CONFIG_SND_MAX_CARDS=32
6802 +CONFIG_SND_SUPPORT_OLD_API=y
6803 +CONFIG_SND_PROC_FS=y
6804 +CONFIG_SND_VERBOSE_PROCFS=y
6805 +# CONFIG_SND_VERBOSE_PRINTK is not set
6806 +# CONFIG_SND_DEBUG is not set
6807 +CONFIG_SND_VMASTER=y
6808 +CONFIG_SND_DMA_SGBUF=y
6809 +CONFIG_SND_SEQUENCER=m
6810 +CONFIG_SND_SEQ_DUMMY=m
6811 +# CONFIG_SND_SEQUENCER_OSS is not set
6812 +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
6813 +CONFIG_SND_SEQ_MIDI_EVENT=m
6814 +CONFIG_SND_SEQ_MIDI=m
6815 +CONFIG_SND_SEQ_MIDI_EMUL=m
6816 +CONFIG_SND_SEQ_VIRMIDI=m
6817 +CONFIG_SND_MPU401_UART=m
6818 +CONFIG_SND_OPL3_LIB=m
6819 +CONFIG_SND_OPL3_LIB_SEQ=m
6820 +CONFIG_SND_VX_LIB=m
6821 +CONFIG_SND_AC97_CODEC=m
6822 +CONFIG_SND_DRIVERS=y
6823 +CONFIG_SND_PCSP=m
6824 +CONFIG_SND_DUMMY=m
6825 +CONFIG_SND_ALOOP=m
6826 +CONFIG_SND_VIRMIDI=m
6827 +CONFIG_SND_MTPAV=m
6828 +CONFIG_SND_MTS64=m
6829 +CONFIG_SND_SERIAL_U16550=m
6830 +CONFIG_SND_MPU401=m
6831 +CONFIG_SND_PORTMAN2X4=m
6832 +CONFIG_SND_AC97_POWER_SAVE=y
6833 +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
6834 +CONFIG_SND_SB_COMMON=m
6835 +CONFIG_SND_PCI=y
6836 +CONFIG_SND_AD1889=m
6837 +CONFIG_SND_ALS300=m
6838 +CONFIG_SND_ALS4000=m
6839 +CONFIG_SND_ALI5451=m
6840 +CONFIG_SND_ASIHPI=m
6841 +CONFIG_SND_ATIIXP=m
6842 +CONFIG_SND_ATIIXP_MODEM=m
6843 +CONFIG_SND_AU8810=m
6844 +CONFIG_SND_AU8820=m
6845 +CONFIG_SND_AU8830=m
6846 +CONFIG_SND_AW2=m
6847 +CONFIG_SND_AZT3328=m
6848 +CONFIG_SND_BT87X=m
6849 +# CONFIG_SND_BT87X_OVERCLOCK is not set
6850 +CONFIG_SND_CA0106=m
6851 +CONFIG_SND_CMIPCI=m
6852 +CONFIG_SND_OXYGEN_LIB=m
6853 +CONFIG_SND_OXYGEN=m
6854 +CONFIG_SND_CS4281=m
6855 +CONFIG_SND_CS46XX=m
6856 +CONFIG_SND_CS46XX_NEW_DSP=y
6857 +CONFIG_SND_CTXFI=m
6858 +CONFIG_SND_DARLA20=m
6859 +CONFIG_SND_GINA20=m
6860 +CONFIG_SND_LAYLA20=m
6861 +CONFIG_SND_DARLA24=m
6862 +CONFIG_SND_GINA24=m
6863 +CONFIG_SND_LAYLA24=m
6864 +CONFIG_SND_MONA=m
6865 +CONFIG_SND_MIA=m
6866 +CONFIG_SND_ECHO3G=m
6867 +CONFIG_SND_INDIGO=m
6868 +CONFIG_SND_INDIGOIO=m
6869 +CONFIG_SND_INDIGODJ=m
6870 +CONFIG_SND_INDIGOIOX=m
6871 +CONFIG_SND_INDIGODJX=m
6872 +CONFIG_SND_EMU10K1=m
6873 +CONFIG_SND_EMU10K1_SEQ=m
6874 +CONFIG_SND_EMU10K1X=m
6875 +CONFIG_SND_ENS1370=m
6876 +CONFIG_SND_ENS1371=m
6877 +CONFIG_SND_ES1938=m
6878 +CONFIG_SND_ES1968=m
6879 +CONFIG_SND_ES1968_INPUT=y
6880 +CONFIG_SND_ES1968_RADIO=y
6881 +CONFIG_SND_FM801=m
6882 +CONFIG_SND_FM801_TEA575X_BOOL=y
6883 +CONFIG_SND_HDSP=m
6884 +CONFIG_SND_HDSPM=m
6885 +CONFIG_SND_ICE1712=m
6886 +CONFIG_SND_ICE1724=m
6887 +CONFIG_SND_INTEL8X0=m
6888 +CONFIG_SND_INTEL8X0M=m
6889 +CONFIG_SND_KORG1212=m
6890 +CONFIG_SND_LOLA=m
6891 +CONFIG_SND_LX6464ES=m
6892 +CONFIG_SND_MAESTRO3=m
6893 +CONFIG_SND_MAESTRO3_INPUT=y
6894 +CONFIG_SND_MIXART=m
6895 +CONFIG_SND_NM256=m
6896 +CONFIG_SND_PCXHR=m
6897 +CONFIG_SND_RIPTIDE=m
6898 +CONFIG_SND_RME32=m
6899 +CONFIG_SND_RME96=m
6900 +CONFIG_SND_RME9652=m
6901 +CONFIG_SND_SONICVIBES=m
6902 +CONFIG_SND_TRIDENT=m
6903 +CONFIG_SND_VIA82XX=m
6904 +CONFIG_SND_VIA82XX_MODEM=m
6905 +CONFIG_SND_VIRTUOSO=m
6906 +CONFIG_SND_VX222=m
6907 +CONFIG_SND_YMFPCI=m
6910 +# HD-Audio
6912 +CONFIG_SND_HDA=m
6913 +CONFIG_SND_HDA_GENERIC_LEDS=y
6914 +CONFIG_SND_HDA_INTEL=m
6915 +CONFIG_SND_HDA_HWDEP=y
6916 +CONFIG_SND_HDA_RECONFIG=y
6917 +CONFIG_SND_HDA_INPUT_BEEP=y
6918 +CONFIG_SND_HDA_INPUT_BEEP_MODE=0
6919 +CONFIG_SND_HDA_PATCH_LOADER=y
6920 +CONFIG_SND_HDA_CODEC_REALTEK=m
6921 +CONFIG_SND_HDA_CODEC_ANALOG=m
6922 +CONFIG_SND_HDA_CODEC_SIGMATEL=m
6923 +CONFIG_SND_HDA_CODEC_VIA=m
6924 +CONFIG_SND_HDA_CODEC_HDMI=m
6925 +CONFIG_SND_HDA_CODEC_CIRRUS=m
6926 +CONFIG_SND_HDA_CODEC_CONEXANT=m
6927 +CONFIG_SND_HDA_CODEC_CA0110=m
6928 +CONFIG_SND_HDA_CODEC_CA0132=m
6929 +CONFIG_SND_HDA_CODEC_CA0132_DSP=y
6930 +CONFIG_SND_HDA_CODEC_CMEDIA=m
6931 +CONFIG_SND_HDA_CODEC_SI3054=m
6932 +CONFIG_SND_HDA_GENERIC=m
6933 +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=1
6934 +# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set
6935 +# end of HD-Audio
6937 +CONFIG_SND_HDA_CORE=m
6938 +CONFIG_SND_HDA_DSP_LOADER=y
6939 +CONFIG_SND_HDA_COMPONENT=y
6940 +CONFIG_SND_HDA_I915=y
6941 +CONFIG_SND_HDA_EXT_CORE=m
6942 +CONFIG_SND_HDA_PREALLOC_SIZE=0
6943 +CONFIG_SND_INTEL_NHLT=y
6944 +CONFIG_SND_INTEL_DSP_CONFIG=m
6945 +CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m
6946 +CONFIG_SND_INTEL_BYT_PREFER_SOF=y
6947 +CONFIG_SND_SPI=y
6948 +CONFIG_SND_USB=y
6949 +CONFIG_SND_USB_AUDIO=m
6950 +CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y
6951 +CONFIG_SND_USB_UA101=m
6952 +CONFIG_SND_USB_USX2Y=m
6953 +CONFIG_SND_USB_CAIAQ=m
6954 +CONFIG_SND_USB_CAIAQ_INPUT=y
6955 +CONFIG_SND_USB_US122L=m
6956 +CONFIG_SND_USB_6FIRE=m
6957 +CONFIG_SND_USB_HIFACE=m
6958 +CONFIG_SND_BCD2000=m
6959 +CONFIG_SND_USB_LINE6=m
6960 +CONFIG_SND_USB_POD=m
6961 +CONFIG_SND_USB_PODHD=m
6962 +CONFIG_SND_USB_TONEPORT=m
6963 +CONFIG_SND_USB_VARIAX=m
6964 +CONFIG_SND_FIREWIRE=y
6965 +CONFIG_SND_FIREWIRE_LIB=m
6966 +CONFIG_SND_DICE=m
6967 +CONFIG_SND_OXFW=m
6968 +CONFIG_SND_ISIGHT=m
6969 +CONFIG_SND_FIREWORKS=m
6970 +CONFIG_SND_BEBOB=m
6971 +CONFIG_SND_FIREWIRE_DIGI00X=m
6972 +CONFIG_SND_FIREWIRE_TASCAM=m
6973 +CONFIG_SND_FIREWIRE_MOTU=m
6974 +CONFIG_SND_FIREFACE=m
6975 +CONFIG_SND_PCMCIA=y
6976 +CONFIG_SND_VXPOCKET=m
6977 +CONFIG_SND_PDAUDIOCF=m
6978 +CONFIG_SND_SOC=m
6979 +CONFIG_SND_SOC_AC97_BUS=y
6980 +CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
6981 +CONFIG_SND_SOC_COMPRESS=y
6982 +CONFIG_SND_SOC_TOPOLOGY=y
6983 +CONFIG_SND_SOC_ACPI=m
6984 +CONFIG_SND_SOC_ADI=m
6985 +CONFIG_SND_SOC_ADI_AXI_I2S=m
6986 +CONFIG_SND_SOC_ADI_AXI_SPDIF=m
6987 +CONFIG_SND_SOC_AMD_ACP=m
6988 +CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH=m
6989 +CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m
6990 +CONFIG_SND_SOC_AMD_ACP3x=m
6991 +CONFIG_SND_SOC_AMD_RV_RT5682_MACH=m
6992 +CONFIG_SND_SOC_AMD_RENOIR=m
6993 +CONFIG_SND_SOC_AMD_RENOIR_MACH=m
6994 +CONFIG_SND_ATMEL_SOC=m
6995 +CONFIG_SND_BCM63XX_I2S_WHISTLER=m
6996 +CONFIG_SND_DESIGNWARE_I2S=m
6997 +CONFIG_SND_DESIGNWARE_PCM=y
7000 +# SoC Audio for Freescale CPUs
7004 +# Common SoC Audio options for Freescale CPUs:
7006 +CONFIG_SND_SOC_FSL_ASRC=m
7007 +CONFIG_SND_SOC_FSL_SAI=m
7008 +CONFIG_SND_SOC_FSL_MQS=m
7009 +CONFIG_SND_SOC_FSL_AUDMIX=m
7010 +CONFIG_SND_SOC_FSL_SSI=m
7011 +CONFIG_SND_SOC_FSL_SPDIF=m
7012 +CONFIG_SND_SOC_FSL_ESAI=m
7013 +CONFIG_SND_SOC_FSL_MICFIL=m
7014 +CONFIG_SND_SOC_FSL_EASRC=m
7015 +CONFIG_SND_SOC_FSL_XCVR=m
7016 +CONFIG_SND_SOC_IMX_AUDMUX=m
7017 +# end of SoC Audio for Freescale CPUs
7019 +CONFIG_SND_I2S_HI6210_I2S=m
7020 +CONFIG_SND_SOC_IMG=y
7021 +CONFIG_SND_SOC_IMG_I2S_IN=m
7022 +CONFIG_SND_SOC_IMG_I2S_OUT=m
7023 +CONFIG_SND_SOC_IMG_PARALLEL_OUT=m
7024 +CONFIG_SND_SOC_IMG_SPDIF_IN=m
7025 +CONFIG_SND_SOC_IMG_SPDIF_OUT=m
7026 +CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m
7027 +CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y
7028 +CONFIG_SND_SOC_INTEL_SST=m
7029 +CONFIG_SND_SOC_INTEL_CATPT=m
7030 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m
7031 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI=m
7032 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=m
7033 +# CONFIG_SND_SOC_INTEL_SKYLAKE is not set
7034 +CONFIG_SND_SOC_INTEL_SKL=m
7035 +CONFIG_SND_SOC_INTEL_APL=m
7036 +CONFIG_SND_SOC_INTEL_KBL=m
7037 +CONFIG_SND_SOC_INTEL_GLK=m
7038 +# CONFIG_SND_SOC_INTEL_CNL is not set
7039 +# CONFIG_SND_SOC_INTEL_CFL is not set
7040 +# CONFIG_SND_SOC_INTEL_CML_H is not set
7041 +# CONFIG_SND_SOC_INTEL_CML_LP is not set
7042 +CONFIG_SND_SOC_INTEL_SKYLAKE_FAMILY=m
7043 +CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m
7044 +# CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC is not set
7045 +CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON=m
7046 +CONFIG_SND_SOC_ACPI_INTEL_MATCH=m
7047 +CONFIG_SND_SOC_INTEL_MACH=y
7048 +# CONFIG_SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES is not set
7049 +CONFIG_SND_SOC_INTEL_HASWELL_MACH=m
7050 +CONFIG_SND_SOC_INTEL_BDW_RT5650_MACH=m
7051 +CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m
7052 +CONFIG_SND_SOC_INTEL_BROADWELL_MACH=m
7053 +CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m
7054 +CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m
7055 +CONFIG_SND_SOC_INTEL_BYTCR_WM5102_MACH=m
7056 +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m
7057 +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m
7058 +CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m
7059 +CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH=m
7060 +CONFIG_SND_SOC_INTEL_BYT_CHT_CX2072X_MACH=m
7061 +CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m
7062 +CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m
7063 +# CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH is not set
7064 +CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m
7065 +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m
7066 +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m
7067 +CONFIG_SND_SOC_INTEL_DA7219_MAX98357A_GENERIC=m
7068 +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON=m
7069 +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m
7070 +CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m
7071 +CONFIG_SND_SOC_INTEL_SOF_WM8804_MACH=m
7072 +CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m
7073 +CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m
7074 +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m
7075 +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH=m
7076 +CONFIG_SND_SOC_INTEL_KBL_RT5660_MACH=m
7077 +CONFIG_SND_SOC_INTEL_GLK_DA7219_MAX98357A_MACH=m
7078 +CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH=m
7079 +CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH=m
7080 +CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH=m
7081 +CONFIG_SND_SOC_INTEL_SOF_PCM512x_MACH=m
7082 +CONFIG_SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH=m
7083 +CONFIG_SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH=m
7084 +CONFIG_SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH=m
7085 +CONFIG_SND_SOC_INTEL_EHL_RT5660_MACH=m
7086 +CONFIG_SND_SOC_MTK_BTCVSD=m
7087 +CONFIG_SND_SOC_SOF_TOPLEVEL=y
7088 +CONFIG_SND_SOC_SOF_PCI_DEV=m
7089 +CONFIG_SND_SOC_SOF_PCI=m
7090 +CONFIG_SND_SOC_SOF_ACPI=m
7091 +CONFIG_SND_SOC_SOF_ACPI_DEV=m
7092 +# CONFIG_SND_SOC_SOF_DEBUG_PROBES is not set
7093 +# CONFIG_SND_SOC_SOF_DEVELOPER_SUPPORT is not set
7094 +CONFIG_SND_SOC_SOF=m
7095 +CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE=y
7096 +CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL=y
7097 +CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC=m
7098 +CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP=m
7099 +CONFIG_SND_SOC_SOF_INTEL_COMMON=m
7100 +CONFIG_SND_SOC_SOF_BAYTRAIL=m
7101 +CONFIG_SND_SOC_SOF_BROADWELL=m
7102 +CONFIG_SND_SOC_SOF_MERRIFIELD=m
7103 +CONFIG_SND_SOC_SOF_INTEL_APL=m
7104 +CONFIG_SND_SOC_SOF_APOLLOLAKE=m
7105 +CONFIG_SND_SOC_SOF_GEMINILAKE=m
7106 +CONFIG_SND_SOC_SOF_INTEL_CNL=m
7107 +CONFIG_SND_SOC_SOF_CANNONLAKE=m
7108 +CONFIG_SND_SOC_SOF_COFFEELAKE=m
7109 +CONFIG_SND_SOC_SOF_COMETLAKE=m
7110 +CONFIG_SND_SOC_SOF_INTEL_ICL=m
7111 +CONFIG_SND_SOC_SOF_ICELAKE=m
7112 +CONFIG_SND_SOC_SOF_JASPERLAKE=m
7113 +CONFIG_SND_SOC_SOF_INTEL_TGL=m
7114 +CONFIG_SND_SOC_SOF_TIGERLAKE=m
7115 +CONFIG_SND_SOC_SOF_ELKHARTLAKE=m
7116 +CONFIG_SND_SOC_SOF_ALDERLAKE=m
7117 +CONFIG_SND_SOC_SOF_HDA_COMMON=m
7118 +CONFIG_SND_SOC_SOF_HDA_LINK=y
7119 +CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC=y
7120 +# CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1 is not set
7121 +CONFIG_SND_SOC_SOF_HDA_LINK_BASELINE=m
7122 +CONFIG_SND_SOC_SOF_HDA=m
7123 +CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=m
7124 +CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE=m
7125 +CONFIG_SND_SOC_SOF_XTENSA=m
7128 +# STMicroelectronics STM32 SOC audio support
7130 +# end of STMicroelectronics STM32 SOC audio support
7132 +CONFIG_SND_SOC_XILINX_I2S=m
7133 +CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=m
7134 +CONFIG_SND_SOC_XILINX_SPDIF=m
7135 +CONFIG_SND_SOC_XTFPGA_I2S=m
7136 +CONFIG_SND_SOC_I2C_AND_SPI=m
7139 +# CODEC drivers
7141 +CONFIG_SND_SOC_ARIZONA=m
7142 +CONFIG_SND_SOC_WM_ADSP=m
7143 +CONFIG_SND_SOC_AC97_CODEC=m
7144 +CONFIG_SND_SOC_ADAU_UTILS=m
7145 +CONFIG_SND_SOC_ADAU1372=m
7146 +CONFIG_SND_SOC_ADAU1372_I2C=m
7147 +CONFIG_SND_SOC_ADAU1372_SPI=m
7148 +CONFIG_SND_SOC_ADAU1701=m
7149 +CONFIG_SND_SOC_ADAU17X1=m
7150 +CONFIG_SND_SOC_ADAU1761=m
7151 +CONFIG_SND_SOC_ADAU1761_I2C=m
7152 +CONFIG_SND_SOC_ADAU1761_SPI=m
7153 +CONFIG_SND_SOC_ADAU7002=m
7154 +CONFIG_SND_SOC_ADAU7118=m
7155 +CONFIG_SND_SOC_ADAU7118_HW=m
7156 +CONFIG_SND_SOC_ADAU7118_I2C=m
7157 +CONFIG_SND_SOC_AK4104=m
7158 +CONFIG_SND_SOC_AK4118=m
7159 +CONFIG_SND_SOC_AK4458=m
7160 +CONFIG_SND_SOC_AK4554=m
7161 +CONFIG_SND_SOC_AK4613=m
7162 +CONFIG_SND_SOC_AK4642=m
7163 +CONFIG_SND_SOC_AK5386=m
7164 +CONFIG_SND_SOC_AK5558=m
7165 +CONFIG_SND_SOC_ALC5623=m
7166 +CONFIG_SND_SOC_BD28623=m
7167 +CONFIG_SND_SOC_BT_SCO=m
7168 +CONFIG_SND_SOC_CROS_EC_CODEC=m
7169 +CONFIG_SND_SOC_CS35L32=m
7170 +CONFIG_SND_SOC_CS35L33=m
7171 +CONFIG_SND_SOC_CS35L34=m
7172 +CONFIG_SND_SOC_CS35L35=m
7173 +CONFIG_SND_SOC_CS35L36=m
7174 +CONFIG_SND_SOC_CS42L42=m
7175 +CONFIG_SND_SOC_CS42L51=m
7176 +CONFIG_SND_SOC_CS42L51_I2C=m
7177 +CONFIG_SND_SOC_CS42L52=m
7178 +CONFIG_SND_SOC_CS42L56=m
7179 +CONFIG_SND_SOC_CS42L73=m
7180 +CONFIG_SND_SOC_CS4234=m
7181 +CONFIG_SND_SOC_CS4265=m
7182 +CONFIG_SND_SOC_CS4270=m
7183 +CONFIG_SND_SOC_CS4271=m
7184 +CONFIG_SND_SOC_CS4271_I2C=m
7185 +CONFIG_SND_SOC_CS4271_SPI=m
7186 +CONFIG_SND_SOC_CS42XX8=m
7187 +CONFIG_SND_SOC_CS42XX8_I2C=m
7188 +CONFIG_SND_SOC_CS43130=m
7189 +CONFIG_SND_SOC_CS4341=m
7190 +CONFIG_SND_SOC_CS4349=m
7191 +CONFIG_SND_SOC_CS53L30=m
7192 +CONFIG_SND_SOC_CX2072X=m
7193 +CONFIG_SND_SOC_DA7213=m
7194 +CONFIG_SND_SOC_DA7219=m
7195 +CONFIG_SND_SOC_DMIC=m
7196 +CONFIG_SND_SOC_HDMI_CODEC=m
7197 +CONFIG_SND_SOC_ES7134=m
7198 +CONFIG_SND_SOC_ES7241=m
7199 +CONFIG_SND_SOC_ES8316=m
7200 +CONFIG_SND_SOC_ES8328=m
7201 +CONFIG_SND_SOC_ES8328_I2C=m
7202 +CONFIG_SND_SOC_ES8328_SPI=m
7203 +CONFIG_SND_SOC_GTM601=m
7204 +CONFIG_SND_SOC_HDAC_HDMI=m
7205 +CONFIG_SND_SOC_HDAC_HDA=m
7206 +CONFIG_SND_SOC_INNO_RK3036=m
7207 +CONFIG_SND_SOC_MAX98088=m
7208 +CONFIG_SND_SOC_MAX98090=m
7209 +CONFIG_SND_SOC_MAX98357A=m
7210 +CONFIG_SND_SOC_MAX98504=m
7211 +CONFIG_SND_SOC_MAX9867=m
7212 +CONFIG_SND_SOC_MAX98927=m
7213 +CONFIG_SND_SOC_MAX98373=m
7214 +CONFIG_SND_SOC_MAX98373_I2C=m
7215 +CONFIG_SND_SOC_MAX98373_SDW=m
7216 +CONFIG_SND_SOC_MAX98390=m
7217 +CONFIG_SND_SOC_MAX9860=m
7218 +CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
7219 +CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
7220 +CONFIG_SND_SOC_PCM1681=m
7221 +CONFIG_SND_SOC_PCM1789=m
7222 +CONFIG_SND_SOC_PCM1789_I2C=m
7223 +CONFIG_SND_SOC_PCM179X=m
7224 +CONFIG_SND_SOC_PCM179X_I2C=m
7225 +CONFIG_SND_SOC_PCM179X_SPI=m
7226 +CONFIG_SND_SOC_PCM186X=m
7227 +CONFIG_SND_SOC_PCM186X_I2C=m
7228 +CONFIG_SND_SOC_PCM186X_SPI=m
7229 +CONFIG_SND_SOC_PCM3060=m
7230 +CONFIG_SND_SOC_PCM3060_I2C=m
7231 +CONFIG_SND_SOC_PCM3060_SPI=m
7232 +CONFIG_SND_SOC_PCM3168A=m
7233 +CONFIG_SND_SOC_PCM3168A_I2C=m
7234 +CONFIG_SND_SOC_PCM3168A_SPI=m
7235 +CONFIG_SND_SOC_PCM5102A=m
7236 +CONFIG_SND_SOC_PCM512x=m
7237 +CONFIG_SND_SOC_PCM512x_I2C=m
7238 +CONFIG_SND_SOC_PCM512x_SPI=m
7239 +CONFIG_SND_SOC_RK3328=m
7240 +CONFIG_SND_SOC_RL6231=m
7241 +CONFIG_SND_SOC_RL6347A=m
7242 +CONFIG_SND_SOC_RT286=m
7243 +CONFIG_SND_SOC_RT298=m
7244 +CONFIG_SND_SOC_RT1011=m
7245 +CONFIG_SND_SOC_RT1015=m
7246 +CONFIG_SND_SOC_RT1308_SDW=m
7247 +CONFIG_SND_SOC_RT5514=m
7248 +CONFIG_SND_SOC_RT5514_SPI=m
7249 +CONFIG_SND_SOC_RT5616=m
7250 +CONFIG_SND_SOC_RT5631=m
7251 +CONFIG_SND_SOC_RT5640=m
7252 +CONFIG_SND_SOC_RT5645=m
7253 +CONFIG_SND_SOC_RT5651=m
7254 +CONFIG_SND_SOC_RT5659=m
7255 +CONFIG_SND_SOC_RT5660=m
7256 +CONFIG_SND_SOC_RT5663=m
7257 +CONFIG_SND_SOC_RT5670=m
7258 +CONFIG_SND_SOC_RT5677=m
7259 +CONFIG_SND_SOC_RT5677_SPI=m
7260 +CONFIG_SND_SOC_RT5682=m
7261 +CONFIG_SND_SOC_RT5682_I2C=m
7262 +CONFIG_SND_SOC_RT5682_SDW=m
7263 +CONFIG_SND_SOC_RT700=m
7264 +CONFIG_SND_SOC_RT700_SDW=m
7265 +CONFIG_SND_SOC_RT711=m
7266 +CONFIG_SND_SOC_RT711_SDW=m
7267 +CONFIG_SND_SOC_RT715=m
7268 +CONFIG_SND_SOC_RT715_SDW=m
7269 +CONFIG_SND_SOC_SGTL5000=m
7270 +CONFIG_SND_SOC_SI476X=m
7271 +CONFIG_SND_SOC_SIGMADSP=m
7272 +CONFIG_SND_SOC_SIGMADSP_I2C=m
7273 +CONFIG_SND_SOC_SIGMADSP_REGMAP=m
7274 +CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
7275 +CONFIG_SND_SOC_SIMPLE_MUX=m
7276 +CONFIG_SND_SOC_SPDIF=m
7277 +CONFIG_SND_SOC_SSM2305=m
7278 +CONFIG_SND_SOC_SSM2602=m
7279 +CONFIG_SND_SOC_SSM2602_SPI=m
7280 +CONFIG_SND_SOC_SSM2602_I2C=m
7281 +CONFIG_SND_SOC_SSM4567=m
7282 +CONFIG_SND_SOC_STA32X=m
7283 +CONFIG_SND_SOC_STA350=m
7284 +CONFIG_SND_SOC_STI_SAS=m
7285 +CONFIG_SND_SOC_TAS2552=m
7286 +CONFIG_SND_SOC_TAS2562=m
7287 +CONFIG_SND_SOC_TAS2764=m
7288 +CONFIG_SND_SOC_TAS2770=m
7289 +CONFIG_SND_SOC_TAS5086=m
7290 +CONFIG_SND_SOC_TAS571X=m
7291 +CONFIG_SND_SOC_TAS5720=m
7292 +CONFIG_SND_SOC_TAS6424=m
7293 +CONFIG_SND_SOC_TDA7419=m
7294 +CONFIG_SND_SOC_TFA9879=m
7295 +CONFIG_SND_SOC_TLV320AIC23=m
7296 +CONFIG_SND_SOC_TLV320AIC23_I2C=m
7297 +CONFIG_SND_SOC_TLV320AIC23_SPI=m
7298 +CONFIG_SND_SOC_TLV320AIC31XX=m
7299 +CONFIG_SND_SOC_TLV320AIC32X4=m
7300 +CONFIG_SND_SOC_TLV320AIC32X4_I2C=m
7301 +CONFIG_SND_SOC_TLV320AIC32X4_SPI=m
7302 +CONFIG_SND_SOC_TLV320AIC3X=m
7303 +CONFIG_SND_SOC_TLV320ADCX140=m
7304 +CONFIG_SND_SOC_TS3A227E=m
7305 +CONFIG_SND_SOC_TSCS42XX=m
7306 +CONFIG_SND_SOC_TSCS454=m
7307 +CONFIG_SND_SOC_UDA1334=m
7308 +CONFIG_SND_SOC_WCD9335=m
7309 +CONFIG_SND_SOC_WCD934X=m
7310 +CONFIG_SND_SOC_WM5102=m
7311 +CONFIG_SND_SOC_WM8510=m
7312 +CONFIG_SND_SOC_WM8523=m
7313 +CONFIG_SND_SOC_WM8524=m
7314 +CONFIG_SND_SOC_WM8580=m
7315 +CONFIG_SND_SOC_WM8711=m
7316 +CONFIG_SND_SOC_WM8728=m
7317 +CONFIG_SND_SOC_WM8731=m
7318 +CONFIG_SND_SOC_WM8737=m
7319 +CONFIG_SND_SOC_WM8741=m
7320 +CONFIG_SND_SOC_WM8750=m
7321 +CONFIG_SND_SOC_WM8753=m
7322 +CONFIG_SND_SOC_WM8770=m
7323 +CONFIG_SND_SOC_WM8776=m
7324 +CONFIG_SND_SOC_WM8782=m
7325 +CONFIG_SND_SOC_WM8804=m
7326 +CONFIG_SND_SOC_WM8804_I2C=m
7327 +CONFIG_SND_SOC_WM8804_SPI=m
7328 +CONFIG_SND_SOC_WM8903=m
7329 +CONFIG_SND_SOC_WM8904=m
7330 +CONFIG_SND_SOC_WM8960=m
7331 +CONFIG_SND_SOC_WM8962=m
7332 +CONFIG_SND_SOC_WM8974=m
7333 +CONFIG_SND_SOC_WM8978=m
7334 +CONFIG_SND_SOC_WM8985=m
7335 +CONFIG_SND_SOC_WSA881X=m
7336 +CONFIG_SND_SOC_ZL38060=m
7337 +CONFIG_SND_SOC_ZX_AUD96P22=m
7338 +CONFIG_SND_SOC_MAX9759=m
7339 +CONFIG_SND_SOC_MT6351=m
7340 +CONFIG_SND_SOC_MT6358=m
7341 +CONFIG_SND_SOC_MT6660=m
7342 +CONFIG_SND_SOC_NAU8315=m
7343 +CONFIG_SND_SOC_NAU8540=m
7344 +CONFIG_SND_SOC_NAU8810=m
7345 +CONFIG_SND_SOC_NAU8822=m
7346 +CONFIG_SND_SOC_NAU8824=m
7347 +CONFIG_SND_SOC_NAU8825=m
7348 +CONFIG_SND_SOC_TPA6130A2=m
7349 +CONFIG_SND_SOC_LPASS_WSA_MACRO=m
7350 +CONFIG_SND_SOC_LPASS_VA_MACRO=m
7351 +CONFIG_SND_SOC_LPASS_RX_MACRO=m
7352 +CONFIG_SND_SOC_LPASS_TX_MACRO=m
7353 +# end of CODEC drivers
7355 +CONFIG_SND_SIMPLE_CARD_UTILS=m
7356 +CONFIG_SND_SIMPLE_CARD=m
7357 +CONFIG_SND_X86=y
7358 +CONFIG_HDMI_LPE_AUDIO=m
7359 +CONFIG_SND_SYNTH_EMUX=m
7360 +CONFIG_SND_XEN_FRONTEND=m
7361 +CONFIG_AC97_BUS=m
7364 +# HID support
7366 +CONFIG_HID=m
7367 +CONFIG_HID_BATTERY_STRENGTH=y
7368 +CONFIG_HIDRAW=y
7369 +CONFIG_UHID=m
7370 +CONFIG_HID_GENERIC=m
7373 +# Special HID drivers
7375 +CONFIG_HID_A4TECH=m
7376 +CONFIG_HID_ACCUTOUCH=m
7377 +CONFIG_HID_ACRUX=m
7378 +CONFIG_HID_ACRUX_FF=y
7379 +CONFIG_HID_APPLE=m
7380 +CONFIG_HID_APPLEIR=m
7381 +CONFIG_HID_ASUS=m
7382 +CONFIG_HID_AUREAL=m
7383 +CONFIG_HID_BELKIN=m
7384 +CONFIG_HID_BETOP_FF=m
7385 +CONFIG_HID_BIGBEN_FF=m
7386 +CONFIG_HID_CHERRY=m
7387 +CONFIG_HID_CHICONY=m
7388 +CONFIG_HID_CORSAIR=m
7389 +CONFIG_HID_COUGAR=m
7390 +CONFIG_HID_MACALLY=m
7391 +CONFIG_HID_PRODIKEYS=m
7392 +CONFIG_HID_CMEDIA=m
7393 +CONFIG_HID_CP2112=m
7394 +CONFIG_HID_CREATIVE_SB0540=m
7395 +CONFIG_HID_CYPRESS=m
7396 +CONFIG_HID_DRAGONRISE=m
7397 +CONFIG_DRAGONRISE_FF=y
7398 +CONFIG_HID_EMS_FF=m
7399 +CONFIG_HID_ELAN=m
7400 +CONFIG_HID_ELECOM=m
7401 +CONFIG_HID_ELO=m
7402 +CONFIG_HID_EZKEY=m
7403 +CONFIG_HID_GEMBIRD=m
7404 +CONFIG_HID_GFRM=m
7405 +CONFIG_HID_GLORIOUS=m
7406 +CONFIG_HID_HOLTEK=m
7407 +CONFIG_HOLTEK_FF=y
7408 +CONFIG_HID_GOOGLE_HAMMER=m
7409 +CONFIG_HID_VIVALDI=m
7410 +CONFIG_HID_GT683R=m
7411 +CONFIG_HID_KEYTOUCH=m
7412 +CONFIG_HID_KYE=m
7413 +CONFIG_HID_UCLOGIC=m
7414 +CONFIG_HID_WALTOP=m
7415 +CONFIG_HID_VIEWSONIC=m
7416 +CONFIG_HID_GYRATION=m
7417 +CONFIG_HID_ICADE=m
7418 +CONFIG_HID_ITE=m
7419 +CONFIG_HID_JABRA=m
7420 +CONFIG_HID_TWINHAN=m
7421 +CONFIG_HID_KENSINGTON=m
7422 +CONFIG_HID_LCPOWER=m
7423 +CONFIG_HID_LED=m
7424 +CONFIG_HID_LENOVO=m
7425 +CONFIG_HID_LOGITECH=m
7426 +CONFIG_HID_LOGITECH_DJ=m
7427 +CONFIG_HID_LOGITECH_HIDPP=m
7428 +CONFIG_LOGITECH_FF=y
7429 +CONFIG_LOGIRUMBLEPAD2_FF=y
7430 +CONFIG_LOGIG940_FF=y
7431 +CONFIG_LOGIWHEELS_FF=y
7432 +CONFIG_HID_MAGICMOUSE=m
7433 +CONFIG_HID_MALTRON=m
7434 +CONFIG_HID_MAYFLASH=m
7435 +CONFIG_HID_REDRAGON=m
7436 +CONFIG_HID_MICROSOFT=m
7437 +CONFIG_HID_MONTEREY=m
7438 +CONFIG_HID_MULTITOUCH=m
7439 +CONFIG_HID_NTI=m
7440 +CONFIG_HID_NTRIG=m
7441 +CONFIG_HID_ORTEK=m
7442 +CONFIG_HID_PANTHERLORD=m
7443 +CONFIG_PANTHERLORD_FF=y
7444 +CONFIG_HID_PENMOUNT=m
7445 +CONFIG_HID_PETALYNX=m
7446 +CONFIG_HID_PICOLCD=m
7447 +CONFIG_HID_PICOLCD_FB=y
7448 +CONFIG_HID_PICOLCD_BACKLIGHT=y
7449 +CONFIG_HID_PICOLCD_LCD=y
7450 +CONFIG_HID_PICOLCD_LEDS=y
7451 +CONFIG_HID_PICOLCD_CIR=y
7452 +CONFIG_HID_PLANTRONICS=m
7453 +CONFIG_HID_PLAYSTATION=m
7454 +CONFIG_PLAYSTATION_FF=y
7455 +CONFIG_HID_PRIMAX=m
7456 +CONFIG_HID_RETRODE=m
7457 +CONFIG_HID_ROCCAT=m
7458 +CONFIG_HID_SAITEK=m
7459 +CONFIG_HID_SAMSUNG=m
7460 +CONFIG_HID_SONY=m
7461 +CONFIG_SONY_FF=y
7462 +CONFIG_HID_SPEEDLINK=m
7463 +CONFIG_HID_STEAM=m
7464 +CONFIG_HID_STEELSERIES=m
7465 +CONFIG_HID_SUNPLUS=m
7466 +CONFIG_HID_RMI=m
7467 +CONFIG_HID_GREENASIA=m
7468 +CONFIG_GREENASIA_FF=y
7469 +CONFIG_HID_HYPERV_MOUSE=m
7470 +CONFIG_HID_SMARTJOYPLUS=m
7471 +CONFIG_SMARTJOYPLUS_FF=y
7472 +CONFIG_HID_TIVO=m
7473 +CONFIG_HID_TOPSEED=m
7474 +CONFIG_HID_THINGM=m
7475 +CONFIG_HID_THRUSTMASTER=m
7476 +CONFIG_THRUSTMASTER_FF=y
7477 +CONFIG_HID_UDRAW_PS3=m
7478 +CONFIG_HID_U2FZERO=m
7479 +CONFIG_HID_WACOM=m
7480 +CONFIG_HID_WIIMOTE=m
7481 +CONFIG_HID_XINMO=m
7482 +CONFIG_HID_ZEROPLUS=m
7483 +CONFIG_ZEROPLUS_FF=y
7484 +CONFIG_HID_ZYDACRON=m
7485 +CONFIG_HID_SENSOR_HUB=m
7486 +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
7487 +CONFIG_HID_ALPS=m
7488 +CONFIG_HID_MCP2221=m
7489 +# end of Special HID drivers
7492 +# USB HID support
7494 +CONFIG_USB_HID=m
7495 +CONFIG_HID_PID=y
7496 +CONFIG_USB_HIDDEV=y
7499 +# USB HID Boot Protocol drivers
7501 +CONFIG_USB_KBD=m
7502 +CONFIG_USB_MOUSE=m
7503 +# end of USB HID Boot Protocol drivers
7504 +# end of USB HID support
7507 +# I2C HID support
7509 +CONFIG_I2C_HID_ACPI=m
7510 +# end of I2C HID support
7512 +CONFIG_I2C_HID_CORE=m
7515 +# Intel ISH HID support
7517 +CONFIG_INTEL_ISH_HID=m
7518 +CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER=m
7519 +# end of Intel ISH HID support
7522 +# AMD SFH HID Support
7524 +CONFIG_AMD_SFH_HID=m
7525 +# end of AMD SFH HID Support
7526 +# end of HID support
7528 +CONFIG_USB_OHCI_LITTLE_ENDIAN=y
7529 +CONFIG_USB_SUPPORT=y
7530 +CONFIG_USB_COMMON=y
7531 +CONFIG_USB_LED_TRIG=y
7532 +CONFIG_USB_ULPI_BUS=m
7533 +CONFIG_USB_CONN_GPIO=m
7534 +CONFIG_USB_ARCH_HAS_HCD=y
7535 +CONFIG_USB=y
7536 +CONFIG_USB_PCI=y
7537 +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
7540 +# Miscellaneous USB options
7542 +CONFIG_USB_DEFAULT_PERSIST=y
7543 +# CONFIG_USB_FEW_INIT_RETRIES is not set
7544 +CONFIG_USB_DYNAMIC_MINORS=y
7545 +# CONFIG_USB_OTG is not set
7546 +# CONFIG_USB_OTG_PRODUCTLIST is not set
7547 +# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set
7548 +CONFIG_USB_LEDS_TRIGGER_USBPORT=m
7549 +CONFIG_USB_AUTOSUSPEND_DELAY=2
7550 +CONFIG_USB_MON=m
7553 +# USB Host Controller Drivers
7555 +CONFIG_USB_C67X00_HCD=m
7556 +CONFIG_USB_XHCI_HCD=y
7557 +CONFIG_USB_XHCI_DBGCAP=y
7558 +CONFIG_USB_XHCI_PCI=m
7559 +CONFIG_USB_XHCI_PCI_RENESAS=m
7560 +CONFIG_USB_XHCI_PLATFORM=m
7561 +CONFIG_USB_EHCI_HCD=y
7562 +CONFIG_USB_EHCI_ROOT_HUB_TT=y
7563 +CONFIG_USB_EHCI_TT_NEWSCHED=y
7564 +CONFIG_USB_EHCI_PCI=y
7565 +CONFIG_USB_EHCI_FSL=m
7566 +CONFIG_USB_EHCI_HCD_PLATFORM=y
7567 +CONFIG_USB_OXU210HP_HCD=m
7568 +CONFIG_USB_ISP116X_HCD=m
7569 +CONFIG_USB_FOTG210_HCD=m
7570 +CONFIG_USB_MAX3421_HCD=m
7571 +CONFIG_USB_OHCI_HCD=y
7572 +CONFIG_USB_OHCI_HCD_PCI=y
7573 +CONFIG_USB_OHCI_HCD_PLATFORM=y
7574 +CONFIG_USB_UHCI_HCD=y
7575 +CONFIG_USB_U132_HCD=m
7576 +CONFIG_USB_SL811_HCD=m
7577 +CONFIG_USB_SL811_HCD_ISO=y
7578 +CONFIG_USB_SL811_CS=m
7579 +CONFIG_USB_R8A66597_HCD=m
7580 +CONFIG_USB_HCD_BCMA=m
7581 +CONFIG_USB_HCD_SSB=m
7582 +# CONFIG_USB_HCD_TEST_MODE is not set
7585 +# USB Device Class drivers
7587 +CONFIG_USB_ACM=m
7588 +CONFIG_USB_PRINTER=m
7589 +CONFIG_USB_WDM=m
7590 +CONFIG_USB_TMC=m
7593 +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
7597 +# also be needed; see USB_STORAGE Help for more info
7599 +CONFIG_USB_STORAGE=m
7600 +# CONFIG_USB_STORAGE_DEBUG is not set
7601 +CONFIG_USB_STORAGE_REALTEK=m
7602 +CONFIG_REALTEK_AUTOPM=y
7603 +CONFIG_USB_STORAGE_DATAFAB=m
7604 +CONFIG_USB_STORAGE_FREECOM=m
7605 +CONFIG_USB_STORAGE_ISD200=m
7606 +CONFIG_USB_STORAGE_USBAT=m
7607 +CONFIG_USB_STORAGE_SDDR09=m
7608 +CONFIG_USB_STORAGE_SDDR55=m
7609 +CONFIG_USB_STORAGE_JUMPSHOT=m
7610 +CONFIG_USB_STORAGE_ALAUDA=m
7611 +CONFIG_USB_STORAGE_ONETOUCH=m
7612 +CONFIG_USB_STORAGE_KARMA=m
7613 +CONFIG_USB_STORAGE_CYPRESS_ATACB=m
7614 +CONFIG_USB_STORAGE_ENE_UB6250=m
7615 +CONFIG_USB_UAS=m
7618 +# USB Imaging devices
7620 +CONFIG_USB_MDC800=m
7621 +CONFIG_USB_MICROTEK=m
7622 +CONFIG_USBIP_CORE=m
7623 +CONFIG_USBIP_VHCI_HCD=m
7624 +CONFIG_USBIP_VHCI_HC_PORTS=8
7625 +CONFIG_USBIP_VHCI_NR_HCS=1
7626 +CONFIG_USBIP_HOST=m
7627 +CONFIG_USBIP_VUDC=m
7628 +# CONFIG_USBIP_DEBUG is not set
7629 +CONFIG_USB_CDNS_SUPPORT=m
7630 +CONFIG_USB_CDNS_HOST=y
7631 +CONFIG_USB_CDNS3=m
7632 +CONFIG_USB_CDNS3_GADGET=y
7633 +CONFIG_USB_CDNS3_HOST=y
7634 +CONFIG_USB_CDNS3_PCI_WRAP=m
7635 +CONFIG_USB_CDNSP_PCI=m
7636 +CONFIG_USB_CDNSP_GADGET=y
7637 +CONFIG_USB_CDNSP_HOST=y
7638 +CONFIG_USB_MUSB_HDRC=m
7639 +# CONFIG_USB_MUSB_HOST is not set
7640 +# CONFIG_USB_MUSB_GADGET is not set
7641 +CONFIG_USB_MUSB_DUAL_ROLE=y
7644 +# Platform Glue Layer
7648 +# MUSB DMA mode
7650 +CONFIG_MUSB_PIO_ONLY=y
7651 +CONFIG_USB_DWC3=m
7652 +CONFIG_USB_DWC3_ULPI=y
7653 +# CONFIG_USB_DWC3_HOST is not set
7654 +# CONFIG_USB_DWC3_GADGET is not set
7655 +CONFIG_USB_DWC3_DUAL_ROLE=y
7658 +# Platform Glue Driver Support
7660 +CONFIG_USB_DWC3_PCI=m
7661 +CONFIG_USB_DWC3_HAPS=m
7662 +CONFIG_USB_DWC2=y
7663 +CONFIG_USB_DWC2_HOST=y
7666 +# Gadget/Dual-role mode requires USB Gadget support to be enabled
7668 +CONFIG_USB_DWC2_PCI=m
7669 +# CONFIG_USB_DWC2_DEBUG is not set
7670 +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
7671 +CONFIG_USB_CHIPIDEA=m
7672 +CONFIG_USB_CHIPIDEA_UDC=y
7673 +CONFIG_USB_CHIPIDEA_HOST=y
7674 +CONFIG_USB_CHIPIDEA_PCI=m
7675 +CONFIG_USB_CHIPIDEA_MSM=m
7676 +CONFIG_USB_CHIPIDEA_GENERIC=m
7677 +CONFIG_USB_ISP1760=m
7678 +CONFIG_USB_ISP1760_HCD=y
7679 +CONFIG_USB_ISP1761_UDC=y
7680 +# CONFIG_USB_ISP1760_HOST_ROLE is not set
7681 +# CONFIG_USB_ISP1760_GADGET_ROLE is not set
7682 +CONFIG_USB_ISP1760_DUAL_ROLE=y
7685 +# USB port drivers
7687 +CONFIG_USB_USS720=m
7688 +CONFIG_USB_SERIAL=m
7689 +CONFIG_USB_SERIAL_GENERIC=y
7690 +CONFIG_USB_SERIAL_SIMPLE=m
7691 +CONFIG_USB_SERIAL_AIRCABLE=m
7692 +CONFIG_USB_SERIAL_ARK3116=m
7693 +CONFIG_USB_SERIAL_BELKIN=m
7694 +CONFIG_USB_SERIAL_CH341=m
7695 +CONFIG_USB_SERIAL_WHITEHEAT=m
7696 +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
7697 +CONFIG_USB_SERIAL_CP210X=m
7698 +CONFIG_USB_SERIAL_CYPRESS_M8=m
7699 +CONFIG_USB_SERIAL_EMPEG=m
7700 +CONFIG_USB_SERIAL_FTDI_SIO=m
7701 +CONFIG_USB_SERIAL_VISOR=m
7702 +CONFIG_USB_SERIAL_IPAQ=m
7703 +CONFIG_USB_SERIAL_IR=m
7704 +CONFIG_USB_SERIAL_EDGEPORT=m
7705 +CONFIG_USB_SERIAL_EDGEPORT_TI=m
7706 +CONFIG_USB_SERIAL_F81232=m
7707 +CONFIG_USB_SERIAL_F8153X=m
7708 +CONFIG_USB_SERIAL_GARMIN=m
7709 +CONFIG_USB_SERIAL_IPW=m
7710 +CONFIG_USB_SERIAL_IUU=m
7711 +CONFIG_USB_SERIAL_KEYSPAN_PDA=m
7712 +CONFIG_USB_SERIAL_KEYSPAN=m
7713 +CONFIG_USB_SERIAL_KLSI=m
7714 +CONFIG_USB_SERIAL_KOBIL_SCT=m
7715 +CONFIG_USB_SERIAL_MCT_U232=m
7716 +CONFIG_USB_SERIAL_METRO=m
7717 +CONFIG_USB_SERIAL_MOS7720=m
7718 +CONFIG_USB_SERIAL_MOS7715_PARPORT=y
7719 +CONFIG_USB_SERIAL_MOS7840=m
7720 +CONFIG_USB_SERIAL_MXUPORT=m
7721 +CONFIG_USB_SERIAL_NAVMAN=m
7722 +CONFIG_USB_SERIAL_PL2303=m
7723 +CONFIG_USB_SERIAL_OTI6858=m
7724 +CONFIG_USB_SERIAL_QCAUX=m
7725 +CONFIG_USB_SERIAL_QUALCOMM=m
7726 +CONFIG_USB_SERIAL_SPCP8X5=m
7727 +CONFIG_USB_SERIAL_SAFE=m
7728 +# CONFIG_USB_SERIAL_SAFE_PADDED is not set
7729 +CONFIG_USB_SERIAL_SIERRAWIRELESS=m
7730 +CONFIG_USB_SERIAL_SYMBOL=m
7731 +CONFIG_USB_SERIAL_TI=m
7732 +CONFIG_USB_SERIAL_CYBERJACK=m
7733 +CONFIG_USB_SERIAL_WWAN=m
7734 +CONFIG_USB_SERIAL_OPTION=m
7735 +CONFIG_USB_SERIAL_OMNINET=m
7736 +CONFIG_USB_SERIAL_OPTICON=m
7737 +CONFIG_USB_SERIAL_XSENS_MT=m
7738 +CONFIG_USB_SERIAL_WISHBONE=m
7739 +CONFIG_USB_SERIAL_SSU100=m
7740 +CONFIG_USB_SERIAL_QT2=m
7741 +CONFIG_USB_SERIAL_UPD78F0730=m
7742 +CONFIG_USB_SERIAL_XR=m
7743 +CONFIG_USB_SERIAL_DEBUG=m
7746 +# USB Miscellaneous drivers
7748 +CONFIG_USB_EMI62=m
7749 +CONFIG_USB_EMI26=m
7750 +CONFIG_USB_ADUTUX=m
7751 +CONFIG_USB_SEVSEG=m
7752 +CONFIG_USB_LEGOTOWER=m
7753 +CONFIG_USB_LCD=m
7754 +CONFIG_USB_CYPRESS_CY7C63=m
7755 +CONFIG_USB_CYTHERM=m
7756 +CONFIG_USB_IDMOUSE=m
7757 +CONFIG_USB_FTDI_ELAN=m
7758 +CONFIG_USB_APPLEDISPLAY=m
7759 +CONFIG_APPLE_MFI_FASTCHARGE=m
7760 +CONFIG_USB_SISUSBVGA=m
7761 +CONFIG_USB_LD=m
7762 +CONFIG_USB_TRANCEVIBRATOR=m
7763 +CONFIG_USB_IOWARRIOR=m
7764 +CONFIG_USB_TEST=m
7765 +CONFIG_USB_EHSET_TEST_FIXTURE=m
7766 +CONFIG_USB_ISIGHTFW=m
7767 +CONFIG_USB_YUREX=m
7768 +CONFIG_USB_EZUSB_FX2=m
7769 +CONFIG_USB_HUB_USB251XB=m
7770 +CONFIG_USB_HSIC_USB3503=m
7771 +CONFIG_USB_HSIC_USB4604=m
7772 +CONFIG_USB_LINK_LAYER_TEST=m
7773 +CONFIG_USB_CHAOSKEY=m
7774 +CONFIG_USB_ATM=m
7775 +CONFIG_USB_SPEEDTOUCH=m
7776 +CONFIG_USB_CXACRU=m
7777 +CONFIG_USB_UEAGLEATM=m
7778 +CONFIG_USB_XUSBATM=m
7781 +# USB Physical Layer drivers
7783 +CONFIG_USB_PHY=y
7784 +CONFIG_NOP_USB_XCEIV=m
7785 +CONFIG_USB_GPIO_VBUS=m
7786 +CONFIG_TAHVO_USB=m
7787 +CONFIG_TAHVO_USB_HOST_BY_DEFAULT=y
7788 +CONFIG_USB_ISP1301=m
7789 +# end of USB Physical Layer drivers
7791 +CONFIG_USB_GADGET=m
7792 +# CONFIG_USB_GADGET_DEBUG is not set
7793 +# CONFIG_USB_GADGET_DEBUG_FILES is not set
7794 +# CONFIG_USB_GADGET_DEBUG_FS is not set
7795 +CONFIG_USB_GADGET_VBUS_DRAW=2
7796 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
7797 +CONFIG_U_SERIAL_CONSOLE=y
7800 +# USB Peripheral Controller
7802 +CONFIG_USB_FOTG210_UDC=m
7803 +CONFIG_USB_GR_UDC=m
7804 +CONFIG_USB_R8A66597=m
7805 +CONFIG_USB_PXA27X=m
7806 +CONFIG_USB_MV_UDC=m
7807 +CONFIG_USB_MV_U3D=m
7808 +CONFIG_USB_SNP_CORE=m
7809 +# CONFIG_USB_M66592 is not set
7810 +CONFIG_USB_BDC_UDC=m
7811 +CONFIG_USB_AMD5536UDC=m
7812 +CONFIG_USB_NET2272=m
7813 +CONFIG_USB_NET2272_DMA=y
7814 +CONFIG_USB_NET2280=m
7815 +CONFIG_USB_GOKU=m
7816 +CONFIG_USB_EG20T=m
7817 +CONFIG_USB_MAX3420_UDC=m
7818 +# CONFIG_USB_DUMMY_HCD is not set
7819 +# end of USB Peripheral Controller
7821 +CONFIG_USB_LIBCOMPOSITE=m
7822 +CONFIG_USB_F_ACM=m
7823 +CONFIG_USB_F_SS_LB=m
7824 +CONFIG_USB_U_SERIAL=m
7825 +CONFIG_USB_U_ETHER=m
7826 +CONFIG_USB_U_AUDIO=m
7827 +CONFIG_USB_F_SERIAL=m
7828 +CONFIG_USB_F_OBEX=m
7829 +CONFIG_USB_F_NCM=m
7830 +CONFIG_USB_F_ECM=m
7831 +CONFIG_USB_F_PHONET=m
7832 +CONFIG_USB_F_EEM=m
7833 +CONFIG_USB_F_SUBSET=m
7834 +CONFIG_USB_F_RNDIS=m
7835 +CONFIG_USB_F_MASS_STORAGE=m
7836 +CONFIG_USB_F_FS=m
7837 +CONFIG_USB_F_UAC1=m
7838 +CONFIG_USB_F_UAC1_LEGACY=m
7839 +CONFIG_USB_F_UAC2=m
7840 +CONFIG_USB_F_UVC=m
7841 +CONFIG_USB_F_MIDI=m
7842 +CONFIG_USB_F_HID=m
7843 +CONFIG_USB_F_PRINTER=m
7844 +CONFIG_USB_F_TCM=m
7845 +CONFIG_USB_CONFIGFS=m
7846 +CONFIG_USB_CONFIGFS_SERIAL=y
7847 +CONFIG_USB_CONFIGFS_ACM=y
7848 +CONFIG_USB_CONFIGFS_OBEX=y
7849 +CONFIG_USB_CONFIGFS_NCM=y
7850 +CONFIG_USB_CONFIGFS_ECM=y
7851 +CONFIG_USB_CONFIGFS_ECM_SUBSET=y
7852 +CONFIG_USB_CONFIGFS_RNDIS=y
7853 +CONFIG_USB_CONFIGFS_EEM=y
7854 +CONFIG_USB_CONFIGFS_PHONET=y
7855 +CONFIG_USB_CONFIGFS_MASS_STORAGE=y
7856 +CONFIG_USB_CONFIGFS_F_LB_SS=y
7857 +CONFIG_USB_CONFIGFS_F_FS=y
7858 +CONFIG_USB_CONFIGFS_F_UAC1=y
7859 +CONFIG_USB_CONFIGFS_F_UAC1_LEGACY=y
7860 +CONFIG_USB_CONFIGFS_F_UAC2=y
7861 +CONFIG_USB_CONFIGFS_F_MIDI=y
7862 +CONFIG_USB_CONFIGFS_F_HID=y
7863 +CONFIG_USB_CONFIGFS_F_UVC=y
7864 +CONFIG_USB_CONFIGFS_F_PRINTER=y
7865 +CONFIG_USB_CONFIGFS_F_TCM=y
7868 +# USB Gadget precomposed configurations
7870 +CONFIG_USB_ZERO=m
7871 +CONFIG_USB_AUDIO=m
7872 +CONFIG_GADGET_UAC1=y
7873 +# CONFIG_GADGET_UAC1_LEGACY is not set
7874 +CONFIG_USB_ETH=m
7875 +CONFIG_USB_ETH_RNDIS=y
7876 +CONFIG_USB_ETH_EEM=y
7877 +CONFIG_USB_G_NCM=m
7878 +CONFIG_USB_GADGETFS=m
7879 +CONFIG_USB_FUNCTIONFS=m
7880 +CONFIG_USB_FUNCTIONFS_ETH=y
7881 +CONFIG_USB_FUNCTIONFS_RNDIS=y
7882 +CONFIG_USB_FUNCTIONFS_GENERIC=y
7883 +CONFIG_USB_MASS_STORAGE=m
7884 +CONFIG_USB_GADGET_TARGET=m
7885 +CONFIG_USB_G_SERIAL=m
7886 +CONFIG_USB_MIDI_GADGET=m
7887 +CONFIG_USB_G_PRINTER=m
7888 +CONFIG_USB_CDC_COMPOSITE=m
7889 +CONFIG_USB_G_NOKIA=m
7890 +CONFIG_USB_G_ACM_MS=m
7891 +# CONFIG_USB_G_MULTI is not set
7892 +CONFIG_USB_G_HID=m
7893 +CONFIG_USB_G_DBGP=m
7894 +# CONFIG_USB_G_DBGP_PRINTK is not set
7895 +CONFIG_USB_G_DBGP_SERIAL=y
7896 +CONFIG_USB_G_WEBCAM=m
7897 +CONFIG_USB_RAW_GADGET=m
7898 +# end of USB Gadget precomposed configurations
7900 +CONFIG_TYPEC=m
7901 +CONFIG_TYPEC_TCPM=m
7902 +CONFIG_TYPEC_TCPCI=m
7903 +CONFIG_TYPEC_RT1711H=m
7904 +CONFIG_TYPEC_MT6360=m
7905 +CONFIG_TYPEC_TCPCI_MAXIM=m
7906 +CONFIG_TYPEC_FUSB302=m
7907 +# CONFIG_TYPEC_WCOVE is not set
7908 +CONFIG_TYPEC_UCSI=m
7909 +CONFIG_UCSI_CCG=m
7910 +CONFIG_UCSI_ACPI=m
7911 +CONFIG_TYPEC_HD3SS3220=m
7912 +CONFIG_TYPEC_TPS6598X=m
7913 +CONFIG_TYPEC_STUSB160X=m
7916 +# USB Type-C Multiplexer/DeMultiplexer Switch support
7918 +CONFIG_TYPEC_MUX_PI3USB30532=m
7919 +CONFIG_TYPEC_MUX_INTEL_PMC=m
7920 +# end of USB Type-C Multiplexer/DeMultiplexer Switch support
7923 +# USB Type-C Alternate Mode drivers
7925 +CONFIG_TYPEC_DP_ALTMODE=m
7926 +CONFIG_TYPEC_NVIDIA_ALTMODE=m
7927 +# end of USB Type-C Alternate Mode drivers
7929 +CONFIG_USB_ROLE_SWITCH=y
7930 +CONFIG_USB_ROLES_INTEL_XHCI=m
7931 +CONFIG_MMC=y
7932 +CONFIG_MMC_BLOCK=m
7933 +CONFIG_MMC_BLOCK_MINORS=8
7934 +CONFIG_SDIO_UART=m
7935 +# CONFIG_MMC_TEST is not set
7936 +CONFIG_MMC_CRYPTO=y
7939 +# MMC/SD/SDIO Host Controller Drivers
7941 +# CONFIG_MMC_DEBUG is not set
7942 +CONFIG_MMC_SDHCI=m
7943 +CONFIG_MMC_SDHCI_IO_ACCESSORS=y
7944 +CONFIG_MMC_SDHCI_PCI=m
7945 +CONFIG_MMC_RICOH_MMC=y
7946 +CONFIG_MMC_SDHCI_ACPI=m
7947 +CONFIG_MMC_SDHCI_PLTFM=m
7948 +CONFIG_MMC_SDHCI_F_SDH30=m
7949 +CONFIG_MMC_WBSD=m
7950 +CONFIG_MMC_ALCOR=m
7951 +CONFIG_MMC_TIFM_SD=m
7952 +CONFIG_MMC_SPI=m
7953 +CONFIG_MMC_SDRICOH_CS=m
7954 +CONFIG_MMC_CB710=m
7955 +CONFIG_MMC_VIA_SDMMC=m
7956 +CONFIG_MMC_VUB300=m
7957 +CONFIG_MMC_USHC=m
7958 +CONFIG_MMC_USDHI6ROL0=m
7959 +CONFIG_MMC_REALTEK_PCI=m
7960 +CONFIG_MMC_REALTEK_USB=m
7961 +CONFIG_MMC_CQHCI=m
7962 +# CONFIG_MMC_HSQ is not set
7963 +CONFIG_MMC_TOSHIBA_PCI=m
7964 +CONFIG_MMC_MTK=m
7965 +CONFIG_MMC_SDHCI_XENON=m
7966 +CONFIG_MEMSTICK=m
7967 +# CONFIG_MEMSTICK_DEBUG is not set
7970 +# MemoryStick drivers
7972 +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
7973 +CONFIG_MSPRO_BLOCK=m
7974 +CONFIG_MS_BLOCK=m
7977 +# MemoryStick Host Controller Drivers
7979 +CONFIG_MEMSTICK_TIFM_MS=m
7980 +CONFIG_MEMSTICK_JMICRON_38X=m
7981 +CONFIG_MEMSTICK_R592=m
7982 +CONFIG_MEMSTICK_REALTEK_PCI=m
7983 +CONFIG_MEMSTICK_REALTEK_USB=m
7984 +CONFIG_NEW_LEDS=y
7985 +CONFIG_LEDS_CLASS=y
7986 +CONFIG_LEDS_CLASS_FLASH=m
7987 +CONFIG_LEDS_CLASS_MULTICOLOR=m
7988 +CONFIG_LEDS_BRIGHTNESS_HW_CHANGED=y
7991 +# LED drivers
7993 +CONFIG_LEDS_88PM860X=m
7994 +CONFIG_LEDS_APU=m
7995 +CONFIG_LEDS_AS3645A=m
7996 +CONFIG_LEDS_LM3530=m
7997 +CONFIG_LEDS_LM3532=m
7998 +CONFIG_LEDS_LM3533=m
7999 +CONFIG_LEDS_LM3642=m
8000 +CONFIG_LEDS_LM3601X=m
8001 +CONFIG_LEDS_MT6323=m
8002 +CONFIG_LEDS_PCA9532=m
8003 +CONFIG_LEDS_PCA9532_GPIO=y
8004 +CONFIG_LEDS_GPIO=m
8005 +CONFIG_LEDS_LP3944=m
8006 +CONFIG_LEDS_LP3952=m
8007 +CONFIG_LEDS_LP50XX=m
8008 +CONFIG_LEDS_LP8788=m
8009 +CONFIG_LEDS_CLEVO_MAIL=m
8010 +CONFIG_LEDS_PCA955X=m
8011 +CONFIG_LEDS_PCA955X_GPIO=y
8012 +CONFIG_LEDS_PCA963X=m
8013 +CONFIG_LEDS_WM831X_STATUS=m
8014 +CONFIG_LEDS_WM8350=m
8015 +CONFIG_LEDS_DA903X=m
8016 +CONFIG_LEDS_DA9052=m
8017 +CONFIG_LEDS_DAC124S085=m
8018 +CONFIG_LEDS_PWM=m
8019 +CONFIG_LEDS_REGULATOR=m
8020 +CONFIG_LEDS_BD2802=m
8021 +CONFIG_LEDS_INTEL_SS4200=m
8022 +CONFIG_LEDS_ADP5520=m
8023 +CONFIG_LEDS_MC13783=m
8024 +CONFIG_LEDS_TCA6507=m
8025 +CONFIG_LEDS_TLC591XX=m
8026 +CONFIG_LEDS_MAX8997=m
8027 +CONFIG_LEDS_LM355x=m
8028 +CONFIG_LEDS_MENF21BMC=m
8031 +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
8033 +CONFIG_LEDS_BLINKM=m
8034 +CONFIG_LEDS_MLXCPLD=m
8035 +CONFIG_LEDS_MLXREG=m
8036 +CONFIG_LEDS_USER=m
8037 +CONFIG_LEDS_NIC78BX=m
8038 +CONFIG_LEDS_TI_LMU_COMMON=m
8039 +CONFIG_LEDS_LM36274=m
8040 +CONFIG_LEDS_TPS6105X=m
8041 +CONFIG_LEDS_SGM3140=m
8044 +# Flash and Torch LED drivers
8046 +CONFIG_LEDS_RT8515=m
8049 +# LED Triggers
8051 +CONFIG_LEDS_TRIGGERS=y
8052 +CONFIG_LEDS_TRIGGER_TIMER=m
8053 +CONFIG_LEDS_TRIGGER_ONESHOT=m
8054 +CONFIG_LEDS_TRIGGER_DISK=y
8055 +CONFIG_LEDS_TRIGGER_MTD=y
8056 +CONFIG_LEDS_TRIGGER_HEARTBEAT=m
8057 +CONFIG_LEDS_TRIGGER_BACKLIGHT=m
8058 +CONFIG_LEDS_TRIGGER_CPU=y
8059 +CONFIG_LEDS_TRIGGER_ACTIVITY=m
8060 +CONFIG_LEDS_TRIGGER_GPIO=m
8061 +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
8064 +# iptables trigger is under Netfilter config (LED target)
8066 +CONFIG_LEDS_TRIGGER_TRANSIENT=m
8067 +CONFIG_LEDS_TRIGGER_CAMERA=m
8068 +CONFIG_LEDS_TRIGGER_PANIC=y
8069 +CONFIG_LEDS_TRIGGER_NETDEV=m
8070 +CONFIG_LEDS_TRIGGER_PATTERN=m
8071 +CONFIG_LEDS_TRIGGER_AUDIO=m
8072 +CONFIG_LEDS_TRIGGER_TTY=m
8075 +# LED Blink
8077 +CONFIG_LEDS_BLINK=y
8078 +# CONFIG_ACCESSIBILITY is not set
8079 +CONFIG_INFINIBAND=m
8080 +CONFIG_INFINIBAND_USER_MAD=m
8081 +CONFIG_INFINIBAND_USER_ACCESS=m
8082 +CONFIG_INFINIBAND_USER_MEM=y
8083 +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
8084 +CONFIG_INFINIBAND_ADDR_TRANS=y
8085 +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
8086 +CONFIG_INFINIBAND_VIRT_DMA=y
8087 +CONFIG_INFINIBAND_MTHCA=m
8088 +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
8089 +CONFIG_INFINIBAND_QIB=m
8090 +CONFIG_INFINIBAND_QIB_DCA=y
8091 +CONFIG_INFINIBAND_CXGB4=m
8092 +CONFIG_INFINIBAND_EFA=m
8093 +CONFIG_INFINIBAND_I40IW=m
8094 +CONFIG_MLX4_INFINIBAND=m
8095 +CONFIG_MLX5_INFINIBAND=m
8096 +CONFIG_INFINIBAND_OCRDMA=m
8097 +CONFIG_INFINIBAND_VMWARE_PVRDMA=m
8098 +CONFIG_INFINIBAND_USNIC=m
8099 +CONFIG_INFINIBAND_BNXT_RE=m
8100 +CONFIG_INFINIBAND_HFI1=m
8101 +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
8102 +# CONFIG_SDMA_VERBOSITY is not set
8103 +CONFIG_INFINIBAND_QEDR=m
8104 +CONFIG_INFINIBAND_RDMAVT=m
8105 +CONFIG_RDMA_RXE=m
8106 +CONFIG_RDMA_SIW=m
8107 +CONFIG_INFINIBAND_IPOIB=m
8108 +CONFIG_INFINIBAND_IPOIB_CM=y
8109 +# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
8110 +CONFIG_INFINIBAND_SRP=m
8111 +CONFIG_INFINIBAND_SRPT=m
8112 +CONFIG_INFINIBAND_ISER=m
8113 +CONFIG_INFINIBAND_ISERT=m
8114 +CONFIG_INFINIBAND_RTRS=m
8115 +CONFIG_INFINIBAND_RTRS_CLIENT=m
8116 +CONFIG_INFINIBAND_RTRS_SERVER=m
8117 +CONFIG_INFINIBAND_OPA_VNIC=m
8118 +CONFIG_EDAC_ATOMIC_SCRUB=y
8119 +CONFIG_EDAC_SUPPORT=y
8120 +CONFIG_EDAC=y
8121 +# CONFIG_EDAC_LEGACY_SYSFS is not set
8122 +# CONFIG_EDAC_DEBUG is not set
8123 +CONFIG_EDAC_DECODE_MCE=m
8124 +CONFIG_EDAC_GHES=y
8125 +CONFIG_EDAC_AMD64=m
8126 +CONFIG_EDAC_E752X=m
8127 +CONFIG_EDAC_I82975X=m
8128 +CONFIG_EDAC_I3000=m
8129 +CONFIG_EDAC_I3200=m
8130 +CONFIG_EDAC_IE31200=m
8131 +CONFIG_EDAC_X38=m
8132 +CONFIG_EDAC_I5400=m
8133 +CONFIG_EDAC_I7CORE=m
8134 +CONFIG_EDAC_I5000=m
8135 +CONFIG_EDAC_I5100=m
8136 +CONFIG_EDAC_I7300=m
8137 +CONFIG_EDAC_SBRIDGE=m
8138 +CONFIG_EDAC_SKX=m
8139 +CONFIG_EDAC_I10NM=m
8140 +CONFIG_EDAC_PND2=m
8141 +CONFIG_EDAC_IGEN6=m
8142 +CONFIG_RTC_LIB=y
8143 +CONFIG_RTC_MC146818_LIB=y
8144 +CONFIG_RTC_CLASS=y
8145 +CONFIG_RTC_HCTOSYS=y
8146 +CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
8147 +CONFIG_RTC_SYSTOHC=y
8148 +CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
8149 +# CONFIG_RTC_DEBUG is not set
8150 +CONFIG_RTC_NVMEM=y
8153 +# RTC interfaces
8155 +CONFIG_RTC_INTF_SYSFS=y
8156 +CONFIG_RTC_INTF_PROC=y
8157 +CONFIG_RTC_INTF_DEV=y
8158 +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
8159 +# CONFIG_RTC_DRV_TEST is not set
8162 +# I2C RTC drivers
8164 +CONFIG_RTC_DRV_88PM860X=m
8165 +CONFIG_RTC_DRV_88PM80X=m
8166 +CONFIG_RTC_DRV_ABB5ZES3=m
8167 +CONFIG_RTC_DRV_ABEOZ9=m
8168 +CONFIG_RTC_DRV_ABX80X=m
8169 +CONFIG_RTC_DRV_DS1307=m
8170 +CONFIG_RTC_DRV_DS1307_CENTURY=y
8171 +CONFIG_RTC_DRV_DS1374=m
8172 +CONFIG_RTC_DRV_DS1374_WDT=y
8173 +CONFIG_RTC_DRV_DS1672=m
8174 +CONFIG_RTC_DRV_LP8788=m
8175 +CONFIG_RTC_DRV_MAX6900=m
8176 +CONFIG_RTC_DRV_MAX8907=m
8177 +CONFIG_RTC_DRV_MAX8925=m
8178 +CONFIG_RTC_DRV_MAX8998=m
8179 +CONFIG_RTC_DRV_MAX8997=m
8180 +CONFIG_RTC_DRV_RS5C372=m
8181 +CONFIG_RTC_DRV_ISL1208=m
8182 +CONFIG_RTC_DRV_ISL12022=m
8183 +CONFIG_RTC_DRV_X1205=m
8184 +CONFIG_RTC_DRV_PCF8523=m
8185 +CONFIG_RTC_DRV_PCF85063=m
8186 +CONFIG_RTC_DRV_PCF85363=m
8187 +CONFIG_RTC_DRV_PCF8563=m
8188 +CONFIG_RTC_DRV_PCF8583=m
8189 +CONFIG_RTC_DRV_M41T80=m
8190 +CONFIG_RTC_DRV_M41T80_WDT=y
8191 +CONFIG_RTC_DRV_BQ32K=m
8192 +CONFIG_RTC_DRV_PALMAS=m
8193 +CONFIG_RTC_DRV_TPS6586X=m
8194 +CONFIG_RTC_DRV_TPS65910=m
8195 +CONFIG_RTC_DRV_TPS80031=m
8196 +CONFIG_RTC_DRV_RC5T583=m
8197 +CONFIG_RTC_DRV_S35390A=m
8198 +CONFIG_RTC_DRV_FM3130=m
8199 +CONFIG_RTC_DRV_RX8010=m
8200 +CONFIG_RTC_DRV_RX8581=m
8201 +CONFIG_RTC_DRV_RX8025=m
8202 +CONFIG_RTC_DRV_EM3027=m
8203 +CONFIG_RTC_DRV_RV3028=m
8204 +CONFIG_RTC_DRV_RV3032=m
8205 +CONFIG_RTC_DRV_RV8803=m
8206 +CONFIG_RTC_DRV_S5M=m
8207 +CONFIG_RTC_DRV_SD3078=m
8210 +# SPI RTC drivers
8212 +CONFIG_RTC_DRV_M41T93=m
8213 +CONFIG_RTC_DRV_M41T94=m
8214 +CONFIG_RTC_DRV_DS1302=m
8215 +CONFIG_RTC_DRV_DS1305=m
8216 +CONFIG_RTC_DRV_DS1343=m
8217 +CONFIG_RTC_DRV_DS1347=m
8218 +CONFIG_RTC_DRV_DS1390=m
8219 +CONFIG_RTC_DRV_MAX6916=m
8220 +CONFIG_RTC_DRV_R9701=m
8221 +CONFIG_RTC_DRV_RX4581=m
8222 +CONFIG_RTC_DRV_RS5C348=m
8223 +CONFIG_RTC_DRV_MAX6902=m
8224 +CONFIG_RTC_DRV_PCF2123=m
8225 +CONFIG_RTC_DRV_MCP795=m
8226 +CONFIG_RTC_I2C_AND_SPI=y
8229 +# SPI and I2C RTC drivers
8231 +CONFIG_RTC_DRV_DS3232=m
8232 +CONFIG_RTC_DRV_DS3232_HWMON=y
8233 +CONFIG_RTC_DRV_PCF2127=m
8234 +CONFIG_RTC_DRV_RV3029C2=m
8235 +CONFIG_RTC_DRV_RV3029_HWMON=y
8236 +CONFIG_RTC_DRV_RX6110=m
8239 +# Platform RTC drivers
8241 +CONFIG_RTC_DRV_CMOS=y
8242 +CONFIG_RTC_DRV_DS1286=m
8243 +CONFIG_RTC_DRV_DS1511=m
8244 +CONFIG_RTC_DRV_DS1553=m
8245 +CONFIG_RTC_DRV_DS1685_FAMILY=m
8246 +CONFIG_RTC_DRV_DS1685=y
8247 +# CONFIG_RTC_DRV_DS1689 is not set
8248 +# CONFIG_RTC_DRV_DS17285 is not set
8249 +# CONFIG_RTC_DRV_DS17485 is not set
8250 +# CONFIG_RTC_DRV_DS17885 is not set
8251 +CONFIG_RTC_DRV_DS1742=m
8252 +CONFIG_RTC_DRV_DS2404=m
8253 +CONFIG_RTC_DRV_DA9052=m
8254 +CONFIG_RTC_DRV_DA9055=m
8255 +CONFIG_RTC_DRV_DA9063=m
8256 +CONFIG_RTC_DRV_STK17TA8=m
8257 +CONFIG_RTC_DRV_M48T86=m
8258 +CONFIG_RTC_DRV_M48T35=m
8259 +CONFIG_RTC_DRV_M48T59=m
8260 +CONFIG_RTC_DRV_MSM6242=m
8261 +CONFIG_RTC_DRV_BQ4802=m
8262 +CONFIG_RTC_DRV_RP5C01=m
8263 +CONFIG_RTC_DRV_V3020=m
8264 +CONFIG_RTC_DRV_WM831X=m
8265 +CONFIG_RTC_DRV_WM8350=m
8266 +CONFIG_RTC_DRV_PCF50633=m
8267 +CONFIG_RTC_DRV_CROS_EC=m
8270 +# on-CPU RTC drivers
8272 +CONFIG_RTC_DRV_FTRTC010=m
8273 +CONFIG_RTC_DRV_PCAP=m
8274 +CONFIG_RTC_DRV_MC13XXX=m
8275 +CONFIG_RTC_DRV_MT6397=m
8278 +# HID Sensor RTC drivers
8280 +CONFIG_RTC_DRV_HID_SENSOR_TIME=m
8281 +CONFIG_RTC_DRV_WILCO_EC=m
8282 +CONFIG_DMADEVICES=y
8283 +# CONFIG_DMADEVICES_DEBUG is not set
8286 +# DMA Devices
8288 +CONFIG_DMA_ENGINE=y
8289 +CONFIG_DMA_VIRTUAL_CHANNELS=y
8290 +CONFIG_DMA_ACPI=y
8291 +CONFIG_ALTERA_MSGDMA=m
8292 +CONFIG_INTEL_IDMA64=m
8293 +CONFIG_INTEL_IDXD=m
8294 +CONFIG_INTEL_IDXD_SVM=y
8295 +CONFIG_INTEL_IOATDMA=m
8296 +CONFIG_PLX_DMA=m
8297 +CONFIG_XILINX_ZYNQMP_DPDMA=m
8298 +CONFIG_QCOM_HIDMA_MGMT=m
8299 +CONFIG_QCOM_HIDMA=m
8300 +CONFIG_DW_DMAC_CORE=m
8301 +CONFIG_DW_DMAC=m
8302 +CONFIG_DW_DMAC_PCI=m
8303 +CONFIG_DW_EDMA=m
8304 +CONFIG_DW_EDMA_PCIE=m
8305 +CONFIG_HSU_DMA=m
8306 +CONFIG_SF_PDMA=m
8307 +CONFIG_INTEL_LDMA=y
8310 +# DMA Clients
8312 +CONFIG_ASYNC_TX_DMA=y
8313 +# CONFIG_DMATEST is not set
8314 +CONFIG_DMA_ENGINE_RAID=y
8317 +# DMABUF options
8319 +CONFIG_SYNC_FILE=y
8320 +CONFIG_SW_SYNC=y
8321 +CONFIG_UDMABUF=y
8322 +# CONFIG_DMABUF_MOVE_NOTIFY is not set
8323 +# CONFIG_DMABUF_DEBUG is not set
8324 +# CONFIG_DMABUF_SELFTESTS is not set
8325 +CONFIG_DMABUF_HEAPS=y
8326 +CONFIG_DMABUF_HEAPS_SYSTEM=y
8327 +# end of DMABUF options
8329 +CONFIG_DCA=m
8330 +CONFIG_AUXDISPLAY=y
8331 +CONFIG_CHARLCD=m
8332 +CONFIG_HD44780_COMMON=m
8333 +CONFIG_HD44780=m
8334 +CONFIG_KS0108=m
8335 +CONFIG_KS0108_PORT=0x378
8336 +CONFIG_KS0108_DELAY=2
8337 +CONFIG_CFAG12864B=m
8338 +CONFIG_CFAG12864B_RATE=20
8339 +CONFIG_IMG_ASCII_LCD=m
8340 +CONFIG_LCD2S=m
8341 +CONFIG_PARPORT_PANEL=m
8342 +CONFIG_PANEL_PARPORT=0
8343 +CONFIG_PANEL_PROFILE=5
8344 +# CONFIG_PANEL_CHANGE_MESSAGE is not set
8345 +# CONFIG_CHARLCD_BL_OFF is not set
8346 +# CONFIG_CHARLCD_BL_ON is not set
8347 +CONFIG_CHARLCD_BL_FLASH=y
8348 +CONFIG_PANEL=m
8349 +CONFIG_UIO=m
8350 +CONFIG_UIO_CIF=m
8351 +CONFIG_UIO_PDRV_GENIRQ=m
8352 +CONFIG_UIO_DMEM_GENIRQ=m
8353 +CONFIG_UIO_AEC=m
8354 +CONFIG_UIO_SERCOS3=m
8355 +CONFIG_UIO_PCI_GENERIC=m
8356 +CONFIG_UIO_NETX=m
8357 +CONFIG_UIO_PRUSS=m
8358 +CONFIG_UIO_MF624=m
8359 +CONFIG_UIO_HV_GENERIC=m
8360 +CONFIG_VFIO_IOMMU_TYPE1=y
8361 +CONFIG_VFIO_VIRQFD=y
8362 +CONFIG_VFIO=y
8363 +CONFIG_VFIO_NOIOMMU=y
8364 +CONFIG_VFIO_PCI=y
8365 +CONFIG_VFIO_PCI_VGA=y
8366 +CONFIG_VFIO_PCI_MMAP=y
8367 +CONFIG_VFIO_PCI_INTX=y
8368 +CONFIG_VFIO_PCI_IGD=y
8369 +CONFIG_VFIO_MDEV=m
8370 +CONFIG_VFIO_MDEV_DEVICE=m
8371 +CONFIG_IRQ_BYPASS_MANAGER=y
8372 +CONFIG_VIRT_DRIVERS=y
8373 +CONFIG_VBOXGUEST=m
8374 +CONFIG_NITRO_ENCLAVES=m
8375 +CONFIG_ACRN_HSM=m
8376 +CONFIG_VIRTIO=y
8377 +CONFIG_VIRTIO_PCI_LIB=y
8378 +CONFIG_VIRTIO_MENU=y
8379 +CONFIG_VIRTIO_PCI=y
8380 +CONFIG_VIRTIO_PCI_LEGACY=y
8381 +CONFIG_VIRTIO_VDPA=m
8382 +CONFIG_VIRTIO_PMEM=m
8383 +CONFIG_VIRTIO_BALLOON=y
8384 +CONFIG_VIRTIO_MEM=m
8385 +CONFIG_VIRTIO_INPUT=m
8386 +CONFIG_VIRTIO_MMIO=y
8387 +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
8388 +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
8389 +CONFIG_VDPA=m
8390 +CONFIG_VDPA_SIM=m
8391 +CONFIG_VDPA_SIM_NET=m
8392 +CONFIG_IFCVF=m
8393 +CONFIG_MLX5_VDPA=y
8394 +CONFIG_MLX5_VDPA_NET=m
8395 +CONFIG_VHOST_IOTLB=m
8396 +CONFIG_VHOST_RING=m
8397 +CONFIG_VHOST=m
8398 +CONFIG_VHOST_MENU=y
8399 +CONFIG_VHOST_NET=m
8400 +CONFIG_VHOST_SCSI=m
8401 +CONFIG_VHOST_VSOCK=m
8402 +CONFIG_VHOST_VDPA=m
8403 +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
8406 +# Microsoft Hyper-V guest support
8408 +CONFIG_HYPERV=m
8409 +CONFIG_HYPERV_TIMER=y
8410 +CONFIG_HYPERV_UTILS=m
8411 +CONFIG_HYPERV_BALLOON=m
8412 +# end of Microsoft Hyper-V guest support
8415 +# Xen driver support
8417 +CONFIG_XEN_BALLOON=y
8418 +CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y
8419 +CONFIG_XEN_MEMORY_HOTPLUG_LIMIT=512
8420 +CONFIG_XEN_SCRUB_PAGES_DEFAULT=y
8421 +CONFIG_XEN_DEV_EVTCHN=m
8422 +CONFIG_XEN_BACKEND=y
8423 +CONFIG_XENFS=m
8424 +CONFIG_XEN_COMPAT_XENFS=y
8425 +CONFIG_XEN_SYS_HYPERVISOR=y
8426 +CONFIG_XEN_XENBUS_FRONTEND=y
8427 +CONFIG_XEN_GNTDEV=m
8428 +CONFIG_XEN_GNTDEV_DMABUF=y
8429 +CONFIG_XEN_GRANT_DEV_ALLOC=m
8430 +CONFIG_XEN_GRANT_DMA_ALLOC=y
8431 +CONFIG_SWIOTLB_XEN=y
8432 +CONFIG_XEN_PCIDEV_BACKEND=m
8433 +CONFIG_XEN_PVCALLS_FRONTEND=m
8434 +# CONFIG_XEN_PVCALLS_BACKEND is not set
8435 +CONFIG_XEN_SCSI_BACKEND=m
8436 +CONFIG_XEN_PRIVCMD=m
8437 +CONFIG_XEN_ACPI_PROCESSOR=y
8438 +CONFIG_XEN_MCE_LOG=y
8439 +CONFIG_XEN_HAVE_PVMMU=y
8440 +CONFIG_XEN_EFI=y
8441 +CONFIG_XEN_AUTO_XLATE=y
8442 +CONFIG_XEN_ACPI=y
8443 +CONFIG_XEN_SYMS=y
8444 +CONFIG_XEN_HAVE_VPMU=y
8445 +CONFIG_XEN_FRONT_PGDIR_SHBUF=m
8446 +CONFIG_XEN_UNPOPULATED_ALLOC=y
8447 +# end of Xen driver support
8449 +CONFIG_GREYBUS=m
8450 +CONFIG_GREYBUS_ES2=m
8451 +CONFIG_STAGING=y
8452 +CONFIG_PRISM2_USB=m
8453 +CONFIG_COMEDI=m
8454 +# CONFIG_COMEDI_DEBUG is not set
8455 +CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
8456 +CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
8457 +CONFIG_COMEDI_MISC_DRIVERS=y
8458 +CONFIG_COMEDI_BOND=m
8459 +CONFIG_COMEDI_TEST=m
8460 +CONFIG_COMEDI_PARPORT=m
8461 +CONFIG_COMEDI_ISA_DRIVERS=y
8462 +CONFIG_COMEDI_PCL711=m
8463 +CONFIG_COMEDI_PCL724=m
8464 +CONFIG_COMEDI_PCL726=m
8465 +CONFIG_COMEDI_PCL730=m
8466 +CONFIG_COMEDI_PCL812=m
8467 +CONFIG_COMEDI_PCL816=m
8468 +CONFIG_COMEDI_PCL818=m
8469 +CONFIG_COMEDI_PCM3724=m
8470 +CONFIG_COMEDI_AMPLC_DIO200_ISA=m
8471 +CONFIG_COMEDI_AMPLC_PC236_ISA=m
8472 +CONFIG_COMEDI_AMPLC_PC263_ISA=m
8473 +CONFIG_COMEDI_RTI800=m
8474 +CONFIG_COMEDI_RTI802=m
8475 +CONFIG_COMEDI_DAC02=m
8476 +CONFIG_COMEDI_DAS16M1=m
8477 +CONFIG_COMEDI_DAS08_ISA=m
8478 +CONFIG_COMEDI_DAS16=m
8479 +CONFIG_COMEDI_DAS800=m
8480 +CONFIG_COMEDI_DAS1800=m
8481 +CONFIG_COMEDI_DAS6402=m
8482 +CONFIG_COMEDI_DT2801=m
8483 +CONFIG_COMEDI_DT2811=m
8484 +CONFIG_COMEDI_DT2814=m
8485 +CONFIG_COMEDI_DT2815=m
8486 +CONFIG_COMEDI_DT2817=m
8487 +CONFIG_COMEDI_DT282X=m
8488 +CONFIG_COMEDI_DMM32AT=m
8489 +CONFIG_COMEDI_FL512=m
8490 +CONFIG_COMEDI_AIO_AIO12_8=m
8491 +CONFIG_COMEDI_AIO_IIRO_16=m
8492 +CONFIG_COMEDI_II_PCI20KC=m
8493 +CONFIG_COMEDI_C6XDIGIO=m
8494 +CONFIG_COMEDI_MPC624=m
8495 +CONFIG_COMEDI_ADQ12B=m
8496 +CONFIG_COMEDI_NI_AT_A2150=m
8497 +CONFIG_COMEDI_NI_AT_AO=m
8498 +CONFIG_COMEDI_NI_ATMIO=m
8499 +CONFIG_COMEDI_NI_ATMIO16D=m
8500 +CONFIG_COMEDI_NI_LABPC_ISA=m
8501 +CONFIG_COMEDI_PCMAD=m
8502 +CONFIG_COMEDI_PCMDA12=m
8503 +CONFIG_COMEDI_PCMMIO=m
8504 +CONFIG_COMEDI_PCMUIO=m
8505 +CONFIG_COMEDI_MULTIQ3=m
8506 +CONFIG_COMEDI_S526=m
8507 +CONFIG_COMEDI_PCI_DRIVERS=m
8508 +CONFIG_COMEDI_8255_PCI=m
8509 +CONFIG_COMEDI_ADDI_WATCHDOG=m
8510 +CONFIG_COMEDI_ADDI_APCI_1032=m
8511 +CONFIG_COMEDI_ADDI_APCI_1500=m
8512 +CONFIG_COMEDI_ADDI_APCI_1516=m
8513 +CONFIG_COMEDI_ADDI_APCI_1564=m
8514 +CONFIG_COMEDI_ADDI_APCI_16XX=m
8515 +CONFIG_COMEDI_ADDI_APCI_2032=m
8516 +CONFIG_COMEDI_ADDI_APCI_2200=m
8517 +CONFIG_COMEDI_ADDI_APCI_3120=m
8518 +CONFIG_COMEDI_ADDI_APCI_3501=m
8519 +CONFIG_COMEDI_ADDI_APCI_3XXX=m
8520 +CONFIG_COMEDI_ADL_PCI6208=m
8521 +CONFIG_COMEDI_ADL_PCI7X3X=m
8522 +CONFIG_COMEDI_ADL_PCI8164=m
8523 +CONFIG_COMEDI_ADL_PCI9111=m
8524 +CONFIG_COMEDI_ADL_PCI9118=m
8525 +CONFIG_COMEDI_ADV_PCI1710=m
8526 +CONFIG_COMEDI_ADV_PCI1720=m
8527 +CONFIG_COMEDI_ADV_PCI1723=m
8528 +CONFIG_COMEDI_ADV_PCI1724=m
8529 +CONFIG_COMEDI_ADV_PCI1760=m
8530 +CONFIG_COMEDI_ADV_PCI_DIO=m
8531 +CONFIG_COMEDI_AMPLC_DIO200_PCI=m
8532 +CONFIG_COMEDI_AMPLC_PC236_PCI=m
8533 +CONFIG_COMEDI_AMPLC_PC263_PCI=m
8534 +CONFIG_COMEDI_AMPLC_PCI224=m
8535 +CONFIG_COMEDI_AMPLC_PCI230=m
8536 +CONFIG_COMEDI_CONTEC_PCI_DIO=m
8537 +CONFIG_COMEDI_DAS08_PCI=m
8538 +CONFIG_COMEDI_DT3000=m
8539 +CONFIG_COMEDI_DYNA_PCI10XX=m
8540 +CONFIG_COMEDI_GSC_HPDI=m
8541 +CONFIG_COMEDI_MF6X4=m
8542 +CONFIG_COMEDI_ICP_MULTI=m
8543 +CONFIG_COMEDI_DAQBOARD2000=m
8544 +CONFIG_COMEDI_JR3_PCI=m
8545 +CONFIG_COMEDI_KE_COUNTER=m
8546 +CONFIG_COMEDI_CB_PCIDAS64=m
8547 +CONFIG_COMEDI_CB_PCIDAS=m
8548 +CONFIG_COMEDI_CB_PCIDDA=m
8549 +CONFIG_COMEDI_CB_PCIMDAS=m
8550 +CONFIG_COMEDI_CB_PCIMDDA=m
8551 +CONFIG_COMEDI_ME4000=m
8552 +CONFIG_COMEDI_ME_DAQ=m
8553 +CONFIG_COMEDI_NI_6527=m
8554 +CONFIG_COMEDI_NI_65XX=m
8555 +CONFIG_COMEDI_NI_660X=m
8556 +CONFIG_COMEDI_NI_670X=m
8557 +CONFIG_COMEDI_NI_LABPC_PCI=m
8558 +CONFIG_COMEDI_NI_PCIDIO=m
8559 +CONFIG_COMEDI_NI_PCIMIO=m
8560 +CONFIG_COMEDI_RTD520=m
8561 +CONFIG_COMEDI_S626=m
8562 +CONFIG_COMEDI_MITE=m
8563 +CONFIG_COMEDI_NI_TIOCMD=m
8564 +CONFIG_COMEDI_PCMCIA_DRIVERS=m
8565 +CONFIG_COMEDI_CB_DAS16_CS=m
8566 +CONFIG_COMEDI_DAS08_CS=m
8567 +CONFIG_COMEDI_NI_DAQ_700_CS=m
8568 +CONFIG_COMEDI_NI_DAQ_DIO24_CS=m
8569 +CONFIG_COMEDI_NI_LABPC_CS=m
8570 +CONFIG_COMEDI_NI_MIO_CS=m
8571 +CONFIG_COMEDI_QUATECH_DAQP_CS=m
8572 +CONFIG_COMEDI_USB_DRIVERS=m
8573 +CONFIG_COMEDI_DT9812=m
8574 +CONFIG_COMEDI_NI_USB6501=m
8575 +CONFIG_COMEDI_USBDUX=m
8576 +CONFIG_COMEDI_USBDUXFAST=m
8577 +CONFIG_COMEDI_USBDUXSIGMA=m
8578 +CONFIG_COMEDI_VMK80XX=m
8579 +CONFIG_COMEDI_8254=m
8580 +CONFIG_COMEDI_8255=m
8581 +CONFIG_COMEDI_8255_SA=m
8582 +CONFIG_COMEDI_KCOMEDILIB=m
8583 +CONFIG_COMEDI_AMPLC_DIO200=m
8584 +CONFIG_COMEDI_AMPLC_PC236=m
8585 +CONFIG_COMEDI_DAS08=m
8586 +CONFIG_COMEDI_ISADMA=m
8587 +CONFIG_COMEDI_NI_LABPC=m
8588 +CONFIG_COMEDI_NI_LABPC_ISADMA=m
8589 +CONFIG_COMEDI_NI_TIO=m
8590 +CONFIG_COMEDI_NI_ROUTING=m
8591 +CONFIG_RTL8192U=m
8592 +CONFIG_RTLLIB=m
8593 +CONFIG_RTLLIB_CRYPTO_CCMP=m
8594 +CONFIG_RTLLIB_CRYPTO_TKIP=m
8595 +CONFIG_RTLLIB_CRYPTO_WEP=m
8596 +CONFIG_RTL8192E=m
8597 +CONFIG_RTL8723BS=m
8598 +CONFIG_R8712U=m
8599 +CONFIG_R8188EU=m
8600 +CONFIG_88EU_AP_MODE=y
8601 +CONFIG_RTS5208=m
8602 +CONFIG_VT6655=m
8603 +CONFIG_VT6656=m
8606 +# IIO staging drivers
8610 +# Accelerometers
8612 +CONFIG_ADIS16203=m
8613 +CONFIG_ADIS16240=m
8614 +# end of Accelerometers
8617 +# Analog to digital converters
8619 +CONFIG_AD7816=m
8620 +CONFIG_AD7280=m
8621 +# end of Analog to digital converters
8624 +# Analog digital bi-direction converters
8626 +CONFIG_ADT7316=m
8627 +CONFIG_ADT7316_SPI=m
8628 +CONFIG_ADT7316_I2C=m
8629 +# end of Analog digital bi-direction converters
8632 +# Capacitance to digital converters
8634 +CONFIG_AD7150=m
8635 +CONFIG_AD7746=m
8636 +# end of Capacitance to digital converters
8639 +# Direct Digital Synthesis
8641 +CONFIG_AD9832=m
8642 +CONFIG_AD9834=m
8643 +# end of Direct Digital Synthesis
8646 +# Network Analyzer, Impedance Converters
8648 +CONFIG_AD5933=m
8649 +# end of Network Analyzer, Impedance Converters
8652 +# Active energy metering IC
8654 +CONFIG_ADE7854=m
8655 +CONFIG_ADE7854_I2C=m
8656 +CONFIG_ADE7854_SPI=m
8657 +# end of Active energy metering IC
8660 +# Resolver to digital converters
8662 +CONFIG_AD2S1210=m
8663 +# end of Resolver to digital converters
8664 +# end of IIO staging drivers
8666 +CONFIG_FB_SM750=m
8667 +CONFIG_STAGING_MEDIA=y
8668 +CONFIG_INTEL_ATOMISP=y
8669 +CONFIG_VIDEO_ATOMISP=m
8670 +# CONFIG_VIDEO_ATOMISP_ISP2401 is not set
8671 +CONFIG_VIDEO_ATOMISP_OV2722=m
8672 +CONFIG_VIDEO_ATOMISP_GC2235=m
8673 +CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER=m
8674 +CONFIG_VIDEO_ATOMISP_MT9M114=m
8675 +CONFIG_VIDEO_ATOMISP_GC0310=m
8676 +CONFIG_VIDEO_ATOMISP_OV2680=m
8677 +CONFIG_VIDEO_ATOMISP_OV5693=m
8678 +CONFIG_VIDEO_ATOMISP_LM3554=m
8679 +CONFIG_VIDEO_ZORAN=m
8680 +CONFIG_VIDEO_ZORAN_DC30=m
8681 +CONFIG_VIDEO_ZORAN_ZR36060=m
8682 +CONFIG_VIDEO_ZORAN_BUZ=m
8683 +CONFIG_VIDEO_ZORAN_DC10=m
8684 +CONFIG_VIDEO_ZORAN_LML33=m
8685 +CONFIG_VIDEO_ZORAN_LML33R10=m
8686 +CONFIG_VIDEO_ZORAN_AVS6EYES=m
8687 +CONFIG_VIDEO_IPU3_IMGU=m
8690 +# Android
8692 +CONFIG_ASHMEM=m
8693 +# end of Android
8695 +CONFIG_LTE_GDM724X=m
8696 +CONFIG_FIREWIRE_SERIAL=m
8697 +CONFIG_FWTTY_MAX_TOTAL_PORTS=64
8698 +CONFIG_FWTTY_MAX_CARD_PORTS=32
8699 +CONFIG_GS_FPGABOOT=m
8700 +CONFIG_UNISYSSPAR=y
8701 +CONFIG_UNISYS_VISORNIC=m
8702 +CONFIG_UNISYS_VISORINPUT=m
8703 +CONFIG_UNISYS_VISORHBA=m
8704 +CONFIG_FB_TFT=m
8705 +CONFIG_FB_TFT_AGM1264K_FL=m
8706 +CONFIG_FB_TFT_BD663474=m
8707 +CONFIG_FB_TFT_HX8340BN=m
8708 +CONFIG_FB_TFT_HX8347D=m
8709 +CONFIG_FB_TFT_HX8353D=m
8710 +CONFIG_FB_TFT_HX8357D=m
8711 +CONFIG_FB_TFT_ILI9163=m
8712 +CONFIG_FB_TFT_ILI9320=m
8713 +CONFIG_FB_TFT_ILI9325=m
8714 +CONFIG_FB_TFT_ILI9340=m
8715 +CONFIG_FB_TFT_ILI9341=m
8716 +CONFIG_FB_TFT_ILI9481=m
8717 +CONFIG_FB_TFT_ILI9486=m
8718 +CONFIG_FB_TFT_PCD8544=m
8719 +CONFIG_FB_TFT_RA8875=m
8720 +CONFIG_FB_TFT_S6D02A1=m
8721 +CONFIG_FB_TFT_S6D1121=m
8722 +CONFIG_FB_TFT_SEPS525=m
8723 +CONFIG_FB_TFT_SH1106=m
8724 +CONFIG_FB_TFT_SSD1289=m
8725 +CONFIG_FB_TFT_SSD1305=m
8726 +CONFIG_FB_TFT_SSD1306=m
8727 +CONFIG_FB_TFT_SSD1331=m
8728 +CONFIG_FB_TFT_SSD1351=m
8729 +CONFIG_FB_TFT_ST7735R=m
8730 +CONFIG_FB_TFT_ST7789V=m
8731 +CONFIG_FB_TFT_TINYLCD=m
8732 +CONFIG_FB_TFT_TLS8204=m
8733 +CONFIG_FB_TFT_UC1611=m
8734 +CONFIG_FB_TFT_UC1701=m
8735 +CONFIG_FB_TFT_UPD161704=m
8736 +CONFIG_FB_TFT_WATTEROTT=m
8737 +CONFIG_MOST_COMPONENTS=m
8738 +CONFIG_MOST_NET=m
8739 +CONFIG_MOST_SOUND=m
8740 +CONFIG_MOST_VIDEO=m
8741 +CONFIG_MOST_I2C=m
8742 +CONFIG_KS7010=m
8743 +CONFIG_GREYBUS_AUDIO=m
8744 +CONFIG_GREYBUS_AUDIO_APB_CODEC=m
8745 +CONFIG_GREYBUS_BOOTROM=m
8746 +CONFIG_GREYBUS_FIRMWARE=m
8747 +CONFIG_GREYBUS_HID=m
8748 +CONFIG_GREYBUS_LIGHT=m
8749 +CONFIG_GREYBUS_LOG=m
8750 +CONFIG_GREYBUS_LOOPBACK=m
8751 +CONFIG_GREYBUS_POWER=m
8752 +CONFIG_GREYBUS_RAW=m
8753 +CONFIG_GREYBUS_VIBRATOR=m
8754 +CONFIG_GREYBUS_BRIDGED_PHY=m
8755 +CONFIG_GREYBUS_GPIO=m
8756 +CONFIG_GREYBUS_I2C=m
8757 +CONFIG_GREYBUS_PWM=m
8758 +CONFIG_GREYBUS_SDIO=m
8759 +CONFIG_GREYBUS_SPI=m
8760 +CONFIG_GREYBUS_UART=m
8761 +CONFIG_GREYBUS_USB=m
8762 +CONFIG_PI433=m
8765 +# Gasket devices
8767 +CONFIG_STAGING_GASKET_FRAMEWORK=m
8768 +CONFIG_STAGING_APEX_DRIVER=m
8769 +# end of Gasket devices
8771 +CONFIG_FIELDBUS_DEV=m
8772 +CONFIG_KPC2000=y
8773 +CONFIG_KPC2000_CORE=m
8774 +CONFIG_KPC2000_SPI=m
8775 +CONFIG_KPC2000_I2C=m
8776 +CONFIG_KPC2000_DMA=m
8777 +CONFIG_QLGE=m
8778 +CONFIG_WIMAX=m
8779 +CONFIG_WIMAX_DEBUG_LEVEL=8
8780 +CONFIG_WIMAX_I2400M=m
8781 +CONFIG_WIMAX_I2400M_USB=m
8782 +CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
8783 +CONFIG_WFX=m
8784 +CONFIG_SPMI_HISI3670=m
8785 +CONFIG_X86_PLATFORM_DEVICES=y
8786 +CONFIG_ACPI_WMI=m
8787 +CONFIG_WMI_BMOF=m
8788 +CONFIG_HUAWEI_WMI=m
8789 +CONFIG_UV_SYSFS=m
8790 +CONFIG_INTEL_WMI_SBL_FW_UPDATE=m
8791 +CONFIG_INTEL_WMI_THUNDERBOLT=m
8792 +CONFIG_MXM_WMI=m
8793 +CONFIG_PEAQ_WMI=m
8794 +CONFIG_XIAOMI_WMI=m
8795 +CONFIG_ACERHDF=m
8796 +CONFIG_ACER_WIRELESS=m
8797 +CONFIG_ACER_WMI=m
8798 +CONFIG_AMD_PMC=m
8799 +CONFIG_APPLE_GMUX=m
8800 +CONFIG_ASUS_LAPTOP=m
8801 +CONFIG_ASUS_WIRELESS=m
8802 +CONFIG_ASUS_WMI=m
8803 +CONFIG_ASUS_NB_WMI=m
8804 +CONFIG_EEEPC_LAPTOP=m
8805 +CONFIG_EEEPC_WMI=m
8806 +CONFIG_X86_PLATFORM_DRIVERS_DELL=y
8807 +CONFIG_ALIENWARE_WMI=m
8808 +CONFIG_DCDBAS=m
8809 +CONFIG_DELL_LAPTOP=m
8810 +CONFIG_DELL_RBU=m
8811 +CONFIG_DELL_RBTN=m
8812 +CONFIG_DELL_SMBIOS=m
8813 +CONFIG_DELL_SMBIOS_WMI=y
8814 +CONFIG_DELL_SMBIOS_SMM=y
8815 +CONFIG_DELL_SMO8800=m
8816 +CONFIG_DELL_WMI=m
8817 +CONFIG_DELL_WMI_AIO=m
8818 +CONFIG_DELL_WMI_DESCRIPTOR=m
8819 +CONFIG_DELL_WMI_LED=m
8820 +CONFIG_DELL_WMI_SYSMAN=m
8821 +CONFIG_AMILO_RFKILL=m
8822 +CONFIG_FUJITSU_LAPTOP=m
8823 +CONFIG_FUJITSU_TABLET=m
8824 +CONFIG_GPD_POCKET_FAN=m
8825 +CONFIG_HP_ACCEL=m
8826 +CONFIG_HP_WIRELESS=m
8827 +CONFIG_HP_WMI=m
8828 +CONFIG_IBM_RTL=m
8829 +CONFIG_IDEAPAD_LAPTOP=m
8830 +CONFIG_SENSORS_HDAPS=m
8831 +CONFIG_THINKPAD_ACPI=m
8832 +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
8833 +CONFIG_THINKPAD_ACPI_DEBUGFACILITIES=y
8834 +# CONFIG_THINKPAD_ACPI_DEBUG is not set
8835 +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
8836 +CONFIG_THINKPAD_ACPI_VIDEO=y
8837 +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
8838 +CONFIG_INTEL_ATOMISP2_LED=m
8839 +CONFIG_INTEL_CHT_INT33FE=m
8840 +CONFIG_INTEL_HID_EVENT=m
8841 +CONFIG_INTEL_INT0002_VGPIO=m
8842 +CONFIG_INTEL_MENLOW=m
8843 +CONFIG_INTEL_OAKTRAIL=m
8844 +CONFIG_INTEL_VBTN=m
8845 +CONFIG_MSI_LAPTOP=m
8846 +CONFIG_MSI_WMI=m
8847 +CONFIG_PCENGINES_APU2=m
8848 +CONFIG_SAMSUNG_LAPTOP=m
8849 +CONFIG_SAMSUNG_Q10=m
8850 +CONFIG_ACPI_TOSHIBA=m
8851 +CONFIG_TOSHIBA_BT_RFKILL=m
8852 +CONFIG_TOSHIBA_HAPS=m
8853 +# CONFIG_TOSHIBA_WMI is not set
8854 +CONFIG_ACPI_CMPC=m
8855 +CONFIG_COMPAL_LAPTOP=m
8856 +CONFIG_LG_LAPTOP=m
8857 +CONFIG_PANASONIC_LAPTOP=m
8858 +CONFIG_SONY_LAPTOP=m
8859 +CONFIG_SONYPI_COMPAT=y
8860 +CONFIG_SYSTEM76_ACPI=m
8861 +CONFIG_TOPSTAR_LAPTOP=m
8862 +CONFIG_I2C_MULTI_INSTANTIATE=m
8863 +CONFIG_MLX_PLATFORM=m
8864 +CONFIG_TOUCHSCREEN_DMI=y
8865 +CONFIG_INTEL_IPS=m
8866 +CONFIG_INTEL_RST=m
8867 +CONFIG_INTEL_SMARTCONNECT=m
8870 +# Intel Speed Select Technology interface support
8872 +CONFIG_INTEL_SPEED_SELECT_INTERFACE=m
8873 +# end of Intel Speed Select Technology interface support
8875 +CONFIG_INTEL_TURBO_MAX_3=y
8876 +CONFIG_INTEL_UNCORE_FREQ_CONTROL=m
8877 +CONFIG_INTEL_BXTWC_PMIC_TMU=m
8878 +CONFIG_INTEL_CHTDC_TI_PWRBTN=m
8879 +CONFIG_INTEL_MRFLD_PWRBTN=m
8880 +CONFIG_INTEL_PMC_CORE=y
8881 +CONFIG_INTEL_PMT_CLASS=m
8882 +CONFIG_INTEL_PMT_TELEMETRY=m
8883 +CONFIG_INTEL_PMT_CRASHLOG=m
8884 +CONFIG_INTEL_PUNIT_IPC=m
8885 +CONFIG_INTEL_SCU_IPC=y
8886 +CONFIG_INTEL_SCU=y
8887 +CONFIG_INTEL_SCU_PCI=y
8888 +CONFIG_INTEL_SCU_PLATFORM=m
8889 +CONFIG_INTEL_SCU_IPC_UTIL=m
8890 +CONFIG_INTEL_TELEMETRY=m
8891 +CONFIG_PMC_ATOM=y
8892 +CONFIG_CHROME_PLATFORMS=y
8893 +CONFIG_CHROMEOS_LAPTOP=m
8894 +CONFIG_CHROMEOS_PSTORE=m
8895 +CONFIG_CHROMEOS_TBMC=m
8896 +CONFIG_CROS_EC=m
8897 +CONFIG_CROS_EC_I2C=m
8898 +CONFIG_CROS_EC_ISHTP=m
8899 +CONFIG_CROS_EC_SPI=m
8900 +CONFIG_CROS_EC_LPC=m
8901 +CONFIG_CROS_EC_PROTO=y
8902 +CONFIG_CROS_KBD_LED_BACKLIGHT=m
8903 +CONFIG_CROS_EC_CHARDEV=m
8904 +CONFIG_CROS_EC_LIGHTBAR=m
8905 +CONFIG_CROS_EC_DEBUGFS=m
8906 +CONFIG_CROS_EC_SENSORHUB=m
8907 +CONFIG_CROS_EC_SYSFS=m
8908 +CONFIG_CROS_EC_TYPEC=m
8909 +CONFIG_CROS_USBPD_LOGGER=m
8910 +CONFIG_CROS_USBPD_NOTIFY=m
8911 +CONFIG_WILCO_EC=m
8912 +CONFIG_WILCO_EC_DEBUGFS=m
8913 +CONFIG_WILCO_EC_EVENTS=m
8914 +CONFIG_WILCO_EC_TELEMETRY=m
8915 +CONFIG_MELLANOX_PLATFORM=y
8916 +CONFIG_MLXREG_HOTPLUG=m
8917 +CONFIG_MLXREG_IO=m
8918 +CONFIG_SURFACE_PLATFORMS=y
8919 +CONFIG_SURFACE3_WMI=m
8920 +CONFIG_SURFACE_3_BUTTON=m
8921 +CONFIG_SURFACE_3_POWER_OPREGION=m
8922 +CONFIG_SURFACE_ACPI_NOTIFY=m
8923 +CONFIG_SURFACE_AGGREGATOR_CDEV=m
8924 +CONFIG_SURFACE_GPE=m
8925 +CONFIG_SURFACE_HOTPLUG=m
8926 +CONFIG_SURFACE_PRO3_BUTTON=m
8927 +CONFIG_SURFACE_AGGREGATOR=m
8928 +CONFIG_SURFACE_AGGREGATOR_BUS=y
8929 +# CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION is not set
8930 +CONFIG_HAVE_CLK=y
8931 +CONFIG_CLKDEV_LOOKUP=y
8932 +CONFIG_HAVE_CLK_PREPARE=y
8933 +CONFIG_COMMON_CLK=y
8934 +CONFIG_COMMON_CLK_WM831X=m
8935 +CONFIG_COMMON_CLK_MAX9485=m
8936 +CONFIG_COMMON_CLK_SI5341=m
8937 +CONFIG_COMMON_CLK_SI5351=m
8938 +CONFIG_COMMON_CLK_SI544=m
8939 +CONFIG_COMMON_CLK_CDCE706=m
8940 +CONFIG_COMMON_CLK_CS2000_CP=m
8941 +CONFIG_COMMON_CLK_S2MPS11=m
8942 +CONFIG_CLK_TWL6040=m
8943 +CONFIG_COMMON_CLK_PALMAS=m
8944 +CONFIG_COMMON_CLK_PWM=m
8945 +CONFIG_XILINX_VCU=m
8946 +CONFIG_HWSPINLOCK=y
8949 +# Clock Source drivers
8951 +CONFIG_CLKEVT_I8253=y
8952 +CONFIG_I8253_LOCK=y
8953 +CONFIG_CLKBLD_I8253=y
8954 +# end of Clock Source drivers
8956 +CONFIG_MAILBOX=y
8957 +CONFIG_PCC=y
8958 +CONFIG_ALTERA_MBOX=m
8959 +CONFIG_IOMMU_IOVA=y
8960 +CONFIG_IOASID=y
8961 +CONFIG_IOMMU_API=y
8962 +CONFIG_IOMMU_SUPPORT=y
8965 +# Generic IOMMU Pagetable Support
8967 +CONFIG_IOMMU_IO_PGTABLE=y
8968 +# end of Generic IOMMU Pagetable Support
8970 +# CONFIG_IOMMU_DEBUGFS is not set
8971 +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
8972 +CONFIG_IOMMU_DMA=y
8973 +CONFIG_AMD_IOMMU=y
8974 +CONFIG_AMD_IOMMU_V2=m
8975 +CONFIG_DMAR_TABLE=y
8976 +CONFIG_INTEL_IOMMU=y
8977 +CONFIG_INTEL_IOMMU_SVM=y
8978 +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
8979 +CONFIG_INTEL_IOMMU_FLOPPY_WA=y
8980 +# CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON is not set
8981 +CONFIG_IRQ_REMAP=y
8982 +CONFIG_HYPERV_IOMMU=y
8985 +# Remoteproc drivers
8987 +CONFIG_REMOTEPROC=y
8988 +CONFIG_REMOTEPROC_CDEV=y
8989 +# end of Remoteproc drivers
8992 +# Rpmsg drivers
8994 +CONFIG_RPMSG=m
8995 +CONFIG_RPMSG_CHAR=m
8996 +CONFIG_RPMSG_NS=m
8997 +CONFIG_RPMSG_QCOM_GLINK=m
8998 +CONFIG_RPMSG_QCOM_GLINK_RPM=m
8999 +CONFIG_RPMSG_VIRTIO=m
9000 +# end of Rpmsg drivers
9002 +CONFIG_SOUNDWIRE=m
9005 +# SoundWire Devices
9007 +CONFIG_SOUNDWIRE_CADENCE=m
9008 +CONFIG_SOUNDWIRE_INTEL=m
9009 +CONFIG_SOUNDWIRE_QCOM=m
9010 +CONFIG_SOUNDWIRE_GENERIC_ALLOCATION=m
9013 +# SOC (System On Chip) specific Drivers
9017 +# Amlogic SoC drivers
9019 +# end of Amlogic SoC drivers
9022 +# Broadcom SoC drivers
9024 +# end of Broadcom SoC drivers
9027 +# NXP/Freescale QorIQ SoC drivers
9029 +# end of NXP/Freescale QorIQ SoC drivers
9032 +# i.MX SoC drivers
9034 +# end of i.MX SoC drivers
9037 +# Enable LiteX SoC Builder specific drivers
9039 +# end of Enable LiteX SoC Builder specific drivers
9042 +# Qualcomm SoC drivers
9044 +CONFIG_QCOM_QMI_HELPERS=m
9045 +# end of Qualcomm SoC drivers
9047 +CONFIG_SOC_TI=y
9050 +# Xilinx SoC drivers
9052 +# end of Xilinx SoC drivers
9053 +# end of SOC (System On Chip) specific Drivers
9055 +CONFIG_PM_DEVFREQ=y
9058 +# DEVFREQ Governors
9060 +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
9061 +CONFIG_DEVFREQ_GOV_PERFORMANCE=y
9062 +CONFIG_DEVFREQ_GOV_POWERSAVE=y
9063 +CONFIG_DEVFREQ_GOV_USERSPACE=y
9064 +CONFIG_DEVFREQ_GOV_PASSIVE=y
9067 +# DEVFREQ Drivers
9069 +CONFIG_PM_DEVFREQ_EVENT=y
9070 +CONFIG_EXTCON=y
9073 +# Extcon Device Drivers
9075 +CONFIG_EXTCON_ADC_JACK=m
9076 +CONFIG_EXTCON_ARIZONA=m
9077 +CONFIG_EXTCON_AXP288=m
9078 +CONFIG_EXTCON_FSA9480=m
9079 +CONFIG_EXTCON_GPIO=m
9080 +CONFIG_EXTCON_INTEL_INT3496=m
9081 +CONFIG_EXTCON_INTEL_CHT_WC=m
9082 +CONFIG_EXTCON_INTEL_MRFLD=m
9083 +CONFIG_EXTCON_MAX14577=m
9084 +CONFIG_EXTCON_MAX3355=m
9085 +CONFIG_EXTCON_MAX77693=m
9086 +CONFIG_EXTCON_MAX77843=m
9087 +CONFIG_EXTCON_MAX8997=m
9088 +CONFIG_EXTCON_PALMAS=m
9089 +CONFIG_EXTCON_PTN5150=m
9090 +CONFIG_EXTCON_RT8973A=m
9091 +CONFIG_EXTCON_SM5502=m
9092 +CONFIG_EXTCON_USB_GPIO=m
9093 +CONFIG_EXTCON_USBC_CROS_EC=m
9094 +CONFIG_EXTCON_USBC_TUSB320=m
9095 +CONFIG_MEMORY=y
9096 +CONFIG_FPGA_DFL_EMIF=m
9097 +CONFIG_IIO=m
9098 +CONFIG_IIO_BUFFER=y
9099 +CONFIG_IIO_BUFFER_CB=m
9100 +CONFIG_IIO_BUFFER_DMA=m
9101 +CONFIG_IIO_BUFFER_DMAENGINE=m
9102 +CONFIG_IIO_BUFFER_HW_CONSUMER=m
9103 +CONFIG_IIO_KFIFO_BUF=m
9104 +CONFIG_IIO_TRIGGERED_BUFFER=m
9105 +CONFIG_IIO_CONFIGFS=m
9106 +CONFIG_IIO_TRIGGER=y
9107 +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
9108 +CONFIG_IIO_SW_DEVICE=m
9109 +CONFIG_IIO_SW_TRIGGER=m
9110 +CONFIG_IIO_TRIGGERED_EVENT=m
9113 +# Accelerometers
9115 +CONFIG_ADIS16201=m
9116 +CONFIG_ADIS16209=m
9117 +CONFIG_ADXL372=m
9118 +CONFIG_ADXL372_SPI=m
9119 +CONFIG_ADXL372_I2C=m
9120 +CONFIG_BMA220=m
9121 +CONFIG_BMA400=m
9122 +CONFIG_BMA400_I2C=m
9123 +CONFIG_BMA400_SPI=m
9124 +CONFIG_BMC150_ACCEL=m
9125 +CONFIG_BMC150_ACCEL_I2C=m
9126 +CONFIG_BMC150_ACCEL_SPI=m
9127 +CONFIG_DA280=m
9128 +CONFIG_DA311=m
9129 +CONFIG_DMARD09=m
9130 +CONFIG_DMARD10=m
9131 +CONFIG_HID_SENSOR_ACCEL_3D=m
9132 +CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m
9133 +CONFIG_IIO_ST_ACCEL_3AXIS=m
9134 +CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
9135 +CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
9136 +CONFIG_KXSD9=m
9137 +CONFIG_KXSD9_SPI=m
9138 +CONFIG_KXSD9_I2C=m
9139 +CONFIG_KXCJK1013=m
9140 +CONFIG_MC3230=m
9141 +CONFIG_MMA7455=m
9142 +CONFIG_MMA7455_I2C=m
9143 +CONFIG_MMA7455_SPI=m
9144 +CONFIG_MMA7660=m
9145 +CONFIG_MMA8452=m
9146 +CONFIG_MMA9551_CORE=m
9147 +CONFIG_MMA9551=m
9148 +CONFIG_MMA9553=m
9149 +CONFIG_MXC4005=m
9150 +CONFIG_MXC6255=m
9151 +CONFIG_SCA3000=m
9152 +CONFIG_STK8312=m
9153 +CONFIG_STK8BA50=m
9154 +# end of Accelerometers
9157 +# Analog to digital converters
9159 +CONFIG_AD_SIGMA_DELTA=m
9160 +CONFIG_AD7091R5=m
9161 +CONFIG_AD7124=m
9162 +CONFIG_AD7192=m
9163 +CONFIG_AD7266=m
9164 +CONFIG_AD7291=m
9165 +CONFIG_AD7292=m
9166 +CONFIG_AD7298=m
9167 +CONFIG_AD7476=m
9168 +CONFIG_AD7606=m
9169 +CONFIG_AD7606_IFACE_PARALLEL=m
9170 +CONFIG_AD7606_IFACE_SPI=m
9171 +CONFIG_AD7766=m
9172 +CONFIG_AD7768_1=m
9173 +CONFIG_AD7780=m
9174 +CONFIG_AD7791=m
9175 +CONFIG_AD7793=m
9176 +CONFIG_AD7887=m
9177 +CONFIG_AD7923=m
9178 +CONFIG_AD7949=m
9179 +CONFIG_AD799X=m
9180 +CONFIG_AD9467=m
9181 +CONFIG_ADI_AXI_ADC=m
9182 +CONFIG_AXP20X_ADC=m
9183 +CONFIG_AXP288_ADC=m
9184 +CONFIG_CC10001_ADC=m
9185 +CONFIG_DA9150_GPADC=m
9186 +CONFIG_DLN2_ADC=m
9187 +CONFIG_HI8435=m
9188 +CONFIG_HX711=m
9189 +CONFIG_INA2XX_ADC=m
9190 +CONFIG_INTEL_MRFLD_ADC=m
9191 +CONFIG_LP8788_ADC=m
9192 +CONFIG_LTC2471=m
9193 +CONFIG_LTC2485=m
9194 +CONFIG_LTC2496=m
9195 +CONFIG_LTC2497=m
9196 +CONFIG_MAX1027=m
9197 +CONFIG_MAX11100=m
9198 +CONFIG_MAX1118=m
9199 +CONFIG_MAX1241=m
9200 +CONFIG_MAX1363=m
9201 +CONFIG_MAX9611=m
9202 +CONFIG_MCP320X=m
9203 +CONFIG_MCP3422=m
9204 +CONFIG_MCP3911=m
9205 +CONFIG_MEDIATEK_MT6360_ADC=m
9206 +CONFIG_MEN_Z188_ADC=m
9207 +CONFIG_MP2629_ADC=m
9208 +CONFIG_NAU7802=m
9209 +CONFIG_PALMAS_GPADC=m
9210 +CONFIG_QCOM_VADC_COMMON=m
9211 +CONFIG_QCOM_SPMI_IADC=m
9212 +CONFIG_QCOM_SPMI_VADC=m
9213 +CONFIG_QCOM_SPMI_ADC5=m
9214 +CONFIG_STX104=m
9215 +CONFIG_TI_ADC081C=m
9216 +CONFIG_TI_ADC0832=m
9217 +CONFIG_TI_ADC084S021=m
9218 +CONFIG_TI_ADC12138=m
9219 +CONFIG_TI_ADC108S102=m
9220 +CONFIG_TI_ADC128S052=m
9221 +CONFIG_TI_ADC161S626=m
9222 +CONFIG_TI_ADS1015=m
9223 +CONFIG_TI_ADS7950=m
9224 +CONFIG_TI_AM335X_ADC=m
9225 +CONFIG_TI_TLC4541=m
9226 +CONFIG_TWL4030_MADC=m
9227 +CONFIG_TWL6030_GPADC=m
9228 +CONFIG_VIPERBOARD_ADC=m
9229 +CONFIG_XILINX_XADC=m
9230 +# end of Analog to digital converters
9233 +# Analog Front Ends
9235 +# end of Analog Front Ends
9238 +# Amplifiers
9240 +CONFIG_AD8366=m
9241 +CONFIG_HMC425=m
9242 +# end of Amplifiers
9245 +# Chemical Sensors
9247 +CONFIG_ATLAS_PH_SENSOR=m
9248 +CONFIG_ATLAS_EZO_SENSOR=m
9249 +CONFIG_BME680=m
9250 +CONFIG_BME680_I2C=m
9251 +CONFIG_BME680_SPI=m
9252 +CONFIG_CCS811=m
9253 +CONFIG_IAQCORE=m
9254 +CONFIG_PMS7003=m
9255 +CONFIG_SCD30_CORE=m
9256 +CONFIG_SCD30_I2C=m
9257 +CONFIG_SCD30_SERIAL=m
9258 +CONFIG_SENSIRION_SGP30=m
9259 +CONFIG_SPS30=m
9260 +CONFIG_VZ89X=m
9261 +# end of Chemical Sensors
9263 +CONFIG_IIO_CROS_EC_SENSORS_CORE=m
9264 +CONFIG_IIO_CROS_EC_SENSORS=m
9265 +CONFIG_IIO_CROS_EC_SENSORS_LID_ANGLE=m
9268 +# Hid Sensor IIO Common
9270 +CONFIG_HID_SENSOR_IIO_COMMON=m
9271 +CONFIG_HID_SENSOR_IIO_TRIGGER=m
9272 +# end of Hid Sensor IIO Common
9274 +CONFIG_IIO_MS_SENSORS_I2C=m
9277 +# SSP Sensor Common
9279 +CONFIG_IIO_SSP_SENSORS_COMMONS=m
9280 +CONFIG_IIO_SSP_SENSORHUB=m
9281 +# end of SSP Sensor Common
9283 +CONFIG_IIO_ST_SENSORS_I2C=m
9284 +CONFIG_IIO_ST_SENSORS_SPI=m
9285 +CONFIG_IIO_ST_SENSORS_CORE=m
9288 +# Digital to analog converters
9290 +CONFIG_AD5064=m
9291 +CONFIG_AD5360=m
9292 +CONFIG_AD5380=m
9293 +CONFIG_AD5421=m
9294 +CONFIG_AD5446=m
9295 +CONFIG_AD5449=m
9296 +CONFIG_AD5592R_BASE=m
9297 +CONFIG_AD5592R=m
9298 +CONFIG_AD5593R=m
9299 +CONFIG_AD5504=m
9300 +CONFIG_AD5624R_SPI=m
9301 +CONFIG_AD5686=m
9302 +CONFIG_AD5686_SPI=m
9303 +CONFIG_AD5696_I2C=m
9304 +CONFIG_AD5755=m
9305 +CONFIG_AD5758=m
9306 +CONFIG_AD5761=m
9307 +CONFIG_AD5764=m
9308 +CONFIG_AD5766=m
9309 +CONFIG_AD5770R=m
9310 +CONFIG_AD5791=m
9311 +CONFIG_AD7303=m
9312 +CONFIG_AD8801=m
9313 +CONFIG_CIO_DAC=m
9314 +CONFIG_DS4424=m
9315 +CONFIG_LTC1660=m
9316 +CONFIG_LTC2632=m
9317 +CONFIG_M62332=m
9318 +CONFIG_MAX517=m
9319 +CONFIG_MCP4725=m
9320 +CONFIG_MCP4922=m
9321 +CONFIG_TI_DAC082S085=m
9322 +CONFIG_TI_DAC5571=m
9323 +CONFIG_TI_DAC7311=m
9324 +CONFIG_TI_DAC7612=m
9325 +# end of Digital to analog converters
9328 +# IIO dummy driver
9330 +CONFIG_IIO_SIMPLE_DUMMY=m
9331 +# CONFIG_IIO_SIMPLE_DUMMY_EVENTS is not set
9332 +# CONFIG_IIO_SIMPLE_DUMMY_BUFFER is not set
9333 +# end of IIO dummy driver
9336 +# Frequency Synthesizers DDS/PLL
9340 +# Clock Generator/Distribution
9342 +CONFIG_AD9523=m
9343 +# end of Clock Generator/Distribution
9346 +# Phase-Locked Loop (PLL) frequency synthesizers
9348 +CONFIG_ADF4350=m
9349 +CONFIG_ADF4371=m
9350 +# end of Phase-Locked Loop (PLL) frequency synthesizers
9351 +# end of Frequency Synthesizers DDS/PLL
9354 +# Digital gyroscope sensors
9356 +CONFIG_ADIS16080=m
9357 +CONFIG_ADIS16130=m
9358 +CONFIG_ADIS16136=m
9359 +CONFIG_ADIS16260=m
9360 +CONFIG_ADXRS290=m
9361 +CONFIG_ADXRS450=m
9362 +CONFIG_BMG160=m
9363 +CONFIG_BMG160_I2C=m
9364 +CONFIG_BMG160_SPI=m
9365 +CONFIG_FXAS21002C=m
9366 +CONFIG_FXAS21002C_I2C=m
9367 +CONFIG_FXAS21002C_SPI=m
9368 +CONFIG_HID_SENSOR_GYRO_3D=m
9369 +CONFIG_MPU3050=m
9370 +CONFIG_MPU3050_I2C=m
9371 +CONFIG_IIO_ST_GYRO_3AXIS=m
9372 +CONFIG_IIO_ST_GYRO_I2C_3AXIS=m
9373 +CONFIG_IIO_ST_GYRO_SPI_3AXIS=m
9374 +CONFIG_ITG3200=m
9375 +# end of Digital gyroscope sensors
9378 +# Health Sensors
9382 +# Heart Rate Monitors
9384 +CONFIG_AFE4403=m
9385 +CONFIG_AFE4404=m
9386 +CONFIG_MAX30100=m
9387 +CONFIG_MAX30102=m
9388 +# end of Heart Rate Monitors
9389 +# end of Health Sensors
9392 +# Humidity sensors
9394 +CONFIG_AM2315=m
9395 +CONFIG_DHT11=m
9396 +CONFIG_HDC100X=m
9397 +CONFIG_HDC2010=m
9398 +CONFIG_HID_SENSOR_HUMIDITY=m
9399 +CONFIG_HTS221=m
9400 +CONFIG_HTS221_I2C=m
9401 +CONFIG_HTS221_SPI=m
9402 +CONFIG_HTU21=m
9403 +CONFIG_SI7005=m
9404 +CONFIG_SI7020=m
9405 +# end of Humidity sensors
9408 +# Inertial measurement units
9410 +CONFIG_ADIS16400=m
9411 +CONFIG_ADIS16460=m
9412 +CONFIG_ADIS16475=m
9413 +CONFIG_ADIS16480=m
9414 +CONFIG_BMI160=m
9415 +CONFIG_BMI160_I2C=m
9416 +CONFIG_BMI160_SPI=m
9417 +CONFIG_FXOS8700=m
9418 +CONFIG_FXOS8700_I2C=m
9419 +CONFIG_FXOS8700_SPI=m
9420 +CONFIG_KMX61=m
9421 +CONFIG_INV_ICM42600=m
9422 +CONFIG_INV_ICM42600_I2C=m
9423 +CONFIG_INV_ICM42600_SPI=m
9424 +CONFIG_INV_MPU6050_IIO=m
9425 +CONFIG_INV_MPU6050_I2C=m
9426 +CONFIG_INV_MPU6050_SPI=m
9427 +CONFIG_IIO_ST_LSM6DSX=m
9428 +CONFIG_IIO_ST_LSM6DSX_I2C=m
9429 +CONFIG_IIO_ST_LSM6DSX_SPI=m
9430 +CONFIG_IIO_ST_LSM6DSX_I3C=m
9431 +# end of Inertial measurement units
9433 +CONFIG_IIO_ADIS_LIB=m
9434 +CONFIG_IIO_ADIS_LIB_BUFFER=y
9437 +# Light sensors
9439 +CONFIG_ACPI_ALS=m
9440 +CONFIG_ADJD_S311=m
9441 +CONFIG_ADUX1020=m
9442 +CONFIG_AL3010=m
9443 +CONFIG_AL3320A=m
9444 +CONFIG_APDS9300=m
9445 +CONFIG_APDS9960=m
9446 +CONFIG_AS73211=m
9447 +CONFIG_BH1750=m
9448 +CONFIG_BH1780=m
9449 +CONFIG_CM32181=m
9450 +CONFIG_CM3232=m
9451 +CONFIG_CM3323=m
9452 +CONFIG_CM36651=m
9453 +CONFIG_IIO_CROS_EC_LIGHT_PROX=m
9454 +CONFIG_GP2AP002=m
9455 +CONFIG_GP2AP020A00F=m
9456 +CONFIG_IQS621_ALS=m
9457 +CONFIG_SENSORS_ISL29018=m
9458 +CONFIG_SENSORS_ISL29028=m
9459 +CONFIG_ISL29125=m
9460 +CONFIG_HID_SENSOR_ALS=m
9461 +CONFIG_HID_SENSOR_PROX=m
9462 +CONFIG_JSA1212=m
9463 +CONFIG_RPR0521=m
9464 +CONFIG_SENSORS_LM3533=m
9465 +CONFIG_LTR501=m
9466 +CONFIG_LV0104CS=m
9467 +CONFIG_MAX44000=m
9468 +CONFIG_MAX44009=m
9469 +CONFIG_NOA1305=m
9470 +CONFIG_OPT3001=m
9471 +CONFIG_PA12203001=m
9472 +CONFIG_SI1133=m
9473 +CONFIG_SI1145=m
9474 +CONFIG_STK3310=m
9475 +CONFIG_ST_UVIS25=m
9476 +CONFIG_ST_UVIS25_I2C=m
9477 +CONFIG_ST_UVIS25_SPI=m
9478 +CONFIG_TCS3414=m
9479 +CONFIG_TCS3472=m
9480 +CONFIG_SENSORS_TSL2563=m
9481 +CONFIG_TSL2583=m
9482 +CONFIG_TSL2772=m
9483 +CONFIG_TSL4531=m
9484 +CONFIG_US5182D=m
9485 +CONFIG_VCNL4000=m
9486 +CONFIG_VCNL4035=m
9487 +CONFIG_VEML6030=m
9488 +CONFIG_VEML6070=m
9489 +CONFIG_VL6180=m
9490 +CONFIG_ZOPT2201=m
9491 +# end of Light sensors
9494 +# Magnetometer sensors
9496 +CONFIG_AK8975=m
9497 +CONFIG_AK09911=m
9498 +CONFIG_BMC150_MAGN=m
9499 +CONFIG_BMC150_MAGN_I2C=m
9500 +CONFIG_BMC150_MAGN_SPI=m
9501 +CONFIG_MAG3110=m
9502 +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
9503 +CONFIG_MMC35240=m
9504 +CONFIG_IIO_ST_MAGN_3AXIS=m
9505 +CONFIG_IIO_ST_MAGN_I2C_3AXIS=m
9506 +CONFIG_IIO_ST_MAGN_SPI_3AXIS=m
9507 +CONFIG_SENSORS_HMC5843=m
9508 +CONFIG_SENSORS_HMC5843_I2C=m
9509 +CONFIG_SENSORS_HMC5843_SPI=m
9510 +CONFIG_SENSORS_RM3100=m
9511 +CONFIG_SENSORS_RM3100_I2C=m
9512 +CONFIG_SENSORS_RM3100_SPI=m
9513 +CONFIG_YAMAHA_YAS530=m
9514 +# end of Magnetometer sensors
9517 +# Multiplexers
9519 +# end of Multiplexers
9522 +# Inclinometer sensors
9524 +CONFIG_HID_SENSOR_INCLINOMETER_3D=m
9525 +CONFIG_HID_SENSOR_DEVICE_ROTATION=m
9526 +# end of Inclinometer sensors
9529 +# Triggers - standalone
9531 +CONFIG_IIO_HRTIMER_TRIGGER=m
9532 +CONFIG_IIO_INTERRUPT_TRIGGER=m
9533 +CONFIG_IIO_TIGHTLOOP_TRIGGER=m
9534 +CONFIG_IIO_SYSFS_TRIGGER=m
9535 +# end of Triggers - standalone
9538 +# Linear and angular position sensors
9540 +CONFIG_IQS624_POS=m
9541 +CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE=m
9542 +# end of Linear and angular position sensors
9545 +# Digital potentiometers
9547 +CONFIG_AD5272=m
9548 +CONFIG_DS1803=m
9549 +CONFIG_MAX5432=m
9550 +CONFIG_MAX5481=m
9551 +CONFIG_MAX5487=m
9552 +CONFIG_MCP4018=m
9553 +CONFIG_MCP4131=m
9554 +CONFIG_MCP4531=m
9555 +CONFIG_MCP41010=m
9556 +CONFIG_TPL0102=m
9557 +# end of Digital potentiometers
9560 +# Digital potentiostats
9562 +CONFIG_LMP91000=m
9563 +# end of Digital potentiostats
9566 +# Pressure sensors
9568 +CONFIG_ABP060MG=m
9569 +CONFIG_BMP280=m
9570 +CONFIG_BMP280_I2C=m
9571 +CONFIG_BMP280_SPI=m
9572 +CONFIG_IIO_CROS_EC_BARO=m
9573 +CONFIG_DLHL60D=m
9574 +CONFIG_DPS310=m
9575 +CONFIG_HID_SENSOR_PRESS=m
9576 +CONFIG_HP03=m
9577 +CONFIG_ICP10100=m
9578 +CONFIG_MPL115=m
9579 +CONFIG_MPL115_I2C=m
9580 +CONFIG_MPL115_SPI=m
9581 +CONFIG_MPL3115=m
9582 +CONFIG_MS5611=m
9583 +CONFIG_MS5611_I2C=m
9584 +CONFIG_MS5611_SPI=m
9585 +CONFIG_MS5637=m
9586 +CONFIG_IIO_ST_PRESS=m
9587 +CONFIG_IIO_ST_PRESS_I2C=m
9588 +CONFIG_IIO_ST_PRESS_SPI=m
9589 +CONFIG_T5403=m
9590 +CONFIG_HP206C=m
9591 +CONFIG_ZPA2326=m
9592 +CONFIG_ZPA2326_I2C=m
9593 +CONFIG_ZPA2326_SPI=m
9594 +# end of Pressure sensors
9597 +# Lightning sensors
9599 +CONFIG_AS3935=m
9600 +# end of Lightning sensors
9603 +# Proximity and distance sensors
9605 +CONFIG_ISL29501=m
9606 +CONFIG_LIDAR_LITE_V2=m
9607 +CONFIG_MB1232=m
9608 +CONFIG_PING=m
9609 +CONFIG_RFD77402=m
9610 +CONFIG_SRF04=m
9611 +CONFIG_SX9310=m
9612 +CONFIG_SX9500=m
9613 +CONFIG_SRF08=m
9614 +CONFIG_VCNL3020=m
9615 +CONFIG_VL53L0X_I2C=m
9616 +# end of Proximity and distance sensors
9619 +# Resolver to digital converters
9621 +CONFIG_AD2S90=m
9622 +CONFIG_AD2S1200=m
9623 +# end of Resolver to digital converters
9626 +# Temperature sensors
9628 +CONFIG_IQS620AT_TEMP=m
9629 +CONFIG_LTC2983=m
9630 +CONFIG_MAXIM_THERMOCOUPLE=m
9631 +CONFIG_HID_SENSOR_TEMP=m
9632 +CONFIG_MLX90614=m
9633 +CONFIG_MLX90632=m
9634 +CONFIG_TMP006=m
9635 +CONFIG_TMP007=m
9636 +CONFIG_TSYS01=m
9637 +CONFIG_TSYS02D=m
9638 +CONFIG_MAX31856=m
9639 +# end of Temperature sensors
9641 +CONFIG_NTB=m
9642 +CONFIG_NTB_MSI=y
9643 +# CONFIG_NTB_AMD is not set
9644 +CONFIG_NTB_IDT=m
9645 +CONFIG_NTB_INTEL=m
9646 +CONFIG_NTB_EPF=m
9647 +CONFIG_NTB_SWITCHTEC=m
9648 +CONFIG_NTB_PINGPONG=m
9649 +CONFIG_NTB_TOOL=m
9650 +CONFIG_NTB_PERF=m
9651 +# CONFIG_NTB_MSI_TEST is not set
9652 +CONFIG_NTB_TRANSPORT=m
9653 +CONFIG_VME_BUS=y
9656 +# VME Bridge Drivers
9658 +CONFIG_VME_CA91CX42=m
9659 +CONFIG_VME_TSI148=m
9660 +CONFIG_VME_FAKE=m
9663 +# VME Board Drivers
9665 +CONFIG_VMIVME_7805=m
9668 +# VME Device Drivers
9670 +CONFIG_VME_USER=m
9671 +CONFIG_PWM=y
9672 +CONFIG_PWM_SYSFS=y
9673 +# CONFIG_PWM_DEBUG is not set
9674 +CONFIG_PWM_CRC=y
9675 +CONFIG_PWM_CROS_EC=m
9676 +CONFIG_PWM_DWC=m
9677 +CONFIG_PWM_IQS620A=m
9678 +CONFIG_PWM_LP3943=m
9679 +CONFIG_PWM_LPSS=y
9680 +CONFIG_PWM_LPSS_PCI=y
9681 +CONFIG_PWM_LPSS_PLATFORM=y
9682 +CONFIG_PWM_PCA9685=m
9683 +CONFIG_PWM_TWL=m
9684 +CONFIG_PWM_TWL_LED=m
9687 +# IRQ chip support
9689 +CONFIG_MADERA_IRQ=m
9690 +# end of IRQ chip support
9692 +CONFIG_IPACK_BUS=m
9693 +CONFIG_BOARD_TPCI200=m
9694 +CONFIG_SERIAL_IPOCTAL=m
9695 +CONFIG_RESET_CONTROLLER=y
9696 +CONFIG_RESET_BRCMSTB_RESCAL=y
9697 +CONFIG_RESET_TI_SYSCON=m
9700 +# PHY Subsystem
9702 +CONFIG_GENERIC_PHY=y
9703 +CONFIG_USB_LGM_PHY=m
9704 +CONFIG_BCM_KONA_USB2_PHY=m
9705 +CONFIG_PHY_PXA_28NM_HSIC=m
9706 +CONFIG_PHY_PXA_28NM_USB2=m
9707 +CONFIG_PHY_CPCAP_USB=m
9708 +CONFIG_PHY_QCOM_USB_HS=m
9709 +CONFIG_PHY_QCOM_USB_HSIC=m
9710 +CONFIG_PHY_SAMSUNG_USB2=m
9711 +CONFIG_PHY_TUSB1210=m
9712 +CONFIG_PHY_INTEL_LGM_EMMC=m
9713 +# end of PHY Subsystem
9715 +CONFIG_POWERCAP=y
9716 +CONFIG_INTEL_RAPL_CORE=m
9717 +CONFIG_INTEL_RAPL=m
9718 +CONFIG_IDLE_INJECT=y
9719 +CONFIG_DTPM=y
9720 +CONFIG_DTPM_CPU=y
9721 +CONFIG_MCB=m
9722 +CONFIG_MCB_PCI=m
9723 +CONFIG_MCB_LPC=m
9726 +# Performance monitor support
9728 +# end of Performance monitor support
9730 +CONFIG_RAS=y
9731 +CONFIG_RAS_CEC=y
9732 +# CONFIG_RAS_CEC_DEBUG is not set
9733 +CONFIG_USB4=m
9734 +# CONFIG_USB4_DEBUGFS_WRITE is not set
9735 +# CONFIG_USB4_DMA_TEST is not set
9738 +# Android
9740 +CONFIG_ANDROID=y
9741 +CONFIG_ANDROID_BINDER_IPC=m
9742 +CONFIG_ANDROID_BINDERFS=m
9743 +CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
9744 +# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set
9745 +# end of Android
9747 +CONFIG_LIBNVDIMM=y
9748 +CONFIG_BLK_DEV_PMEM=m
9749 +CONFIG_ND_BLK=m
9750 +CONFIG_ND_CLAIM=y
9751 +CONFIG_ND_BTT=m
9752 +CONFIG_BTT=y
9753 +CONFIG_ND_PFN=m
9754 +CONFIG_NVDIMM_PFN=y
9755 +CONFIG_NVDIMM_DAX=y
9756 +CONFIG_NVDIMM_KEYS=y
9757 +CONFIG_DAX_DRIVER=y
9758 +CONFIG_DAX=y
9759 +CONFIG_DEV_DAX=m
9760 +CONFIG_DEV_DAX_PMEM=m
9761 +CONFIG_DEV_DAX_HMEM=m
9762 +CONFIG_DEV_DAX_HMEM_DEVICES=y
9763 +CONFIG_DEV_DAX_KMEM=m
9764 +CONFIG_DEV_DAX_PMEM_COMPAT=m
9765 +CONFIG_NVMEM=y
9766 +CONFIG_NVMEM_SYSFS=y
9767 +CONFIG_NVMEM_SPMI_SDAM=m
9768 +CONFIG_RAVE_SP_EEPROM=m
9769 +CONFIG_NVMEM_RMEM=m
9772 +# HW tracing support
9774 +CONFIG_STM=m
9775 +CONFIG_STM_PROTO_BASIC=m
9776 +CONFIG_STM_PROTO_SYS_T=m
9777 +CONFIG_STM_DUMMY=m
9778 +CONFIG_STM_SOURCE_CONSOLE=m
9779 +CONFIG_STM_SOURCE_HEARTBEAT=m
9780 +CONFIG_INTEL_TH=m
9781 +CONFIG_INTEL_TH_PCI=m
9782 +CONFIG_INTEL_TH_ACPI=m
9783 +CONFIG_INTEL_TH_GTH=m
9784 +CONFIG_INTEL_TH_STH=m
9785 +CONFIG_INTEL_TH_MSU=m
9786 +CONFIG_INTEL_TH_PTI=m
9787 +# CONFIG_INTEL_TH_DEBUG is not set
9788 +# end of HW tracing support
9790 +CONFIG_FPGA=m
9791 +CONFIG_ALTERA_PR_IP_CORE=m
9792 +CONFIG_FPGA_MGR_ALTERA_PS_SPI=m
9793 +CONFIG_FPGA_MGR_ALTERA_CVP=m
9794 +CONFIG_FPGA_MGR_XILINX_SPI=m
9795 +CONFIG_FPGA_MGR_MACHXO2_SPI=m
9796 +CONFIG_FPGA_BRIDGE=m
9797 +CONFIG_ALTERA_FREEZE_BRIDGE=m
9798 +CONFIG_XILINX_PR_DECOUPLER=m
9799 +CONFIG_FPGA_REGION=m
9800 +CONFIG_FPGA_DFL=m
9801 +CONFIG_FPGA_DFL_FME=m
9802 +CONFIG_FPGA_DFL_FME_MGR=m
9803 +CONFIG_FPGA_DFL_FME_BRIDGE=m
9804 +CONFIG_FPGA_DFL_FME_REGION=m
9805 +CONFIG_FPGA_DFL_AFU=m
9806 +CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000=m
9807 +CONFIG_FPGA_DFL_PCI=m
9808 +CONFIG_TEE=m
9811 +# TEE drivers
9813 +CONFIG_AMDTEE=m
9814 +# end of TEE drivers
9816 +CONFIG_MULTIPLEXER=m
9819 +# Multiplexer drivers
9821 +CONFIG_MUX_ADG792A=m
9822 +CONFIG_MUX_ADGS1408=m
9823 +CONFIG_MUX_GPIO=m
9824 +# end of Multiplexer drivers
9826 +CONFIG_PM_OPP=y
9827 +CONFIG_UNISYS_VISORBUS=m
9828 +CONFIG_SIOX=m
9829 +CONFIG_SIOX_BUS_GPIO=m
9830 +CONFIG_SLIMBUS=m
9831 +CONFIG_SLIM_QCOM_CTRL=m
9832 +CONFIG_INTERCONNECT=y
9833 +CONFIG_COUNTER=m
9834 +CONFIG_104_QUAD_8=m
9835 +CONFIG_MOST=m
9836 +CONFIG_MOST_USB_HDM=m
9837 +CONFIG_MOST_CDEV=m
9838 +# end of Device Drivers
9841 +# File systems
9843 +CONFIG_DCACHE_WORD_ACCESS=y
9844 +CONFIG_VALIDATE_FS_PARSER=y
9845 +CONFIG_FS_IOMAP=y
9846 +# CONFIG_EXT2_FS is not set
9847 +# CONFIG_EXT3_FS is not set
9848 +CONFIG_EXT4_FS=y
9849 +CONFIG_EXT4_USE_FOR_EXT2=y
9850 +CONFIG_EXT4_FS_POSIX_ACL=y
9851 +CONFIG_EXT4_FS_SECURITY=y
9852 +# CONFIG_EXT4_DEBUG is not set
9853 +CONFIG_JBD2=y
9854 +# CONFIG_JBD2_DEBUG is not set
9855 +CONFIG_FS_MBCACHE=y
9856 +CONFIG_REISERFS_FS=m
9857 +# CONFIG_REISERFS_CHECK is not set
9858 +# CONFIG_REISERFS_PROC_INFO is not set
9859 +CONFIG_REISERFS_FS_XATTR=y
9860 +CONFIG_REISERFS_FS_POSIX_ACL=y
9861 +CONFIG_REISERFS_FS_SECURITY=y
9862 +CONFIG_JFS_FS=m
9863 +CONFIG_JFS_POSIX_ACL=y
9864 +CONFIG_JFS_SECURITY=y
9865 +# CONFIG_JFS_DEBUG is not set
9866 +CONFIG_JFS_STATISTICS=y
9867 +CONFIG_XFS_FS=m
9868 +CONFIG_XFS_SUPPORT_V4=y
9869 +CONFIG_XFS_QUOTA=y
9870 +CONFIG_XFS_POSIX_ACL=y
9871 +CONFIG_XFS_RT=y
9872 +# CONFIG_XFS_ONLINE_SCRUB is not set
9873 +# CONFIG_XFS_WARN is not set
9874 +# CONFIG_XFS_DEBUG is not set
9875 +CONFIG_GFS2_FS=m
9876 +CONFIG_GFS2_FS_LOCKING_DLM=y
9877 +CONFIG_OCFS2_FS=m
9878 +CONFIG_OCFS2_FS_O2CB=m
9879 +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
9880 +CONFIG_OCFS2_FS_STATS=y
9881 +CONFIG_OCFS2_DEBUG_MASKLOG=y
9882 +# CONFIG_OCFS2_DEBUG_FS is not set
9883 +CONFIG_BTRFS_FS=m
9884 +CONFIG_BTRFS_FS_POSIX_ACL=y
9885 +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
9886 +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
9887 +# CONFIG_BTRFS_DEBUG is not set
9888 +# CONFIG_BTRFS_ASSERT is not set
9889 +# CONFIG_BTRFS_FS_REF_VERIFY is not set
9890 +CONFIG_NILFS2_FS=m
9891 +CONFIG_F2FS_FS=m
9892 +CONFIG_F2FS_STAT_FS=y
9893 +CONFIG_F2FS_FS_XATTR=y
9894 +CONFIG_F2FS_FS_POSIX_ACL=y
9895 +CONFIG_F2FS_FS_SECURITY=y
9896 +# CONFIG_F2FS_CHECK_FS is not set
9897 +# CONFIG_F2FS_FAULT_INJECTION is not set
9898 +CONFIG_F2FS_FS_COMPRESSION=y
9899 +CONFIG_F2FS_FS_LZO=y
9900 +CONFIG_F2FS_FS_LZ4=y
9901 +CONFIG_F2FS_FS_LZ4HC=y
9902 +CONFIG_F2FS_FS_ZSTD=y
9903 +CONFIG_F2FS_FS_LZORLE=y
9904 +CONFIG_ZONEFS_FS=m
9905 +CONFIG_FS_DAX=y
9906 +CONFIG_FS_DAX_PMD=y
9907 +CONFIG_FS_POSIX_ACL=y
9908 +CONFIG_EXPORTFS=y
9909 +CONFIG_EXPORTFS_BLOCK_OPS=y
9910 +CONFIG_FILE_LOCKING=y
9911 +CONFIG_MANDATORY_FILE_LOCKING=y
9912 +CONFIG_FS_ENCRYPTION=y
9913 +CONFIG_FS_ENCRYPTION_ALGS=y
9914 +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
9915 +CONFIG_FS_VERITY=y
9916 +# CONFIG_FS_VERITY_DEBUG is not set
9917 +CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
9918 +CONFIG_FSNOTIFY=y
9919 +CONFIG_DNOTIFY=y
9920 +CONFIG_INOTIFY_USER=y
9921 +CONFIG_FANOTIFY=y
9922 +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
9923 +CONFIG_QUOTA=y
9924 +CONFIG_QUOTA_NETLINK_INTERFACE=y
9925 +# CONFIG_PRINT_QUOTA_WARNING is not set
9926 +# CONFIG_QUOTA_DEBUG is not set
9927 +CONFIG_QUOTA_TREE=m
9928 +CONFIG_QFMT_V1=m
9929 +CONFIG_QFMT_V2=m
9930 +CONFIG_QUOTACTL=y
9931 +CONFIG_AUTOFS4_FS=m
9932 +CONFIG_AUTOFS_FS=m
9933 +CONFIG_FUSE_FS=y
9934 +CONFIG_CUSE=m
9935 +CONFIG_VIRTIO_FS=m
9936 +CONFIG_FUSE_DAX=y
9937 +CONFIG_OVERLAY_FS=m
9938 +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set
9939 +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y
9940 +# CONFIG_OVERLAY_FS_INDEX is not set
9941 +CONFIG_OVERLAY_FS_XINO_AUTO=y
9942 +# CONFIG_OVERLAY_FS_METACOPY is not set
9945 +# Caches
9947 +CONFIG_FSCACHE=m
9948 +CONFIG_FSCACHE_STATS=y
9949 +# CONFIG_FSCACHE_HISTOGRAM is not set
9950 +# CONFIG_FSCACHE_DEBUG is not set
9951 +# CONFIG_FSCACHE_OBJECT_LIST is not set
9952 +CONFIG_CACHEFILES=m
9953 +# CONFIG_CACHEFILES_DEBUG is not set
9954 +# CONFIG_CACHEFILES_HISTOGRAM is not set
9955 +# end of Caches
9958 +# CD-ROM/DVD Filesystems
9960 +CONFIG_ISO9660_FS=m
9961 +CONFIG_JOLIET=y
9962 +CONFIG_ZISOFS=y
9963 +CONFIG_UDF_FS=m
9964 +# end of CD-ROM/DVD Filesystems
9967 +# DOS/FAT/EXFAT/NT Filesystems
9969 +CONFIG_FAT_FS=y
9970 +CONFIG_MSDOS_FS=m
9971 +CONFIG_VFAT_FS=y
9972 +CONFIG_FAT_DEFAULT_CODEPAGE=437
9973 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
9974 +# CONFIG_FAT_DEFAULT_UTF8 is not set
9975 +CONFIG_EXFAT_FS=m
9976 +CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
9977 +# CONFIG_NTFS_FS is not set
9978 +CONFIG_NTFS3_FS=m
9979 +# CONFIG_NTFS3_64BIT_CLUSTER is not set
9980 +CONFIG_NTFS3_LZX_XPRESS=y
9981 +# CONFIG_NTFS3_FS_POSIX_ACL is not set
9982 +# end of DOS/FAT/EXFAT/NT Filesystems
9985 +# Pseudo filesystems
9987 +CONFIG_PROC_FS=y
9988 +CONFIG_PROC_KCORE=y
9989 +CONFIG_PROC_VMCORE=y
9990 +CONFIG_PROC_VMCORE_DEVICE_DUMP=y
9991 +CONFIG_PROC_SYSCTL=y
9992 +CONFIG_PROC_PAGE_MONITOR=y
9993 +CONFIG_PROC_CHILDREN=y
9994 +CONFIG_PROC_PID_ARCH_STATUS=y
9995 +CONFIG_PROC_CPU_RESCTRL=y
9996 +CONFIG_KERNFS=y
9997 +CONFIG_SYSFS=y
9998 +CONFIG_TMPFS=y
9999 +CONFIG_TMPFS_POSIX_ACL=y
10000 +CONFIG_TMPFS_XATTR=y
10001 +CONFIG_TMPFS_INODE64=y
10002 +CONFIG_HUGETLBFS=y
10003 +CONFIG_HUGETLB_PAGE=y
10004 +CONFIG_MEMFD_CREATE=y
10005 +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
10006 +CONFIG_CONFIGFS_FS=y
10007 +CONFIG_EFIVAR_FS=y
10008 +# end of Pseudo filesystems
10010 +CONFIG_MISC_FILESYSTEMS=y
10011 +CONFIG_ORANGEFS_FS=m
10012 +CONFIG_ADFS_FS=m
10013 +# CONFIG_ADFS_FS_RW is not set
10014 +CONFIG_AFFS_FS=m
10015 +CONFIG_ECRYPT_FS=y
10016 +CONFIG_ECRYPT_FS_MESSAGING=y
10017 +CONFIG_HFS_FS=m
10018 +CONFIG_HFSPLUS_FS=m
10019 +CONFIG_BEFS_FS=m
10020 +# CONFIG_BEFS_DEBUG is not set
10021 +CONFIG_BFS_FS=m
10022 +CONFIG_EFS_FS=m
10023 +CONFIG_JFFS2_FS=m
10024 +CONFIG_JFFS2_FS_DEBUG=0
10025 +CONFIG_JFFS2_FS_WRITEBUFFER=y
10026 +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
10027 +# CONFIG_JFFS2_SUMMARY is not set
10028 +CONFIG_JFFS2_FS_XATTR=y
10029 +CONFIG_JFFS2_FS_POSIX_ACL=y
10030 +CONFIG_JFFS2_FS_SECURITY=y
10031 +CONFIG_JFFS2_COMPRESSION_OPTIONS=y
10032 +CONFIG_JFFS2_ZLIB=y
10033 +CONFIG_JFFS2_LZO=y
10034 +CONFIG_JFFS2_RTIME=y
10035 +# CONFIG_JFFS2_RUBIN is not set
10036 +# CONFIG_JFFS2_CMODE_NONE is not set
10037 +# CONFIG_JFFS2_CMODE_PRIORITY is not set
10038 +# CONFIG_JFFS2_CMODE_SIZE is not set
10039 +CONFIG_JFFS2_CMODE_FAVOURLZO=y
10040 +CONFIG_UBIFS_FS=m
10041 +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
10042 +CONFIG_UBIFS_FS_LZO=y
10043 +CONFIG_UBIFS_FS_ZLIB=y
10044 +CONFIG_UBIFS_FS_ZSTD=y
10045 +# CONFIG_UBIFS_ATIME_SUPPORT is not set
10046 +CONFIG_UBIFS_FS_XATTR=y
10047 +CONFIG_UBIFS_FS_SECURITY=y
10048 +CONFIG_UBIFS_FS_AUTHENTICATION=y
10049 +CONFIG_CRAMFS=m
10050 +CONFIG_CRAMFS_BLOCKDEV=y
10051 +CONFIG_CRAMFS_MTD=y
10052 +CONFIG_SQUASHFS=y
10053 +# CONFIG_SQUASHFS_FILE_CACHE is not set
10054 +CONFIG_SQUASHFS_FILE_DIRECT=y
10055 +CONFIG_SQUASHFS_DECOMP_SINGLE=y
10056 +# CONFIG_SQUASHFS_DECOMP_MULTI is not set
10057 +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
10058 +CONFIG_SQUASHFS_XATTR=y
10059 +CONFIG_SQUASHFS_ZLIB=y
10060 +CONFIG_SQUASHFS_LZ4=y
10061 +CONFIG_SQUASHFS_LZO=y
10062 +CONFIG_SQUASHFS_XZ=y
10063 +CONFIG_SQUASHFS_ZSTD=y
10064 +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
10065 +# CONFIG_SQUASHFS_EMBEDDED is not set
10066 +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
10067 +CONFIG_VXFS_FS=m
10068 +CONFIG_MINIX_FS=m
10069 +CONFIG_OMFS_FS=m
10070 +CONFIG_HPFS_FS=m
10071 +CONFIG_QNX4FS_FS=m
10072 +CONFIG_QNX6FS_FS=m
10073 +# CONFIG_QNX6FS_DEBUG is not set
10074 +CONFIG_ROMFS_FS=m
10075 +CONFIG_ROMFS_BACKED_BY_BLOCK=y
10076 +# CONFIG_ROMFS_BACKED_BY_MTD is not set
10077 +# CONFIG_ROMFS_BACKED_BY_BOTH is not set
10078 +CONFIG_ROMFS_ON_BLOCK=y
10079 +CONFIG_PSTORE=y
10080 +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
10081 +# CONFIG_PSTORE_DEFLATE_COMPRESS is not set
10082 +# CONFIG_PSTORE_LZO_COMPRESS is not set
10083 +# CONFIG_PSTORE_LZ4_COMPRESS is not set
10084 +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set
10085 +# CONFIG_PSTORE_842_COMPRESS is not set
10086 +CONFIG_PSTORE_ZSTD_COMPRESS=y
10087 +CONFIG_PSTORE_COMPRESS=y
10088 +CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y
10089 +CONFIG_PSTORE_COMPRESS_DEFAULT="zstd"
10090 +# CONFIG_PSTORE_CONSOLE is not set
10091 +# CONFIG_PSTORE_PMSG is not set
10092 +CONFIG_PSTORE_RAM=m
10093 +CONFIG_PSTORE_ZONE=m
10094 +CONFIG_PSTORE_BLK=m
10095 +CONFIG_PSTORE_BLK_BLKDEV=""
10096 +CONFIG_PSTORE_BLK_KMSG_SIZE=64
10097 +CONFIG_PSTORE_BLK_MAX_REASON=2
10098 +CONFIG_SYSV_FS=m
10099 +CONFIG_UFS_FS=m
10100 +# CONFIG_UFS_FS_WRITE is not set
10101 +# CONFIG_UFS_DEBUG is not set
10102 +CONFIG_EROFS_FS=m
10103 +# CONFIG_EROFS_FS_DEBUG is not set
10104 +CONFIG_EROFS_FS_XATTR=y
10105 +CONFIG_EROFS_FS_POSIX_ACL=y
10106 +CONFIG_EROFS_FS_SECURITY=y
10107 +CONFIG_EROFS_FS_ZIP=y
10108 +CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT=1
10109 +CONFIG_VBOXSF_FS=m
10110 +CONFIG_NETWORK_FILESYSTEMS=y
10111 +CONFIG_NFS_FS=m
10112 +CONFIG_NFS_V2=m
10113 +CONFIG_NFS_V3=m
10114 +CONFIG_NFS_V3_ACL=y
10115 +CONFIG_NFS_V4=m
10116 +CONFIG_NFS_SWAP=y
10117 +CONFIG_NFS_V4_1=y
10118 +CONFIG_NFS_V4_2=y
10119 +CONFIG_PNFS_FILE_LAYOUT=m
10120 +CONFIG_PNFS_BLOCK=m
10121 +CONFIG_PNFS_FLEXFILE_LAYOUT=m
10122 +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
10123 +CONFIG_NFS_V4_1_MIGRATION=y
10124 +CONFIG_NFS_V4_SECURITY_LABEL=y
10125 +CONFIG_NFS_FSCACHE=y
10126 +# CONFIG_NFS_USE_LEGACY_DNS is not set
10127 +CONFIG_NFS_USE_KERNEL_DNS=y
10128 +CONFIG_NFS_DEBUG=y
10129 +CONFIG_NFS_DISABLE_UDP_SUPPORT=y
10130 +# CONFIG_NFS_V4_2_READ_PLUS is not set
10131 +CONFIG_NFSD=m
10132 +CONFIG_NFSD_V2_ACL=y
10133 +CONFIG_NFSD_V3=y
10134 +CONFIG_NFSD_V3_ACL=y
10135 +CONFIG_NFSD_V4=y
10136 +CONFIG_NFSD_PNFS=y
10137 +CONFIG_NFSD_BLOCKLAYOUT=y
10138 +CONFIG_NFSD_SCSILAYOUT=y
10139 +CONFIG_NFSD_FLEXFILELAYOUT=y
10140 +CONFIG_NFSD_V4_2_INTER_SSC=y
10141 +CONFIG_NFSD_V4_SECURITY_LABEL=y
10142 +CONFIG_GRACE_PERIOD=m
10143 +CONFIG_LOCKD=m
10144 +CONFIG_LOCKD_V4=y
10145 +CONFIG_NFS_ACL_SUPPORT=m
10146 +CONFIG_NFS_COMMON=y
10147 +CONFIG_NFS_V4_2_SSC_HELPER=m
10148 +CONFIG_SUNRPC=m
10149 +CONFIG_SUNRPC_GSS=m
10150 +CONFIG_SUNRPC_BACKCHANNEL=y
10151 +CONFIG_SUNRPC_SWAP=y
10152 +CONFIG_RPCSEC_GSS_KRB5=m
10153 +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set
10154 +CONFIG_SUNRPC_DEBUG=y
10155 +CONFIG_SUNRPC_XPRT_RDMA=m
10156 +CONFIG_CEPH_FS=m
10157 +CONFIG_CEPH_FSCACHE=y
10158 +CONFIG_CEPH_FS_POSIX_ACL=y
10159 +CONFIG_CEPH_FS_SECURITY_LABEL=y
10160 +CONFIG_CIFS=m
10161 +# CONFIG_CIFS_STATS2 is not set
10162 +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
10163 +CONFIG_CIFS_WEAK_PW_HASH=y
10164 +CONFIG_CIFS_UPCALL=y
10165 +CONFIG_CIFS_XATTR=y
10166 +CONFIG_CIFS_POSIX=y
10167 +CONFIG_CIFS_DEBUG=y
10168 +# CONFIG_CIFS_DEBUG2 is not set
10169 +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
10170 +CONFIG_CIFS_DFS_UPCALL=y
10171 +CONFIG_CIFS_SWN_UPCALL=y
10172 +# CONFIG_CIFS_SMB_DIRECT is not set
10173 +CONFIG_CIFS_FSCACHE=y
10174 +CONFIG_CODA_FS=m
10175 +CONFIG_AFS_FS=m
10176 +# CONFIG_AFS_DEBUG is not set
10177 +CONFIG_AFS_FSCACHE=y
10178 +# CONFIG_AFS_DEBUG_CURSOR is not set
10179 +CONFIG_9P_FS=m
10180 +CONFIG_9P_FSCACHE=y
10181 +CONFIG_9P_FS_POSIX_ACL=y
10182 +CONFIG_9P_FS_SECURITY=y
10183 +CONFIG_NLS=y
10184 +CONFIG_NLS_DEFAULT="utf8"
10185 +CONFIG_NLS_CODEPAGE_437=y
10186 +CONFIG_NLS_CODEPAGE_737=m
10187 +CONFIG_NLS_CODEPAGE_775=m
10188 +CONFIG_NLS_CODEPAGE_850=m
10189 +CONFIG_NLS_CODEPAGE_852=m
10190 +CONFIG_NLS_CODEPAGE_855=m
10191 +CONFIG_NLS_CODEPAGE_857=m
10192 +CONFIG_NLS_CODEPAGE_860=m
10193 +CONFIG_NLS_CODEPAGE_861=m
10194 +CONFIG_NLS_CODEPAGE_862=m
10195 +CONFIG_NLS_CODEPAGE_863=m
10196 +CONFIG_NLS_CODEPAGE_864=m
10197 +CONFIG_NLS_CODEPAGE_865=m
10198 +CONFIG_NLS_CODEPAGE_866=m
10199 +CONFIG_NLS_CODEPAGE_869=m
10200 +CONFIG_NLS_CODEPAGE_936=m
10201 +CONFIG_NLS_CODEPAGE_950=m
10202 +CONFIG_NLS_CODEPAGE_932=m
10203 +CONFIG_NLS_CODEPAGE_949=m
10204 +CONFIG_NLS_CODEPAGE_874=m
10205 +CONFIG_NLS_ISO8859_8=m
10206 +CONFIG_NLS_CODEPAGE_1250=m
10207 +CONFIG_NLS_CODEPAGE_1251=m
10208 +CONFIG_NLS_ASCII=m
10209 +CONFIG_NLS_ISO8859_1=m
10210 +CONFIG_NLS_ISO8859_2=m
10211 +CONFIG_NLS_ISO8859_3=m
10212 +CONFIG_NLS_ISO8859_4=m
10213 +CONFIG_NLS_ISO8859_5=m
10214 +CONFIG_NLS_ISO8859_6=m
10215 +CONFIG_NLS_ISO8859_7=m
10216 +CONFIG_NLS_ISO8859_9=m
10217 +CONFIG_NLS_ISO8859_13=m
10218 +CONFIG_NLS_ISO8859_14=m
10219 +CONFIG_NLS_ISO8859_15=m
10220 +CONFIG_NLS_KOI8_R=m
10221 +CONFIG_NLS_KOI8_U=m
10222 +CONFIG_NLS_MAC_ROMAN=m
10223 +CONFIG_NLS_MAC_CELTIC=m
10224 +CONFIG_NLS_MAC_CENTEURO=m
10225 +CONFIG_NLS_MAC_CROATIAN=m
10226 +CONFIG_NLS_MAC_CYRILLIC=m
10227 +CONFIG_NLS_MAC_GAELIC=m
10228 +CONFIG_NLS_MAC_GREEK=m
10229 +CONFIG_NLS_MAC_ICELAND=m
10230 +CONFIG_NLS_MAC_INUIT=m
10231 +CONFIG_NLS_MAC_ROMANIAN=m
10232 +CONFIG_NLS_MAC_TURKISH=m
10233 +CONFIG_NLS_UTF8=m
10234 +CONFIG_DLM=m
10235 +# CONFIG_DLM_DEBUG is not set
10236 +CONFIG_UNICODE=y
10237 +# CONFIG_UNICODE_NORMALIZATION_SELFTEST is not set
10238 +CONFIG_IO_WQ=y
10239 +# end of File systems
10242 +# Security options
10244 +CONFIG_KEYS=y
10245 +CONFIG_KEYS_REQUEST_CACHE=y
10246 +CONFIG_PERSISTENT_KEYRINGS=y
10247 +CONFIG_TRUSTED_KEYS=y
10248 +CONFIG_ENCRYPTED_KEYS=y
10249 +CONFIG_KEY_DH_OPERATIONS=y
10250 +CONFIG_KEY_NOTIFICATIONS=y
10251 +CONFIG_SECURITY_DMESG_RESTRICT=y
10252 +CONFIG_SECURITY=y
10253 +CONFIG_SECURITYFS=y
10254 +CONFIG_SECURITY_NETWORK=y
10255 +CONFIG_PAGE_TABLE_ISOLATION=y
10256 +CONFIG_SECURITY_INFINIBAND=y
10257 +CONFIG_SECURITY_NETWORK_XFRM=y
10258 +CONFIG_SECURITY_PATH=y
10259 +CONFIG_INTEL_TXT=y
10260 +CONFIG_LSM_MMAP_MIN_ADDR=0
10261 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
10262 +CONFIG_HARDENED_USERCOPY=y
10263 +CONFIG_HARDENED_USERCOPY_FALLBACK=y
10264 +# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set
10265 +CONFIG_FORTIFY_SOURCE=y
10266 +# CONFIG_STATIC_USERMODEHELPER is not set
10267 +CONFIG_SECURITY_SELINUX=y
10268 +CONFIG_SECURITY_SELINUX_BOOTPARAM=y
10269 +# CONFIG_SECURITY_SELINUX_DISABLE is not set
10270 +CONFIG_SECURITY_SELINUX_DEVELOP=y
10271 +CONFIG_SECURITY_SELINUX_AVC_STATS=y
10272 +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
10273 +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9
10274 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256
10275 +CONFIG_SECURITY_SMACK=y
10276 +# CONFIG_SECURITY_SMACK_BRINGUP is not set
10277 +CONFIG_SECURITY_SMACK_NETFILTER=y
10278 +CONFIG_SECURITY_SMACK_APPEND_SIGNALS=y
10279 +CONFIG_SECURITY_TOMOYO=y
10280 +CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048
10281 +CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024
10282 +# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set
10283 +CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init"
10284 +CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init"
10285 +# CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING is not set
10286 +CONFIG_SECURITY_APPARMOR=y
10287 +CONFIG_SECURITY_APPARMOR_HASH=y
10288 +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y
10289 +# CONFIG_SECURITY_APPARMOR_DEBUG is not set
10290 +# CONFIG_SECURITY_LOADPIN is not set
10291 +CONFIG_SECURITY_YAMA=y
10292 +CONFIG_SECURITY_SAFESETID=y
10293 +CONFIG_SECURITY_LOCKDOWN_LSM=y
10294 +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
10295 +CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y
10296 +# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set
10297 +# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set
10298 +CONFIG_INTEGRITY=y
10299 +CONFIG_INTEGRITY_SIGNATURE=y
10300 +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
10301 +CONFIG_INTEGRITY_TRUSTED_KEYRING=y
10302 +CONFIG_INTEGRITY_PLATFORM_KEYRING=y
10303 +CONFIG_LOAD_UEFI_KEYS=y
10304 +CONFIG_INTEGRITY_AUDIT=y
10305 +CONFIG_IMA=y
10306 +CONFIG_IMA_MEASURE_PCR_IDX=10
10307 +CONFIG_IMA_LSM_RULES=y
10308 +# CONFIG_IMA_TEMPLATE is not set
10309 +CONFIG_IMA_NG_TEMPLATE=y
10310 +# CONFIG_IMA_SIG_TEMPLATE is not set
10311 +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
10312 +CONFIG_IMA_DEFAULT_HASH_SHA1=y
10313 +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set
10314 +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set
10315 +CONFIG_IMA_DEFAULT_HASH="sha1"
10316 +# CONFIG_IMA_WRITE_POLICY is not set
10317 +# CONFIG_IMA_READ_POLICY is not set
10318 +CONFIG_IMA_APPRAISE=y
10319 +# CONFIG_IMA_ARCH_POLICY is not set
10320 +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set
10321 +CONFIG_IMA_APPRAISE_BOOTPARAM=y
10322 +CONFIG_IMA_APPRAISE_MODSIG=y
10323 +CONFIG_IMA_TRUSTED_KEYRING=y
10324 +# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set
10325 +# CONFIG_IMA_BLACKLIST_KEYRING is not set
10326 +# CONFIG_IMA_LOAD_X509 is not set
10327 +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y
10328 +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y
10329 +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set
10330 +CONFIG_EVM=y
10331 +CONFIG_EVM_ATTR_FSUUID=y
10332 +CONFIG_EVM_EXTRA_SMACK_XATTRS=y
10333 +CONFIG_EVM_ADD_XATTRS=y
10334 +# CONFIG_EVM_LOAD_X509 is not set
10335 +# CONFIG_DEFAULT_SECURITY_SELINUX is not set
10336 +# CONFIG_DEFAULT_SECURITY_SMACK is not set
10337 +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
10338 +CONFIG_DEFAULT_SECURITY_APPARMOR=y
10339 +# CONFIG_DEFAULT_SECURITY_DAC is not set
10340 +CONFIG_LSM="lockdown,yama,integrity,apparmor"
10343 +# Kernel hardening options
10347 +# Memory initialization
10349 +CONFIG_INIT_STACK_NONE=y
10350 +CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
10351 +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
10352 +# end of Memory initialization
10353 +# end of Kernel hardening options
10354 +# end of Security options
10356 +CONFIG_XOR_BLOCKS=m
10357 +CONFIG_ASYNC_CORE=m
10358 +CONFIG_ASYNC_MEMCPY=m
10359 +CONFIG_ASYNC_XOR=m
10360 +CONFIG_ASYNC_PQ=m
10361 +CONFIG_ASYNC_RAID6_RECOV=m
10362 +CONFIG_CRYPTO=y
10365 +# Crypto core or helper
10367 +CONFIG_CRYPTO_ALGAPI=y
10368 +CONFIG_CRYPTO_ALGAPI2=y
10369 +CONFIG_CRYPTO_AEAD=y
10370 +CONFIG_CRYPTO_AEAD2=y
10371 +CONFIG_CRYPTO_SKCIPHER=y
10372 +CONFIG_CRYPTO_SKCIPHER2=y
10373 +CONFIG_CRYPTO_HASH=y
10374 +CONFIG_CRYPTO_HASH2=y
10375 +CONFIG_CRYPTO_RNG=y
10376 +CONFIG_CRYPTO_RNG2=y
10377 +CONFIG_CRYPTO_RNG_DEFAULT=y
10378 +CONFIG_CRYPTO_AKCIPHER2=y
10379 +CONFIG_CRYPTO_AKCIPHER=y
10380 +CONFIG_CRYPTO_KPP2=y
10381 +CONFIG_CRYPTO_KPP=y
10382 +CONFIG_CRYPTO_ACOMP2=y
10383 +CONFIG_CRYPTO_MANAGER=y
10384 +CONFIG_CRYPTO_MANAGER2=y
10385 +CONFIG_CRYPTO_USER=m
10386 +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
10387 +CONFIG_CRYPTO_GF128MUL=y
10388 +CONFIG_CRYPTO_NULL=y
10389 +CONFIG_CRYPTO_NULL2=y
10390 +CONFIG_CRYPTO_PCRYPT=m
10391 +CONFIG_CRYPTO_CRYPTD=m
10392 +CONFIG_CRYPTO_AUTHENC=m
10393 +CONFIG_CRYPTO_TEST=m
10394 +CONFIG_CRYPTO_SIMD=m
10395 +CONFIG_CRYPTO_ENGINE=m
10398 +# Public-key cryptography
10400 +CONFIG_CRYPTO_RSA=y
10401 +CONFIG_CRYPTO_DH=y
10402 +CONFIG_CRYPTO_ECC=m
10403 +CONFIG_CRYPTO_ECDH=m
10404 +CONFIG_CRYPTO_ECRDSA=m
10405 +CONFIG_CRYPTO_SM2=m
10406 +CONFIG_CRYPTO_CURVE25519=m
10407 +CONFIG_CRYPTO_CURVE25519_X86=m
10410 +# Authenticated Encryption with Associated Data
10412 +CONFIG_CRYPTO_CCM=m
10413 +CONFIG_CRYPTO_GCM=y
10414 +CONFIG_CRYPTO_CHACHA20POLY1305=m
10415 +CONFIG_CRYPTO_AEGIS128=m
10416 +CONFIG_CRYPTO_AEGIS128_AESNI_SSE2=m
10417 +CONFIG_CRYPTO_SEQIV=y
10418 +CONFIG_CRYPTO_ECHAINIV=m
10421 +# Block modes
10423 +CONFIG_CRYPTO_CBC=y
10424 +CONFIG_CRYPTO_CFB=m
10425 +CONFIG_CRYPTO_CTR=y
10426 +CONFIG_CRYPTO_CTS=y
10427 +CONFIG_CRYPTO_ECB=y
10428 +CONFIG_CRYPTO_LRW=m
10429 +CONFIG_CRYPTO_OFB=m
10430 +CONFIG_CRYPTO_PCBC=m
10431 +CONFIG_CRYPTO_XTS=y
10432 +CONFIG_CRYPTO_KEYWRAP=m
10433 +CONFIG_CRYPTO_NHPOLY1305=m
10434 +CONFIG_CRYPTO_NHPOLY1305_SSE2=m
10435 +CONFIG_CRYPTO_NHPOLY1305_AVX2=m
10436 +CONFIG_CRYPTO_ADIANTUM=m
10437 +CONFIG_CRYPTO_ESSIV=m
10440 +# Hash modes
10442 +CONFIG_CRYPTO_CMAC=m
10443 +CONFIG_CRYPTO_HMAC=y
10444 +CONFIG_CRYPTO_XCBC=m
10445 +CONFIG_CRYPTO_VMAC=m
10448 +# Digest
10450 +CONFIG_CRYPTO_CRC32C=y
10451 +CONFIG_CRYPTO_CRC32C_INTEL=y
10452 +CONFIG_CRYPTO_CRC32=m
10453 +CONFIG_CRYPTO_CRC32_PCLMUL=m
10454 +CONFIG_CRYPTO_XXHASH=m
10455 +CONFIG_CRYPTO_BLAKE2B=m
10456 +CONFIG_CRYPTO_BLAKE2S=m
10457 +CONFIG_CRYPTO_BLAKE2S_X86=m
10458 +CONFIG_CRYPTO_CRCT10DIF=y
10459 +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
10460 +CONFIG_CRYPTO_GHASH=y
10461 +CONFIG_CRYPTO_POLY1305=m
10462 +CONFIG_CRYPTO_POLY1305_X86_64=m
10463 +CONFIG_CRYPTO_MD4=m
10464 +CONFIG_CRYPTO_MD5=y
10465 +CONFIG_CRYPTO_MICHAEL_MIC=m
10466 +CONFIG_CRYPTO_RMD160=m
10467 +CONFIG_CRYPTO_SHA1=y
10468 +CONFIG_CRYPTO_SHA1_SSSE3=m
10469 +CONFIG_CRYPTO_SHA256_SSSE3=m
10470 +CONFIG_CRYPTO_SHA512_SSSE3=m
10471 +CONFIG_CRYPTO_SHA256=y
10472 +CONFIG_CRYPTO_SHA512=y
10473 +CONFIG_CRYPTO_SHA3=m
10474 +CONFIG_CRYPTO_SM3=m
10475 +CONFIG_CRYPTO_STREEBOG=m
10476 +CONFIG_CRYPTO_WP512=m
10477 +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
10480 +# Ciphers
10482 +CONFIG_CRYPTO_AES=y
10483 +CONFIG_CRYPTO_AES_TI=m
10484 +CONFIG_CRYPTO_AES_NI_INTEL=m
10485 +CONFIG_CRYPTO_BLOWFISH=m
10486 +CONFIG_CRYPTO_BLOWFISH_COMMON=m
10487 +CONFIG_CRYPTO_BLOWFISH_X86_64=m
10488 +CONFIG_CRYPTO_CAMELLIA=m
10489 +CONFIG_CRYPTO_CAMELLIA_X86_64=m
10490 +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
10491 +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
10492 +CONFIG_CRYPTO_CAST_COMMON=m
10493 +CONFIG_CRYPTO_CAST5=m
10494 +CONFIG_CRYPTO_CAST5_AVX_X86_64=m
10495 +CONFIG_CRYPTO_CAST6=m
10496 +CONFIG_CRYPTO_CAST6_AVX_X86_64=m
10497 +CONFIG_CRYPTO_DES=m
10498 +CONFIG_CRYPTO_DES3_EDE_X86_64=m
10499 +CONFIG_CRYPTO_FCRYPT=m
10500 +CONFIG_CRYPTO_CHACHA20=m
10501 +CONFIG_CRYPTO_CHACHA20_X86_64=m
10502 +CONFIG_CRYPTO_SERPENT=m
10503 +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
10504 +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
10505 +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
10506 +CONFIG_CRYPTO_SM4=m
10507 +CONFIG_CRYPTO_TWOFISH=m
10508 +CONFIG_CRYPTO_TWOFISH_COMMON=m
10509 +CONFIG_CRYPTO_TWOFISH_X86_64=m
10510 +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
10511 +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
10514 +# Compression
10516 +CONFIG_CRYPTO_DEFLATE=y
10517 +CONFIG_CRYPTO_LZO=y
10518 +CONFIG_CRYPTO_842=m
10519 +CONFIG_CRYPTO_LZ4=y
10520 +CONFIG_CRYPTO_LZ4HC=m
10521 +CONFIG_CRYPTO_ZSTD=y
10524 +# Random Number Generation
10526 +CONFIG_CRYPTO_ANSI_CPRNG=m
10527 +CONFIG_CRYPTO_DRBG_MENU=y
10528 +CONFIG_CRYPTO_DRBG_HMAC=y
10529 +CONFIG_CRYPTO_DRBG_HASH=y
10530 +CONFIG_CRYPTO_DRBG_CTR=y
10531 +CONFIG_CRYPTO_DRBG=y
10532 +CONFIG_CRYPTO_JITTERENTROPY=y
10533 +CONFIG_CRYPTO_USER_API=m
10534 +CONFIG_CRYPTO_USER_API_HASH=m
10535 +CONFIG_CRYPTO_USER_API_SKCIPHER=m
10536 +CONFIG_CRYPTO_USER_API_RNG=m
10537 +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set
10538 +CONFIG_CRYPTO_USER_API_AEAD=m
10539 +# CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE is not set
10540 +CONFIG_CRYPTO_STATS=y
10541 +CONFIG_CRYPTO_HASH_INFO=y
10544 +# Crypto library routines
10546 +CONFIG_CRYPTO_LIB_AES=y
10547 +CONFIG_CRYPTO_LIB_ARC4=m
10548 +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=m
10549 +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=m
10550 +CONFIG_CRYPTO_LIB_BLAKE2S=m
10551 +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m
10552 +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m
10553 +CONFIG_CRYPTO_LIB_CHACHA=m
10554 +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m
10555 +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m
10556 +CONFIG_CRYPTO_LIB_CURVE25519=m
10557 +CONFIG_CRYPTO_LIB_DES=m
10558 +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11
10559 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m
10560 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m
10561 +CONFIG_CRYPTO_LIB_POLY1305=m
10562 +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
10563 +CONFIG_CRYPTO_LIB_SHA256=y
10564 +CONFIG_CRYPTO_HW=y
10565 +CONFIG_CRYPTO_DEV_PADLOCK=y
10566 +CONFIG_CRYPTO_DEV_PADLOCK_AES=m
10567 +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
10568 +CONFIG_CRYPTO_DEV_ATMEL_I2C=m
10569 +CONFIG_CRYPTO_DEV_ATMEL_ECC=m
10570 +CONFIG_CRYPTO_DEV_ATMEL_SHA204A=m
10571 +CONFIG_CRYPTO_DEV_CCP=y
10572 +CONFIG_CRYPTO_DEV_CCP_DD=m
10573 +CONFIG_CRYPTO_DEV_SP_CCP=y
10574 +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
10575 +CONFIG_CRYPTO_DEV_SP_PSP=y
10576 +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set
10577 +CONFIG_CRYPTO_DEV_QAT=m
10578 +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
10579 +CONFIG_CRYPTO_DEV_QAT_C3XXX=m
10580 +CONFIG_CRYPTO_DEV_QAT_C62X=m
10581 +CONFIG_CRYPTO_DEV_QAT_4XXX=m
10582 +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
10583 +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
10584 +CONFIG_CRYPTO_DEV_QAT_C62XVF=m
10585 +CONFIG_CRYPTO_DEV_NITROX=m
10586 +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
10587 +CONFIG_CRYPTO_DEV_CHELSIO=m
10588 +CONFIG_CRYPTO_DEV_VIRTIO=m
10589 +CONFIG_CRYPTO_DEV_SAFEXCEL=m
10590 +CONFIG_CRYPTO_DEV_AMLOGIC_GXL=m
10591 +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG is not set
10592 +CONFIG_ASYMMETRIC_KEY_TYPE=y
10593 +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
10594 +CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE=m
10595 +CONFIG_X509_CERTIFICATE_PARSER=y
10596 +CONFIG_PKCS8_PRIVATE_KEY_PARSER=m
10597 +CONFIG_TPM_KEY_PARSER=m
10598 +CONFIG_PKCS7_MESSAGE_PARSER=y
10599 +CONFIG_PKCS7_TEST_KEY=m
10600 +CONFIG_SIGNED_PE_FILE_VERIFICATION=y
10603 +# Certificates for signature checking
10605 +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
10606 +CONFIG_SYSTEM_TRUSTED_KEYRING=y
10607 +CONFIG_SYSTEM_TRUSTED_KEYS=""
10608 +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y
10609 +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=4096
10610 +CONFIG_SECONDARY_TRUSTED_KEYRING=y
10611 +CONFIG_SYSTEM_BLACKLIST_KEYRING=y
10612 +CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
10613 +# end of Certificates for signature checking
10616 +# Library routines
10618 +CONFIG_RAID6_PQ=m
10619 +CONFIG_RAID6_PQ_BENCHMARK=y
10620 +CONFIG_LINEAR_RANGES=y
10621 +CONFIG_PACKING=y
10622 +CONFIG_BITREVERSE=y
10623 +CONFIG_GENERIC_STRNCPY_FROM_USER=y
10624 +CONFIG_GENERIC_STRNLEN_USER=y
10625 +CONFIG_GENERIC_NET_UTILS=y
10626 +CONFIG_GENERIC_FIND_FIRST_BIT=y
10627 +CONFIG_CORDIC=m
10628 +# CONFIG_PRIME_NUMBERS is not set
10629 +CONFIG_RATIONAL=y
10630 +CONFIG_GENERIC_PCI_IOMAP=y
10631 +CONFIG_GENERIC_IOMAP=y
10632 +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
10633 +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
10634 +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y
10635 +CONFIG_CRC_CCITT=y
10636 +CONFIG_CRC16=y
10637 +CONFIG_CRC_T10DIF=y
10638 +CONFIG_CRC_ITU_T=m
10639 +CONFIG_CRC32=y
10640 +# CONFIG_CRC32_SELFTEST is not set
10641 +CONFIG_CRC32_SLICEBY8=y
10642 +# CONFIG_CRC32_SLICEBY4 is not set
10643 +# CONFIG_CRC32_SARWATE is not set
10644 +# CONFIG_CRC32_BIT is not set
10645 +CONFIG_CRC64=m
10646 +CONFIG_CRC4=m
10647 +CONFIG_CRC7=m
10648 +CONFIG_LIBCRC32C=m
10649 +CONFIG_CRC8=m
10650 +CONFIG_XXHASH=y
10651 +# CONFIG_RANDOM32_SELFTEST is not set
10652 +CONFIG_842_COMPRESS=m
10653 +CONFIG_842_DECOMPRESS=m
10654 +CONFIG_ZLIB_INFLATE=y
10655 +CONFIG_ZLIB_DEFLATE=y
10656 +CONFIG_LZO_COMPRESS=y
10657 +CONFIG_LZO_DECOMPRESS=y
10658 +CONFIG_LZ4_COMPRESS=y
10659 +CONFIG_LZ4HC_COMPRESS=y
10660 +CONFIG_LZ4_DECOMPRESS=y
10661 +CONFIG_ZSTD_COMPRESS=y
10662 +CONFIG_ZSTD_DECOMPRESS=y
10663 +CONFIG_XZ_DEC=y
10664 +CONFIG_XZ_DEC_X86=y
10665 +CONFIG_XZ_DEC_POWERPC=y
10666 +CONFIG_XZ_DEC_IA64=y
10667 +CONFIG_XZ_DEC_ARM=y
10668 +CONFIG_XZ_DEC_ARMTHUMB=y
10669 +CONFIG_XZ_DEC_SPARC=y
10670 +CONFIG_XZ_DEC_BCJ=y
10671 +CONFIG_XZ_DEC_TEST=m
10672 +CONFIG_DECOMPRESS_GZIP=y
10673 +CONFIG_DECOMPRESS_BZIP2=y
10674 +CONFIG_DECOMPRESS_LZMA=y
10675 +CONFIG_DECOMPRESS_XZ=y
10676 +CONFIG_DECOMPRESS_LZO=y
10677 +CONFIG_DECOMPRESS_LZ4=y
10678 +CONFIG_DECOMPRESS_ZSTD=y
10679 +CONFIG_GENERIC_ALLOCATOR=y
10680 +CONFIG_REED_SOLOMON=m
10681 +CONFIG_REED_SOLOMON_ENC8=y
10682 +CONFIG_REED_SOLOMON_DEC8=y
10683 +CONFIG_REED_SOLOMON_DEC16=y
10684 +CONFIG_BCH=m
10685 +CONFIG_TEXTSEARCH=y
10686 +CONFIG_TEXTSEARCH_KMP=m
10687 +CONFIG_TEXTSEARCH_BM=m
10688 +CONFIG_TEXTSEARCH_FSM=m
10689 +CONFIG_BTREE=y
10690 +CONFIG_INTERVAL_TREE=y
10691 +CONFIG_XARRAY_MULTI=y
10692 +CONFIG_ASSOCIATIVE_ARRAY=y
10693 +CONFIG_HAS_IOMEM=y
10694 +CONFIG_HAS_IOPORT_MAP=y
10695 +CONFIG_HAS_DMA=y
10696 +CONFIG_DMA_OPS=y
10697 +CONFIG_NEED_SG_DMA_LENGTH=y
10698 +CONFIG_NEED_DMA_MAP_STATE=y
10699 +CONFIG_ARCH_DMA_ADDR_T_64BIT=y
10700 +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y
10701 +CONFIG_SWIOTLB=y
10702 +CONFIG_DMA_COHERENT_POOL=y
10703 +# CONFIG_DMA_API_DEBUG is not set
10704 +# CONFIG_DMA_MAP_BENCHMARK is not set
10705 +CONFIG_SGL_ALLOC=y
10706 +CONFIG_IOMMU_HELPER=y
10707 +CONFIG_CHECK_SIGNATURE=y
10708 +CONFIG_CPU_RMAP=y
10709 +CONFIG_DQL=y
10710 +CONFIG_GLOB=y
10711 +# CONFIG_GLOB_SELFTEST is not set
10712 +CONFIG_NLATTR=y
10713 +CONFIG_LRU_CACHE=m
10714 +CONFIG_CLZ_TAB=y
10715 +CONFIG_IRQ_POLL=y
10716 +CONFIG_MPILIB=y
10717 +CONFIG_SIGNATURE=y
10718 +CONFIG_DIMLIB=y
10719 +CONFIG_OID_REGISTRY=y
10720 +CONFIG_UCS2_STRING=y
10721 +CONFIG_HAVE_GENERIC_VDSO=y
10722 +CONFIG_GENERIC_GETTIMEOFDAY=y
10723 +CONFIG_GENERIC_VDSO_TIME_NS=y
10724 +CONFIG_FONT_SUPPORT=y
10725 +CONFIG_FONTS=y
10726 +CONFIG_FONT_8x8=y
10727 +CONFIG_FONT_8x16=y
10728 +# CONFIG_FONT_6x11 is not set
10729 +# CONFIG_FONT_7x14 is not set
10730 +# CONFIG_FONT_PEARL_8x8 is not set
10731 +CONFIG_FONT_ACORN_8x8=y
10732 +# CONFIG_FONT_MINI_4x6 is not set
10733 +CONFIG_FONT_6x10=y
10734 +# CONFIG_FONT_10x18 is not set
10735 +# CONFIG_FONT_SUN8x16 is not set
10736 +# CONFIG_FONT_SUN12x22 is not set
10737 +CONFIG_FONT_TER16x32=y
10738 +# CONFIG_FONT_6x8 is not set
10739 +CONFIG_SG_POOL=y
10740 +CONFIG_ARCH_HAS_PMEM_API=y
10741 +CONFIG_MEMREGION=y
10742 +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
10743 +CONFIG_ARCH_HAS_COPY_MC=y
10744 +CONFIG_ARCH_STACKWALK=y
10745 +CONFIG_SBITMAP=y
10746 +CONFIG_PARMAN=m
10747 +CONFIG_OBJAGG=m
10748 +# CONFIG_STRING_SELFTEST is not set
10749 +# end of Library routines
10751 +CONFIG_PLDMFW=y
10754 +# Kernel hacking
10758 +# printk and dmesg options
10760 +CONFIG_PRINTK_TIME=y
10761 +# CONFIG_PRINTK_CALLER is not set
10762 +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
10763 +CONFIG_CONSOLE_LOGLEVEL_QUIET=3
10764 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
10765 +CONFIG_BOOT_PRINTK_DELAY=y
10766 +CONFIG_DYNAMIC_DEBUG=y
10767 +CONFIG_DYNAMIC_DEBUG_CORE=y
10768 +# CONFIG_SYMBOLIC_ERRNAME is not set
10769 +# CONFIG_DEBUG_BUGVERBOSE is not set
10770 +# end of printk and dmesg options
10773 +# Compile-time checks and compiler options
10775 +# CONFIG_DEBUG_INFO is not set
10776 +CONFIG_FRAME_WARN=1024
10777 +# CONFIG_STRIP_ASM_SYMS is not set
10778 +# CONFIG_READABLE_ASM is not set
10779 +# CONFIG_HEADERS_INSTALL is not set
10780 +# CONFIG_DEBUG_SECTION_MISMATCH is not set
10781 +CONFIG_SECTION_MISMATCH_WARN_ONLY=y
10782 +# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B is not set
10783 +CONFIG_STACK_VALIDATION=y
10784 +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
10785 +# end of Compile-time checks and compiler options
10788 +# Generic Kernel Debugging Instruments
10790 +CONFIG_MAGIC_SYSRQ=y
10791 +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x01b6
10792 +CONFIG_MAGIC_SYSRQ_SERIAL=y
10793 +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE=""
10794 +CONFIG_DEBUG_FS=y
10795 +CONFIG_DEBUG_FS_ALLOW_ALL=y
10796 +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set
10797 +# CONFIG_DEBUG_FS_ALLOW_NONE is not set
10798 +CONFIG_HAVE_ARCH_KGDB=y
10799 +CONFIG_KGDB=y
10800 +CONFIG_KGDB_HONOUR_BLOCKLIST=y
10801 +CONFIG_KGDB_SERIAL_CONSOLE=y
10802 +# CONFIG_KGDB_TESTS is not set
10803 +CONFIG_KGDB_LOW_LEVEL_TRAP=y
10804 +CONFIG_KGDB_KDB=y
10805 +CONFIG_KDB_DEFAULT_ENABLE=0x1
10806 +CONFIG_KDB_KEYBOARD=y
10807 +CONFIG_KDB_CONTINUE_CATASTROPHIC=0
10808 +CONFIG_ARCH_HAS_EARLY_DEBUG=y
10809 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
10810 +# CONFIG_UBSAN is not set
10811 +CONFIG_HAVE_ARCH_KCSAN=y
10812 +CONFIG_HAVE_KCSAN_COMPILER=y
10813 +# CONFIG_KCSAN is not set
10814 +# end of Generic Kernel Debugging Instruments
10816 +CONFIG_DEBUG_KERNEL=y
10817 +CONFIG_DEBUG_MISC=y
10820 +# Memory Debugging
10822 +# CONFIG_PAGE_EXTENSION is not set
10823 +# CONFIG_DEBUG_PAGEALLOC is not set
10824 +# CONFIG_PAGE_OWNER is not set
10825 +CONFIG_PAGE_POISONING=y
10826 +# CONFIG_DEBUG_RODATA_TEST is not set
10827 +CONFIG_ARCH_HAS_DEBUG_WX=y
10828 +CONFIG_DEBUG_WX=y
10829 +CONFIG_GENERIC_PTDUMP=y
10830 +CONFIG_PTDUMP_CORE=y
10831 +# CONFIG_PTDUMP_DEBUGFS is not set
10832 +# CONFIG_DEBUG_OBJECTS is not set
10833 +# CONFIG_SLUB_DEBUG_ON is not set
10834 +# CONFIG_SLUB_STATS is not set
10835 +CONFIG_HAVE_DEBUG_KMEMLEAK=y
10836 +# CONFIG_DEBUG_KMEMLEAK is not set
10837 +# CONFIG_DEBUG_STACK_USAGE is not set
10838 +CONFIG_SCHED_STACK_END_CHECK=y
10839 +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y
10840 +# CONFIG_DEBUG_VM is not set
10841 +# CONFIG_DEBUG_VM_PGTABLE is not set
10842 +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
10843 +# CONFIG_DEBUG_VIRTUAL is not set
10844 +# CONFIG_DEBUG_MEMORY_INIT is not set
10845 +CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
10846 +# CONFIG_DEBUG_PER_CPU_MAPS is not set
10847 +CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y
10848 +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set
10849 +CONFIG_HAVE_ARCH_KASAN=y
10850 +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y
10851 +CONFIG_CC_HAS_KASAN_GENERIC=y
10852 +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
10853 +# CONFIG_KASAN is not set
10854 +CONFIG_HAVE_ARCH_KFENCE=y
10855 +CONFIG_KFENCE=y
10856 +CONFIG_KFENCE_STATIC_KEYS=y
10857 +CONFIG_KFENCE_SAMPLE_INTERVAL=0
10858 +CONFIG_KFENCE_NUM_OBJECTS=255
10859 +CONFIG_KFENCE_STRESS_TEST_FAULTS=0
10860 +# end of Memory Debugging
10862 +# CONFIG_DEBUG_SHIRQ is not set
10865 +# Debug Oops, Lockups and Hangs
10867 +# CONFIG_PANIC_ON_OOPS is not set
10868 +CONFIG_PANIC_ON_OOPS_VALUE=0
10869 +CONFIG_PANIC_TIMEOUT=0
10870 +CONFIG_LOCKUP_DETECTOR=y
10871 +CONFIG_SOFTLOCKUP_DETECTOR=y
10872 +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
10873 +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
10874 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y
10875 +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
10876 +CONFIG_HARDLOCKUP_DETECTOR=y
10877 +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
10878 +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
10879 +CONFIG_DETECT_HUNG_TASK=y
10880 +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
10881 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
10882 +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
10883 +# CONFIG_WQ_WATCHDOG is not set
10884 +# CONFIG_TEST_LOCKUP is not set
10885 +# end of Debug Oops, Lockups and Hangs
10888 +# Scheduler Debugging
10890 +CONFIG_SCHED_DEBUG=y
10891 +CONFIG_SCHED_INFO=y
10892 +CONFIG_SCHEDSTATS=y
10893 +# end of Scheduler Debugging
10895 +# CONFIG_DEBUG_TIMEKEEPING is not set
10896 +# CONFIG_DEBUG_PREEMPT is not set
10899 +# Lock Debugging (spinlocks, mutexes, etc...)
10901 +CONFIG_LOCK_DEBUGGING_SUPPORT=y
10902 +# CONFIG_PROVE_LOCKING is not set
10903 +# CONFIG_LOCK_STAT is not set
10904 +# CONFIG_DEBUG_RT_MUTEXES is not set
10905 +# CONFIG_DEBUG_SPINLOCK is not set
10906 +# CONFIG_DEBUG_MUTEXES is not set
10907 +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
10908 +# CONFIG_DEBUG_RWSEMS is not set
10909 +# CONFIG_DEBUG_LOCK_ALLOC is not set
10910 +# CONFIG_DEBUG_ATOMIC_SLEEP is not set
10911 +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
10912 +# CONFIG_LOCK_TORTURE_TEST is not set
10913 +# CONFIG_WW_MUTEX_SELFTEST is not set
10914 +# CONFIG_SCF_TORTURE_TEST is not set
10915 +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set
10916 +# end of Lock Debugging (spinlocks, mutexes, etc...)
10918 +# CONFIG_DEBUG_IRQFLAGS is not set
10919 +CONFIG_STACKTRACE=y
10920 +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
10921 +# CONFIG_DEBUG_KOBJECT is not set
10924 +# Debug kernel data structures
10926 +# CONFIG_DEBUG_LIST is not set
10927 +# CONFIG_DEBUG_PLIST is not set
10928 +# CONFIG_DEBUG_SG is not set
10929 +# CONFIG_DEBUG_NOTIFIERS is not set
10930 +# CONFIG_BUG_ON_DATA_CORRUPTION is not set
10931 +# end of Debug kernel data structures
10933 +# CONFIG_DEBUG_CREDENTIALS is not set
10936 +# RCU Debugging
10938 +# CONFIG_RCU_SCALE_TEST is not set
10939 +# CONFIG_RCU_TORTURE_TEST is not set
10940 +# CONFIG_RCU_REF_SCALE_TEST is not set
10941 +CONFIG_RCU_CPU_STALL_TIMEOUT=60
10942 +# CONFIG_RCU_TRACE is not set
10943 +# CONFIG_RCU_EQS_DEBUG is not set
10944 +# CONFIG_RCU_STRICT_GRACE_PERIOD is not set
10945 +# end of RCU Debugging
10947 +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
10948 +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
10949 +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
10950 +CONFIG_LATENCYTOP=y
10951 +CONFIG_USER_STACKTRACE_SUPPORT=y
10952 +CONFIG_HAVE_FUNCTION_TRACER=y
10953 +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
10954 +CONFIG_HAVE_DYNAMIC_FTRACE=y
10955 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
10956 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
10957 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
10958 +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
10959 +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
10960 +CONFIG_HAVE_FENTRY=y
10961 +CONFIG_HAVE_OBJTOOL_MCOUNT=y
10962 +CONFIG_HAVE_C_RECORDMCOUNT=y
10963 +CONFIG_TRACING_SUPPORT=y
10964 +# CONFIG_FTRACE is not set
10965 +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
10966 +CONFIG_SAMPLES=y
10967 +# CONFIG_SAMPLE_AUXDISPLAY is not set
10968 +# CONFIG_SAMPLE_KOBJECT is not set
10969 +# CONFIG_SAMPLE_KPROBES is not set
10970 +# CONFIG_SAMPLE_HW_BREAKPOINT is not set
10971 +# CONFIG_SAMPLE_KFIFO is not set
10972 +# CONFIG_SAMPLE_KDB is not set
10973 +# CONFIG_SAMPLE_RPMSG_CLIENT is not set
10974 +# CONFIG_SAMPLE_CONFIGFS is not set
10975 +# CONFIG_SAMPLE_VFIO_MDEV_MTTY is not set
10976 +# CONFIG_SAMPLE_VFIO_MDEV_MDPY is not set
10977 +# CONFIG_SAMPLE_VFIO_MDEV_MDPY_FB is not set
10978 +# CONFIG_SAMPLE_VFIO_MDEV_MBOCHS is not set
10979 +# CONFIG_SAMPLE_WATCHDOG is not set
10980 +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
10981 +CONFIG_STRICT_DEVMEM=y
10982 +# CONFIG_IO_STRICT_DEVMEM is not set
10985 +# x86 Debugging
10987 +CONFIG_TRACE_IRQFLAGS_SUPPORT=y
10988 +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
10989 +CONFIG_EARLY_PRINTK_USB=y
10990 +# CONFIG_X86_VERBOSE_BOOTUP is not set
10991 +CONFIG_EARLY_PRINTK=y
10992 +CONFIG_EARLY_PRINTK_DBGP=y
10993 +CONFIG_EARLY_PRINTK_USB_XDBC=y
10994 +# CONFIG_EFI_PGT_DUMP is not set
10995 +# CONFIG_DEBUG_TLBFLUSH is not set
10996 +# CONFIG_IOMMU_DEBUG is not set
10997 +CONFIG_HAVE_MMIOTRACE_SUPPORT=y
10998 +# CONFIG_X86_DECODER_SELFTEST is not set
10999 +# CONFIG_IO_DELAY_0X80 is not set
11000 +CONFIG_IO_DELAY_0XED=y
11001 +# CONFIG_IO_DELAY_UDELAY is not set
11002 +# CONFIG_IO_DELAY_NONE is not set
11003 +# CONFIG_DEBUG_BOOT_PARAMS is not set
11004 +# CONFIG_CPA_DEBUG is not set
11005 +# CONFIG_DEBUG_ENTRY is not set
11006 +# CONFIG_DEBUG_NMI_SELFTEST is not set
11007 +CONFIG_X86_DEBUG_FPU=y
11008 +CONFIG_PUNIT_ATOM_DEBUG=m
11009 +CONFIG_UNWINDER_ORC=y
11010 +# CONFIG_UNWINDER_FRAME_POINTER is not set
11011 +# CONFIG_UNWINDER_GUESS is not set
11012 +# end of x86 Debugging
11015 +# Kernel Testing and Coverage
11017 +# CONFIG_KUNIT is not set
11018 +CONFIG_NOTIFIER_ERROR_INJECTION=m
11019 +CONFIG_PM_NOTIFIER_ERROR_INJECT=m
11020 +# CONFIG_NETDEV_NOTIFIER_ERROR_INJECT is not set
11021 +CONFIG_FUNCTION_ERROR_INJECTION=y
11022 +# CONFIG_FAULT_INJECTION is not set
11023 +CONFIG_ARCH_HAS_KCOV=y
11024 +CONFIG_CC_HAS_SANCOV_TRACE_PC=y
11025 +# CONFIG_KCOV is not set
11026 +CONFIG_RUNTIME_TESTING_MENU=y
11027 +# CONFIG_LKDTM is not set
11028 +# CONFIG_TEST_LIST_SORT is not set
11029 +# CONFIG_TEST_MIN_HEAP is not set
11030 +# CONFIG_TEST_SORT is not set
11031 +# CONFIG_KPROBES_SANITY_TEST is not set
11032 +# CONFIG_BACKTRACE_SELF_TEST is not set
11033 +# CONFIG_RBTREE_TEST is not set
11034 +# CONFIG_REED_SOLOMON_TEST is not set
11035 +# CONFIG_INTERVAL_TREE_TEST is not set
11036 +# CONFIG_PERCPU_TEST is not set
11037 +# CONFIG_ATOMIC64_SELFTEST is not set
11038 +# CONFIG_ASYNC_RAID6_TEST is not set
11039 +# CONFIG_TEST_HEXDUMP is not set
11040 +# CONFIG_TEST_STRING_HELPERS is not set
11041 +# CONFIG_TEST_STRSCPY is not set
11042 +# CONFIG_TEST_KSTRTOX is not set
11043 +# CONFIG_TEST_PRINTF is not set
11044 +# CONFIG_TEST_BITMAP is not set
11045 +# CONFIG_TEST_UUID is not set
11046 +# CONFIG_TEST_XARRAY is not set
11047 +# CONFIG_TEST_OVERFLOW is not set
11048 +# CONFIG_TEST_RHASHTABLE is not set
11049 +# CONFIG_TEST_HASH is not set
11050 +# CONFIG_TEST_IDA is not set
11051 +# CONFIG_TEST_PARMAN is not set
11052 +# CONFIG_TEST_LKM is not set
11053 +# CONFIG_TEST_BITOPS is not set
11054 +# CONFIG_TEST_VMALLOC is not set
11055 +# CONFIG_TEST_USER_COPY is not set
11056 +CONFIG_TEST_BPF=m
11057 +CONFIG_TEST_BLACKHOLE_DEV=m
11058 +# CONFIG_FIND_BIT_BENCHMARK is not set
11059 +# CONFIG_TEST_FIRMWARE is not set
11060 +# CONFIG_TEST_SYSCTL is not set
11061 +# CONFIG_TEST_UDELAY is not set
11062 +# CONFIG_TEST_STATIC_KEYS is not set
11063 +# CONFIG_TEST_KMOD is not set
11064 +# CONFIG_TEST_MEMCAT_P is not set
11065 +# CONFIG_TEST_OBJAGG is not set
11066 +# CONFIG_TEST_STACKINIT is not set
11067 +# CONFIG_TEST_MEMINIT is not set
11068 +# CONFIG_TEST_HMM is not set
11069 +# CONFIG_TEST_FREE_PAGES is not set
11070 +# CONFIG_TEST_FPU is not set
11071 +CONFIG_MEMTEST=y
11072 +# CONFIG_HYPERV_TESTING is not set
11073 +# end of Kernel Testing and Coverage
11074 +# end of Kernel hacking
11075 diff --git a/.gitignore b/.gitignore
11076 index 3af66272d6f1..127012c1f717 100644
11077 --- a/.gitignore
11078 +++ b/.gitignore
11079 @@ -57,6 +57,7 @@ modules.order
11080  /tags
11081  /TAGS
11082  /linux
11083 +/modules-only.symvers
11084  /vmlinux
11085  /vmlinux.32
11086  /vmlinux.symvers
11087 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
11088 index 04545725f187..e38e2c55b2fa 100644
11089 --- a/Documentation/admin-guide/kernel-parameters.txt
11090 +++ b/Documentation/admin-guide/kernel-parameters.txt
11091 @@ -358,6 +358,10 @@
11092         autoconf=       [IPV6]
11093                         See Documentation/networking/ipv6.rst.
11095 +       autogroup=      [KNL] Enable or disable scheduler automatic task group
11096 +                       creation.
11097 +                       Format: <bool>
11099         show_lapic=     [APIC,X86] Advanced Programmable Interrupt Controller
11100                         Limit apic dumping. The parameter defines the maximal
11101                         number of local apics being dumped. Also it is possible
11102 @@ -1869,13 +1873,6 @@
11103                         bypassed by not enabling DMAR with this option. In
11104                         this case, gfx device will use physical address for
11105                         DMA.
11106 -               forcedac [X86-64]
11107 -                       With this option iommu will not optimize to look
11108 -                       for io virtual address below 32-bit forcing dual
11109 -                       address cycle on pci bus for cards supporting greater
11110 -                       than 32-bit addressing. The default is to look
11111 -                       for translation below 32-bit and if not available
11112 -                       then look in the higher range.
11113                 strict [Default Off]
11114                         With this option on every unmap_single operation will
11115                         result in a hardware IOTLB flush operation as opposed
11116 @@ -1964,6 +1961,14 @@
11117                 nobypass        [PPC/POWERNV]
11118                         Disable IOMMU bypass, using IOMMU for PCI devices.
11120 +       iommu.forcedac= [ARM64, X86] Control IOVA allocation for PCI devices.
11121 +                       Format: { "0" | "1" }
11122 +                       0 - Try to allocate a 32-bit DMA address first, before
11123 +                         falling back to the full range if needed.
11124 +                       1 - Allocate directly from the full usable range,
11125 +                         forcing Dual Address Cycle for PCI cards supporting
11126 +                         greater than 32-bit addressing.
11128         iommu.strict=   [ARM64] Configure TLB invalidation behaviour
11129                         Format: { "0" | "1" }
11130                         0 - Lazy mode.
11131 @@ -3196,8 +3201,6 @@
11132         noapic          [SMP,APIC] Tells the kernel to not make use of any
11133                         IOAPICs that may be present in the system.
11135 -       noautogroup     Disable scheduler automatic task group creation.
11137         nobats          [PPC] Do not use BATs for mapping kernel lowmem
11138                         on "Classic" PPC cores.
11140 @@ -3660,6 +3663,15 @@
11141                 nomsi           [MSI] If the PCI_MSI kernel config parameter is
11142                                 enabled, this kernel boot option can be used to
11143                                 disable the use of MSI interrupts system-wide.
11144 +               pcie_acs_override =
11145 +                                       [PCIE] Override missing PCIe ACS support for:
11146 +                               downstream
11147 +                                       All downstream ports - full ACS capabilities
11148 +                               multifunction
11149 +                                       All multifunction devices - multifunction ACS subset
11150 +                               id:nnnn:nnnn
11151 +                                       Specific device - full ACS capabilities
11152 +                                       Specified as vid:did (vendor/device ID) in hex
11153                 noioapicquirk   [APIC] Disable all boot interrupt quirks.
11154                                 Safety option to keep boot IRQs enabled. This
11155                                 should never be necessary.
11156 diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
11157 index 586cd4b86428..cf4a90d7a058 100644
11158 --- a/Documentation/admin-guide/sysctl/vm.rst
11159 +++ b/Documentation/admin-guide/sysctl/vm.rst
11160 @@ -26,6 +26,8 @@ Currently, these files are in /proc/sys/vm:
11162  - admin_reserve_kbytes
11163  - block_dump
11164 +- clean_low_kbytes
11165 +- clean_min_kbytes
11166  - compact_memory
11167  - compaction_proactiveness
11168  - compact_unevictable_allowed
11169 @@ -113,6 +115,41 @@ block_dump enables block I/O debugging when set to a nonzero value. More
11170  information on block I/O debugging is in Documentation/admin-guide/laptops/laptop-mode.rst.
11173 +clean_low_kbytes
11174 +=====================
11176 +This knob provides *best-effort* protection of clean file pages. The clean file
11177 +pages on the current node won't be reclaimed under memory pressure when their
11178 +amount is below vm.clean_low_kbytes *unless* we threaten to OOM or have no
11179 +free swap space or vm.swappiness=0.
11181 +Protection of clean file pages may be used to prevent thrashing and
11182 +reducing I/O under low-memory conditions.
11184 +Setting it to a high value may result in a early eviction of anonymous pages
11185 +into the swap space by attempting to hold the protected amount of clean file
11186 +pages in memory.
11188 +The default value is defined by CONFIG_CLEAN_LOW_KBYTES.
11191 +clean_min_kbytes
11192 +=====================
11194 +This knob provides *hard* protection of clean file pages. The clean file pages
11195 +on the current node won't be reclaimed under memory pressure when their amount
11196 +is below vm.clean_min_kbytes.
11198 +Hard protection of clean file pages may be used to avoid high latency and
11199 +prevent livelock in near-OOM conditions.
11201 +Setting it to a high value may result in a early out-of-memory condition due to
11202 +the inability to reclaim the protected amount of clean file pages when other
11203 +types of pages cannot be reclaimed.
11205 +The default value is defined by CONFIG_CLEAN_MIN_KBYTES.
11208  compact_memory
11209  ==============
11211 diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
11212 index fe7c4cbfe4ba..dd1a5ce5896c 100644
11213 --- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
11214 +++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
11215 @@ -193,23 +193,35 @@ required:
11216    - interrupts
11217    - clocks
11218    - power-domains
11219 -  - resets
11221 -if:
11222 -  properties:
11223 -    compatible:
11224 -      contains:
11225 -        enum:
11226 -          - renesas,vin-r8a7778
11227 -          - renesas,vin-r8a7779
11228 -          - renesas,rcar-gen2-vin
11229 -then:
11230 -  required:
11231 -    - port
11232 -else:
11233 -  required:
11234 -    - renesas,id
11235 -    - ports
11237 +allOf:
11238 +  - if:
11239 +      not:
11240 +        properties:
11241 +          compatible:
11242 +            contains:
11243 +              enum:
11244 +                - renesas,vin-r8a7778
11245 +                - renesas,vin-r8a7779
11246 +    then:
11247 +      required:
11248 +        - resets
11250 +  - if:
11251 +      properties:
11252 +        compatible:
11253 +          contains:
11254 +            enum:
11255 +              - renesas,vin-r8a7778
11256 +              - renesas,vin-r8a7779
11257 +              - renesas,rcar-gen2-vin
11258 +    then:
11259 +      required:
11260 +        - port
11261 +    else:
11262 +      required:
11263 +        - renesas,id
11264 +        - ports
11266  additionalProperties: false
11268 diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11269 index 4a2bcc0158e2..8fdfbc763d70 100644
11270 --- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11271 +++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11272 @@ -17,6 +17,7 @@ allOf:
11273  properties:
11274    compatible:
11275      oneOf:
11276 +      - const: renesas,pcie-r8a7779       # R-Car H1
11277        - items:
11278            - enum:
11279                - renesas,pcie-r8a7742      # RZ/G1H
11280 @@ -74,7 +75,16 @@ required:
11281    - clocks
11282    - clock-names
11283    - power-domains
11284 -  - resets
11286 +if:
11287 +  not:
11288 +    properties:
11289 +      compatible:
11290 +        contains:
11291 +          const: renesas,pcie-r8a7779
11292 +then:
11293 +  required:
11294 +    - resets
11296  unevaluatedProperties: false
11298 diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11299 index 626447fee092..7808ec8bc712 100644
11300 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11301 +++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11302 @@ -25,11 +25,13 @@ properties:
11303        - qcom,msm8998-qmp-pcie-phy
11304        - qcom,msm8998-qmp-ufs-phy
11305        - qcom,msm8998-qmp-usb3-phy
11306 +      - qcom,sc7180-qmp-usb3-phy
11307        - qcom,sc8180x-qmp-ufs-phy
11308        - qcom,sc8180x-qmp-usb3-phy
11309        - qcom,sdm845-qhp-pcie-phy
11310        - qcom,sdm845-qmp-pcie-phy
11311        - qcom,sdm845-qmp-ufs-phy
11312 +      - qcom,sdm845-qmp-usb3-phy
11313        - qcom,sdm845-qmp-usb3-uni-phy
11314        - qcom,sm8150-qmp-ufs-phy
11315        - qcom,sm8150-qmp-usb3-phy
11316 diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11317 index 33974ad10afe..62c0179d1765 100644
11318 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11319 +++ b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11320 @@ -14,9 +14,7 @@ properties:
11321    compatible:
11322      enum:
11323        - qcom,sc7180-qmp-usb3-dp-phy
11324 -      - qcom,sc7180-qmp-usb3-phy
11325        - qcom,sdm845-qmp-usb3-dp-phy
11326 -      - qcom,sdm845-qmp-usb3-phy
11327    reg:
11328      items:
11329        - description: Address and length of PHY's USB serdes block.
11330 diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
11331 index f54cae9ff7b2..d3f87f2bfdc2 100644
11332 --- a/Documentation/devicetree/bindings/serial/8250.yaml
11333 +++ b/Documentation/devicetree/bindings/serial/8250.yaml
11334 @@ -93,11 +93,6 @@ properties:
11335                - mediatek,mt7622-btif
11336                - mediatek,mt7623-btif
11337            - const: mediatek,mtk-btif
11338 -      - items:
11339 -          - enum:
11340 -              - mediatek,mt7622-btif
11341 -              - mediatek,mt7623-btif
11342 -          - const: mediatek,mtk-btif
11343        - items:
11344            - const: mrvl,mmp-uart
11345            - const: intel,xscale-uart
11346 diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11347 index 8631678283f9..865be05083c3 100644
11348 --- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11349 +++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11350 @@ -80,7 +80,8 @@ required:
11351    - interrupts
11352    - clocks
11354 -additionalProperties: false
11355 +additionalProperties:
11356 +  type: object
11358  examples:
11359    - |
11360 diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11361 index b33a76eeac4e..f963204e0b16 100644
11362 --- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11363 +++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11364 @@ -28,14 +28,7 @@ properties:
11365        - renesas,r8a77980-thermal # R-Car V3H
11366        - renesas,r8a779a0-thermal # R-Car V3U
11368 -  reg:
11369 -    minItems: 2
11370 -    maxItems: 4
11371 -    items:
11372 -      - description: TSC1 registers
11373 -      - description: TSC2 registers
11374 -      - description: TSC3 registers
11375 -      - description: TSC4 registers
11376 +  reg: true
11378    interrupts:
11379      items:
11380 @@ -71,8 +64,25 @@ if:
11381            enum:
11382              - renesas,r8a779a0-thermal
11383  then:
11384 +  properties:
11385 +    reg:
11386 +      minItems: 2
11387 +      maxItems: 3
11388 +      items:
11389 +        - description: TSC1 registers
11390 +        - description: TSC2 registers
11391 +        - description: TSC3 registers
11392    required:
11393      - interrupts
11394 +else:
11395 +  properties:
11396 +    reg:
11397 +      items:
11398 +        - description: TSC0 registers
11399 +        - description: TSC1 registers
11400 +        - description: TSC2 registers
11401 +        - description: TSC3 registers
11402 +        - description: TSC4 registers
11404  additionalProperties: false
11406 @@ -111,3 +121,20 @@ examples:
11407                      };
11408              };
11409      };
11410 +  - |
11411 +    #include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
11412 +    #include <dt-bindings/interrupt-controller/arm-gic.h>
11413 +    #include <dt-bindings/power/r8a779a0-sysc.h>
11415 +    tsc_r8a779a0: thermal@e6190000 {
11416 +            compatible = "renesas,r8a779a0-thermal";
11417 +            reg = <0xe6190000 0x200>,
11418 +                  <0xe6198000 0x200>,
11419 +                  <0xe61a0000 0x200>,
11420 +                  <0xe61a8000 0x200>,
11421 +                  <0xe61b0000 0x200>;
11422 +            clocks = <&cpg CPG_MOD 919>;
11423 +            power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
11424 +            resets = <&cpg 919>;
11425 +            #thermal-sensor-cells = <1>;
11426 +    };
11427 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
11428 index e361fc95ca29..82e3eee7363b 100644
11429 --- a/Documentation/dontdiff
11430 +++ b/Documentation/dontdiff
11431 @@ -178,6 +178,7 @@ mktables
11432  mktree
11433  mkutf8data
11434  modpost
11435 +modules-only.symvers
11436  modules.builtin
11437  modules.builtin.modinfo
11438  modules.nsdeps
11439 diff --git a/Documentation/driver-api/xilinx/eemi.rst b/Documentation/driver-api/xilinx/eemi.rst
11440 index 9dcbc6f18d75..c1bc47b9000d 100644
11441 --- a/Documentation/driver-api/xilinx/eemi.rst
11442 +++ b/Documentation/driver-api/xilinx/eemi.rst
11443 @@ -16,35 +16,8 @@ components running across different processing clusters on a chip or
11444  device to communicate with a power management controller (PMC) on a
11445  device to issue or respond to power management requests.
11447 -EEMI ops is a structure containing all eemi APIs supported by Zynq MPSoC.
11448 -The zynqmp-firmware driver maintain all EEMI APIs in zynqmp_eemi_ops
11449 -structure. Any driver who want to communicate with PMC using EEMI APIs
11450 -can call zynqmp_pm_get_eemi_ops().
11452 -Example of EEMI ops::
11454 -       /* zynqmp-firmware driver maintain all EEMI APIs */
11455 -       struct zynqmp_eemi_ops {
11456 -               int (*get_api_version)(u32 *version);
11457 -               int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
11458 -       };
11460 -       static const struct zynqmp_eemi_ops eemi_ops = {
11461 -               .get_api_version = zynqmp_pm_get_api_version,
11462 -               .query_data = zynqmp_pm_query_data,
11463 -       };
11465 -Example of EEMI ops usage::
11467 -       static const struct zynqmp_eemi_ops *eemi_ops;
11468 -       u32 ret_payload[PAYLOAD_ARG_CNT];
11469 -       int ret;
11471 -       eemi_ops = zynqmp_pm_get_eemi_ops();
11472 -       if (IS_ERR(eemi_ops))
11473 -               return PTR_ERR(eemi_ops);
11475 -       ret = eemi_ops->query_data(qdata, ret_payload);
11476 +Any driver who wants to communicate with PMC using EEMI APIs use the
11477 +functions provided for each function.
11479  IOCTL
11480  ------
11481 diff --git a/Documentation/filesystems/ntfs3.rst b/Documentation/filesystems/ntfs3.rst
11482 new file mode 100644
11483 index 000000000000..ffe9ea0c1499
11484 --- /dev/null
11485 +++ b/Documentation/filesystems/ntfs3.rst
11486 @@ -0,0 +1,106 @@
11487 +.. SPDX-License-Identifier: GPL-2.0
11489 +=====
11490 +NTFS3
11491 +=====
11494 +Summary and Features
11495 +====================
11497 +NTFS3 is fully functional NTFS Read-Write driver. The driver works with
11498 +NTFS versions up to 3.1, normal/compressed/sparse files
11499 +and journal replaying. File system type to use on mount is 'ntfs3'.
11501 +- This driver implements NTFS read/write support for normal, sparse and
11502 +  compressed files.
11503 +- Supports native journal replaying;
11504 +- Supports extended attributes
11505 +       Predefined extended attributes:
11506 +       - 'system.ntfs_security' gets/sets security
11507 +                       descriptor (SECURITY_DESCRIPTOR_RELATIVE)
11508 +       - 'system.ntfs_attrib' gets/sets ntfs file/dir attributes.
11509 +               Note: applied to empty files, this allows to switch type between
11510 +               sparse(0x200), compressed(0x800) and normal;
11511 +- Supports NFS export of mounted NTFS volumes.
11513 +Mount Options
11514 +=============
11516 +The list below describes mount options supported by NTFS3 driver in addition to
11517 +generic ones.
11519 +===============================================================================
11521 +nls=name               This option informs the driver how to interpret path
11522 +                       strings and translate them to Unicode and back. If
11523 +                       this option is not set, the default codepage will be
11524 +                       used (CONFIG_NLS_DEFAULT).
11525 +                       Examples:
11526 +                               'nls=utf8'
11528 +uid=
11529 +gid=
11530 +umask=                 Controls the default permissions for files/directories created
11531 +                       after the NTFS volume is mounted.
11533 +fmask=
11534 +dmask=                 Instead of specifying umask which applies both to
11535 +                       files and directories, fmask applies only to files and
11536 +                       dmask only to directories.
11538 +nohidden               Files with the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN)
11539 +                       attribute will not be shown under Linux.
11541 +sys_immutable          Files with the Windows-specific SYSTEM
11542 +                       (FILE_ATTRIBUTE_SYSTEM) attribute will be marked as system
11543 +                       immutable files.
11545 +discard                        Enable support of the TRIM command for improved performance
11546 +                       on delete operations, which is recommended for use with the
11547 +                       solid-state drives (SSD).
11549 +force                  Forces the driver to mount partitions even if 'dirty' flag
11550 +                       (volume dirty) is set. Not recommended for use.
11552 +sparse                 Create new files as "sparse".
11554 +showmeta               Use this parameter to show all meta-files (System Files) on
11555 +                       a mounted NTFS partition.
11556 +                       By default, all meta-files are hidden.
11558 +prealloc               Preallocate space for files excessively when file size is
11559 +                       increasing on writes. Decreases fragmentation in case of
11560 +                       parallel write operations to different files.
11562 +no_acs_rules           "No access rules" mount option sets access rights for
11563 +                       files/folders to 777 and owner/group to root. This mount
11564 +                       option absorbs all other permissions:
11565 +                       - permissions change for files/folders will be reported
11566 +                               as successful, but they will remain 777;
11567 +                       - owner/group change will be reported as successful, but
11568 +                               they will stay as root
11570 +acl                    Support POSIX ACLs (Access Control Lists). Effective if
11571 +                       supported by Kernel. Not to be confused with NTFS ACLs.
11572 +                       The option specified as acl enables support for POSIX ACLs.
11574 +noatime                        All files and directories will not update their last access
11575 +                       time attribute if a partition is mounted with this parameter.
11576 +                       This option can speed up file system operation.
11578 +===============================================================================
11580 +ToDo list
11581 +=========
11583 +- Full journaling support (currently journal replaying is supported) over JBD.
11586 +References
11587 +==========
11588 +https://www.paragon-software.com/home/ntfs-linux-professional/
11589 +       - Commercial version of the NTFS driver for Linux.
11591 +almaz.alexandrovich@paragon-software.com
11592 +       - Direct e-mail address for feedback and requests on the NTFS3 implementation.
11593 diff --git a/Documentation/locking/futex2.rst b/Documentation/locking/futex2.rst
11594 new file mode 100644
11595 index 000000000000..3ab49f0e741c
11596 --- /dev/null
11597 +++ b/Documentation/locking/futex2.rst
11598 @@ -0,0 +1,198 @@
11599 +.. SPDX-License-Identifier: GPL-2.0
11601 +======
11602 +futex2
11603 +======
11605 +:Author: André Almeida <andrealmeid@collabora.com>
11607 +futex, or fast user mutex, is a set of syscalls to allow userspace to create
11608 +performant synchronization mechanisms, such as mutexes, semaphores and
11609 +conditional variables in userspace. C standard libraries, like glibc, uses it
11610 +as a means to implement more high level interfaces like pthreads.
11612 +The interface
11613 +=============
11615 +uAPI functions
11616 +--------------
11618 +.. kernel-doc:: kernel/futex2.c
11619 +   :identifiers: sys_futex_wait sys_futex_wake sys_futex_waitv sys_futex_requeue
11621 +uAPI structures
11622 +---------------
11624 +.. kernel-doc:: include/uapi/linux/futex.h
11626 +The ``flag`` argument
11627 +---------------------
11629 +The flag is used to specify the size of the futex word
11630 +(FUTEX_[8, 16, 32]). It's mandatory to define one, since there's no
11631 +default size.
11633 +By default, the timeout uses a monotonic clock, but can be used as a realtime
11634 +one by using the FUTEX_REALTIME_CLOCK flag.
11636 +By default, futexes are of the private type, that means that this user address
11637 +will be accessed by threads that share the same memory region. This allows for
11638 +some internal optimizations, so they are faster. However, if the address needs
11639 +to be shared with different processes (like using ``mmap()`` or ``shm()``), they
11640 +need to be defined as shared and the flag FUTEX_SHARED_FLAG is used to set that.
11642 +By default, the operation has no NUMA-awareness, meaning that the user can't
11643 +choose the memory node where the kernel side futex data will be stored. The
11644 +user can choose the node where it wants to operate by setting the
11645 +FUTEX_NUMA_FLAG and using the following structure (where X can be 8, 16, or
11646 +32)::
11648 + struct futexX_numa {
11649 +         __uX value;
11650 +         __sX hint;
11651 + };
11653 +This structure should be passed at the ``void *uaddr`` of futex functions. The
11654 +address of the structure will be used to be waited on/waken on, and the
11655 +``value`` will be compared to ``val`` as usual. The ``hint`` member is used to
11656 +define which node the futex will use. When waiting, the futex will be
11657 +registered on a kernel-side table stored on that node; when waking, the futex
11658 +will be searched for on that given table. That means that there's no redundancy
11659 +between tables, and the wrong ``hint`` value will lead to undesired behavior.
11660 +Userspace is responsible for dealing with node migrations issues that may
11661 +occur. ``hint`` can range from [0, MAX_NUMA_NODES), for specifying a node, or
11662 +-1, to use the same node the current process is using.
11664 +When not using FUTEX_NUMA_FLAG on a NUMA system, the futex will be stored on a
11665 +global table on allocated on the first node.
11667 +The ``timo`` argument
11668 +---------------------
11670 +As per the Y2038 work done in the kernel, new interfaces shouldn't add timeout
11671 +options known to be buggy. Given that, ``timo`` should be a 64-bit timeout at
11672 +all platforms, using an absolute timeout value.
11674 +Implementation
11675 +==============
11677 +The internal implementation follows a similar design to the original futex.
11678 +Given that we want to replicate the same external behavior of current futex,
11679 +this should be somewhat expected.
11681 +Waiting
11682 +-------
11684 +For the wait operations, they are all treated as if you want to wait on N
11685 +futexes, so the path for futex_wait and futex_waitv is the basically the same.
11686 +For both syscalls, the first step is to prepare an internal list for the list
11687 +of futexes to wait for (using struct futexv_head). For futex_wait() calls, this
11688 +list will have a single object.
11690 +We have a hash table, where waiters register themselves before sleeping. Then
11691 +the wake function checks this table looking for waiters at uaddr.  The hash
11692 +bucket to be used is determined by a struct futex_key, that stores information
11693 +to uniquely identify an address from a given process. Given the huge address
11694 +space, there'll be hash collisions, so we store information to be later used on
11695 +collision treatment.
11697 +First, for every futex we want to wait on, we check if (``*uaddr == val``).
11698 +This check is done holding the bucket lock, so we are correctly serialized with
11699 +any futex_wake() calls. If any waiter fails the check above, we dequeue all
11700 +futexes. The check (``*uaddr == val``) can fail for two reasons:
11702 +- The values are different, and we return -EAGAIN. However, if while
11703 +  dequeueing we found that some futexes were awakened, we prioritize this
11704 +  and return success.
11706 +- When trying to access the user address, we do so with page faults
11707 +  disabled because we are holding a bucket's spin lock (and can't sleep
11708 +  while holding a spin lock). If there's an error, it might be a page
11709 +  fault, or an invalid address. We release the lock, dequeue everyone
11710 +  (because it's illegal to sleep while there are futexes enqueued, we
11711 +  could lose wakeups) and try again with page fault enabled. If we
11712 +  succeed, this means that the address is valid, but we need to do
11713 +  all the work again. For serialization reasons, we need to have the
11714 +  spin lock when getting the user value. Additionally, for shared
11715 +  futexes, we also need to recalculate the hash, since the underlying
11716 +  mapping mechanisms could have changed when dealing with page fault.
11717 +  If, even with page fault enabled, we can't access the address, it
11718 +  means it's an invalid user address, and we return -EFAULT. For this
11719 +  case, we prioritize the error, even if some futexes were awaken.
11721 +If the check is OK, they are enqueued on a linked list in our bucket, and
11722 +proceed to the next one. If all waiters succeed, we put the thread to sleep
11723 +until a futex_wake() call, timeout expires or we get a signal. After waking up,
11724 +we dequeue everyone, and check if some futex was awakened. This dequeue is done
11725 +by iteratively walking at each element of struct futex_head list.
11727 +All enqueuing/dequeuing operations requires to hold the bucket lock, to avoid
11728 +racing while modifying the list.
11730 +Waking
11731 +------
11733 +We get the bucket that's storing the waiters at uaddr, and wake the required
11734 +number of waiters, checking for hash collision.
11736 +There's an optimization that makes futex_wake() not take the bucket lock if
11737 +there's no one to be woken on that bucket. It checks an atomic counter that each
11738 +bucket has, if it says 0, then the syscall exits. In order for this to work, the
11739 +waiter thread increases it before taking the lock, so the wake thread will
11740 +correctly see that there's someone waiting and will continue the path to take
11741 +the bucket lock. To get the correct serialization, the waiter issues a memory
11742 +barrier after increasing the bucket counter and the waker issues a memory
11743 +barrier before checking it.
11745 +Requeuing
11746 +---------
11748 +The requeue path first checks for each struct futex_requeue and their flags.
11749 +Then, it will compare the expected value with the one at uaddr1::uaddr.
11750 +Following the same serialization explained at Waking_, we increase the atomic
11751 +counter for the bucket of uaddr2 before taking the lock. We need to have both
11752 +buckets locks at same time so we don't race with other futex operation. To
11753 +ensure the locks are taken in the same order for all threads (and thus avoiding
11754 +deadlocks), every requeue operation takes the "smaller" bucket first, when
11755 +comparing both addresses.
11757 +If the compare with user value succeeds, we proceed by waking ``nr_wake``
11758 +futexes, and then requeuing ``nr_requeue`` from bucket of uaddr1 to the uaddr2.
11759 +This consists in a simple list deletion/addition and replacing the old futex key
11760 +with the new one.
11762 +Futex keys
11763 +----------
11765 +There are two types of futexes: private and shared ones. The private are futexes
11766 +meant to be used by threads that share the same memory space, are easier to be
11767 +uniquely identified and thus can have some performance optimization. The
11768 +elements for identifying one are: the start address of the page where the
11769 +address is, the address offset within the page and the current->mm pointer.
11771 +Now, for uniquely identifying a shared futex:
11773 +- If the page containing the user address is an anonymous page, we can
11774 +  just use the same data used for private futexes (the start address of
11775 +  the page, the address offset within the page and the current->mm
11776 +  pointer); that will be enough for uniquely identifying such futex. We
11777 +  also set one bit at the key to differentiate if a private futex is
11778 +  used on the same address (mixing shared and private calls does not
11779 +  work).
11781 +- If the page is file-backed, current->mm maybe isn't the same one for
11782 +  every user of this futex, so we need to use other data: the
11783 +  page->index, a UUID for the struct inode and the offset within the
11784 +  page.
11786 +Note that members of futex_key don't have any particular meaning after they
11787 +are part of the struct - they are just bytes to identify a futex.  Given that,
11788 +we don't need to use a particular name or type that matches the original data,
11789 +we only need to care about the bitsize of each component and make both private
11790 +and shared fit in the same memory space.
11792 +Source code documentation
11793 +=========================
11795 +.. kernel-doc:: kernel/futex2.c
11796 +   :no-identifiers: sys_futex_wait sys_futex_wake sys_futex_waitv sys_futex_requeue
11797 diff --git a/Documentation/locking/index.rst b/Documentation/locking/index.rst
11798 index 7003bd5aeff4..9bf03c7fa1ec 100644
11799 --- a/Documentation/locking/index.rst
11800 +++ b/Documentation/locking/index.rst
11801 @@ -24,6 +24,7 @@ locking
11802      percpu-rw-semaphore
11803      robust-futexes
11804      robust-futex-ABI
11805 +    futex2
11807  .. only::  subproject and html
11809 diff --git a/Documentation/userspace-api/media/v4l/subdev-formats.rst b/Documentation/userspace-api/media/v4l/subdev-formats.rst
11810 index 7f16cbe46e5c..e6a9faa81197 100644
11811 --- a/Documentation/userspace-api/media/v4l/subdev-formats.rst
11812 +++ b/Documentation/userspace-api/media/v4l/subdev-formats.rst
11813 @@ -1567,8 +1567,8 @@ The following tables list existing packed RGB formats.
11814        - MEDIA_BUS_FMT_RGB101010_1X30
11815        - 0x1018
11816        -
11817 -      - 0
11818 -      - 0
11819 +      -
11820 +      -
11821        - r\ :sub:`9`
11822        - r\ :sub:`8`
11823        - r\ :sub:`7`
11824 diff --git a/Documentation/vm/index.rst b/Documentation/vm/index.rst
11825 index eff5fbd492d0..c353b3f55924 100644
11826 --- a/Documentation/vm/index.rst
11827 +++ b/Documentation/vm/index.rst
11828 @@ -17,6 +17,7 @@ various features of the Linux memory management
11830     swap_numa
11831     zswap
11832 +   multigen_lru
11834  Kernel developers MM documentation
11835  ==================================
11836 diff --git a/Documentation/vm/multigen_lru.rst b/Documentation/vm/multigen_lru.rst
11837 new file mode 100644
11838 index 000000000000..cf772aeca317
11839 --- /dev/null
11840 +++ b/Documentation/vm/multigen_lru.rst
11841 @@ -0,0 +1,192 @@
11842 +=====================
11843 +Multigenerational LRU
11844 +=====================
11846 +Quick Start
11847 +===========
11848 +Build Options
11849 +-------------
11850 +:Required: Set ``CONFIG_LRU_GEN=y``.
11852 +:Optional: Change ``CONFIG_NR_LRU_GENS`` to a number ``X`` to support
11853 + a maximum of ``X`` generations.
11855 +:Optional: Change ``CONFIG_TIERS_PER_GEN`` to a number ``Y`` to support
11856 + a maximum of ``Y`` tiers per generation.
11858 +:Optional: Set ``CONFIG_LRU_GEN_ENABLED=y`` to turn the feature on by
11859 + default.
11861 +Runtime Options
11862 +---------------
11863 +:Required: Write ``1`` to ``/sys/kernel/mm/lru_gen/enable`` if the
11864 + feature was not turned on by default.
11866 +:Optional: Change ``/sys/kernel/mm/lru_gen/spread`` to a number ``N``
11867 + to spread pages out across ``N+1`` generations. ``N`` should be less
11868 + than ``X``. Larger values make the background aging more aggressive.
11870 +:Optional: Read ``/sys/kernel/debug/lru_gen`` to verify the feature.
11871 + This file has the following output:
11875 +  memcg  memcg_id  memcg_path
11876 +    node  node_id
11877 +      min_gen  birth_time  anon_size  file_size
11878 +      ...
11879 +      max_gen  birth_time  anon_size  file_size
11881 +Given a memcg and a node, ``min_gen`` is the oldest generation
11882 +(number) and ``max_gen`` is the youngest. Birth time is in
11883 +milliseconds. The sizes of anon and file types are in pages.
11885 +Recipes
11886 +-------
11887 +:Android on ARMv8.1+: ``X=4``, ``N=0``
11889 +:Android on pre-ARMv8.1 CPUs: Not recommended due to the lack of
11890 + ``ARM64_HW_AFDBM``
11892 +:Laptops running Chrome on x86_64: ``X=7``, ``N=2``
11894 +:Working set estimation: Write ``+ memcg_id node_id gen [swappiness]``
11895 + to ``/sys/kernel/debug/lru_gen`` to account referenced pages to
11896 + generation ``max_gen`` and create the next generation ``max_gen+1``.
11897 + ``gen`` should be equal to ``max_gen``. A swap file and a non-zero
11898 + ``swappiness`` are required to scan anon type. If swapping is not
11899 + desired, set ``vm.swappiness`` to ``0``.
11901 +:Proactive reclaim: Write ``- memcg_id node_id gen [swappiness]
11902 + [nr_to_reclaim]`` to ``/sys/kernel/debug/lru_gen`` to evict
11903 + generations less than or equal to ``gen``. ``gen`` should be less
11904 + than ``max_gen-1`` as ``max_gen`` and ``max_gen-1`` are active
11905 + generations and therefore protected from the eviction. Use
11906 + ``nr_to_reclaim`` to limit the number of pages to be evicted.
11907 + Multiple command lines are supported, so does concatenation with
11908 + delimiters ``,`` and ``;``.
11910 +Framework
11911 +=========
11912 +For each ``lruvec``, evictable pages are divided into multiple
11913 +generations. The youngest generation number is stored in ``max_seq``
11914 +for both anon and file types as they are aged on an equal footing. The
11915 +oldest generation numbers are stored in ``min_seq[2]`` separately for
11916 +anon and file types as clean file pages can be evicted regardless of
11917 +swap and write-back constraints. Generation numbers are truncated into
11918 +``order_base_2(CONFIG_NR_LRU_GENS+1)`` bits in order to fit into
11919 +``page->flags``. The sliding window technique is used to prevent
11920 +truncated generation numbers from overlapping. Each truncated
11921 +generation number is an index to an array of per-type and per-zone
11922 +lists. Evictable pages are added to the per-zone lists indexed by
11923 +``max_seq`` or ``min_seq[2]`` (modulo ``CONFIG_NR_LRU_GENS``),
11924 +depending on whether they are being faulted in.
11926 +Each generation is then divided into multiple tiers. Tiers represent
11927 +levels of usage from file descriptors only. Pages accessed N times via
11928 +file descriptors belong to tier order_base_2(N). In contrast to moving
11929 +across generations which requires the lru lock, moving across tiers
11930 +only involves an atomic operation on ``page->flags`` and therefore has
11931 +a negligible cost.
11933 +The workflow comprises two conceptually independent functions: the
11934 +aging and the eviction.
11936 +Aging
11937 +-----
11938 +The aging produces young generations. Given an ``lruvec``, the aging
11939 +scans page tables for referenced pages of this ``lruvec``. Upon
11940 +finding one, the aging updates its generation number to ``max_seq``.
11941 +After each round of scan, the aging increments ``max_seq``.
11943 +The aging maintains either a system-wide ``mm_struct`` list or
11944 +per-memcg ``mm_struct`` lists, and it only scans page tables of
11945 +processes that have been scheduled since the last scan. Since scans
11946 +are differential with respect to referenced pages, the cost is roughly
11947 +proportional to their number.
11949 +The aging is due when both of ``min_seq[2]`` reaches ``max_seq-1``,
11950 +assuming both anon and file types are reclaimable.
11952 +Eviction
11953 +--------
11954 +The eviction consumes old generations. Given an ``lruvec``, the
11955 +eviction scans the pages on the per-zone lists indexed by either of
11956 +``min_seq[2]``. It first tries to select a type based on the values of
11957 +``min_seq[2]``. When anon and file types are both available from the
11958 +same generation, it selects the one that has a lower refault rate.
11960 +During a scan, the eviction sorts pages according to their generation
11961 +numbers, if the aging has found them referenced.  It also moves pages
11962 +from the tiers that have higher refault rates than tier 0 to the next
11963 +generation.
11965 +When it finds all the per-zone lists of a selected type are empty, the
11966 +eviction increments ``min_seq[2]`` indexed by this selected type.
11968 +Rationale
11969 +=========
11970 +Limitations of Current Implementation
11971 +-------------------------------------
11972 +Notion of Active/Inactive
11973 +~~~~~~~~~~~~~~~~~~~~~~~~~
11974 +For servers equipped with hundreds of gigabytes of memory, the
11975 +granularity of the active/inactive is too coarse to be useful for job
11976 +scheduling. False active/inactive rates are relatively high, and thus
11977 +the assumed savings may not materialize.
11979 +For phones and laptops, executable pages are frequently evicted
11980 +despite the fact that there are many less recently used anon pages.
11981 +Major faults on executable pages cause ``janks`` (slow UI renderings)
11982 +and negatively impact user experience.
11984 +For ``lruvec``\s from different memcgs or nodes, comparisons are
11985 +impossible due to the lack of a common frame of reference.
11987 +Incremental Scans via ``rmap``
11988 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11989 +Each incremental scan picks up at where the last scan left off and
11990 +stops after it has found a handful of unreferenced pages. For
11991 +workloads using a large amount of anon memory, incremental scans lose
11992 +the advantage under sustained memory pressure due to high ratios of
11993 +the number of scanned pages to the number of reclaimed pages. On top
11994 +of that, the ``rmap`` has poor memory locality due to its complex data
11995 +structures. The combined effects typically result in a high amount of
11996 +CPU usage in the reclaim path.
11998 +Benefits of Multigenerational LRU
11999 +---------------------------------
12000 +Notion of Generation Numbers
12001 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12002 +The notion of generation numbers introduces a quantitative approach to
12003 +memory overcommit. A larger number of pages can be spread out across
12004 +configurable generations, and thus they have relatively low false
12005 +active/inactive rates. Each generation includes all pages that have
12006 +been referenced since the last generation.
12008 +Given an ``lruvec``, scans and the selections between anon and file
12009 +types are all based on generation numbers, which are simple and yet
12010 +effective. For different ``lruvec``\s, comparisons are still possible
12011 +based on birth times of generations.
12013 +Differential Scans via Page Tables
12014 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12015 +Each differential scan discovers all pages that have been referenced
12016 +since the last scan. Specifically, it walks the ``mm_struct`` list
12017 +associated with an ``lruvec`` to scan page tables of processes that
12018 +have been scheduled since the last scan. The cost of each differential
12019 +scan is roughly proportional to the number of referenced pages it
12020 +discovers. Unless address spaces are extremely sparse, page tables
12021 +usually have better memory locality than the ``rmap``. The end result
12022 +is generally a significant reduction in CPU usage, for workloads
12023 +using a large amount of anon memory.
12025 +To-do List
12026 +==========
12027 +KVM Optimization
12028 +----------------
12029 +Support shadow page table scanning.
12031 +NUMA Optimization
12032 +-----------------
12033 +Support NUMA policies and per-node RSS counters.
12034 diff --git a/MAINTAINERS b/MAINTAINERS
12035 index 9450e052f1b1..b7a2162d159a 100644
12036 --- a/MAINTAINERS
12037 +++ b/MAINTAINERS
12038 @@ -7377,7 +7377,7 @@ F:        Documentation/locking/*futex*
12039  F:     include/asm-generic/futex.h
12040  F:     include/linux/futex.h
12041  F:     include/uapi/linux/futex.h
12042 -F:     kernel/futex.c
12043 +F:     kernel/futex*
12044  F:     tools/perf/bench/futex*
12045  F:     tools/testing/selftests/futex/
12047 @@ -12775,6 +12775,13 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs.git
12048  F:     Documentation/filesystems/ntfs.rst
12049  F:     fs/ntfs/
12051 +NTFS3 FILESYSTEM
12052 +M:     Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
12053 +S:     Supported
12054 +W:     http://www.paragon-software.com/
12055 +F:     Documentation/filesystems/ntfs3.rst
12056 +F:     fs/ntfs3/
12058  NUBUS SUBSYSTEM
12059  M:     Finn Thain <fthain@telegraphics.com.au>
12060  L:     linux-m68k@lists.linux-m68k.org
12061 @@ -19912,6 +19919,18 @@ F:     Documentation/vm/zsmalloc.rst
12062  F:     include/linux/zsmalloc.h
12063  F:     mm/zsmalloc.c
12065 +ZSTD
12066 +M:     Nick Terrell <terrelln@fb.com>
12067 +S:     Maintained
12068 +B:     https://github.com/facebook/zstd/issues
12069 +T:     git git://github.com/terrelln/linux.git
12070 +F:     include/linux/zstd*
12071 +F:     lib/zstd/
12072 +F:     lib/decompress_unzstd.c
12073 +F:     crypto/zstd.c
12074 +N:     zstd
12075 +K:     zstd
12077  ZSWAP COMPRESSED SWAP CACHING
12078  M:     Seth Jennings <sjenning@redhat.com>
12079  M:     Dan Streetman <ddstreet@ieee.org>
12080 diff --git a/Makefile b/Makefile
12081 index 3a10a8e08b6d..f1efce53c4e3 100644
12082 --- a/Makefile
12083 +++ b/Makefile
12084 @@ -1,7 +1,7 @@
12085  # SPDX-License-Identifier: GPL-2.0
12086  VERSION = 5
12087  PATCHLEVEL = 12
12088 -SUBLEVEL = 0
12089 +SUBLEVEL = 5
12090  EXTRAVERSION =
12091  NAME = Frozen Wasteland
12093 @@ -775,16 +775,16 @@ KBUILD_CFLAGS += -Wno-gnu
12094  KBUILD_CFLAGS += -mno-global-merge
12095  else
12097 -# These warnings generated too much noise in a regular build.
12098 -# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
12099 -KBUILD_CFLAGS += -Wno-unused-but-set-variable
12101  # Warn about unmarked fall-throughs in switch statement.
12102  # Disabled for clang while comment to attribute conversion happens and
12103  # https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
12104  KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
12105  endif
12107 +# These warnings generated too much noise in a regular build.
12108 +# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
12109 +KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
12111  KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
12112  ifdef CONFIG_FRAME_POINTER
12113  KBUILD_CFLAGS  += -fno-omit-frame-pointer -fno-optimize-sibling-calls
12114 @@ -1066,8 +1066,8 @@ endif # INSTALL_MOD_STRIP
12115  export mod_strip_cmd
12117  # CONFIG_MODULE_COMPRESS, if defined, will cause module to be compressed
12118 -# after they are installed in agreement with CONFIG_MODULE_COMPRESS_GZIP
12119 -# or CONFIG_MODULE_COMPRESS_XZ.
12120 +# after they are installed in agreement with CONFIG_MODULE_COMPRESS_GZIP,
12121 +# CONFIG_MODULE_COMPRESS_XZ, or CONFIG_MODULE_COMPRESS_ZSTD.
12123  mod_compress_cmd = true
12124  ifdef CONFIG_MODULE_COMPRESS
12125 @@ -1077,6 +1077,9 @@ ifdef CONFIG_MODULE_COMPRESS
12126    ifdef CONFIG_MODULE_COMPRESS_XZ
12127      mod_compress_cmd = $(XZ) --lzma2=dict=2MiB -f
12128    endif # CONFIG_MODULE_COMPRESS_XZ
12129 +  ifdef CONFIG_MODULE_COMPRESS_ZSTD
12130 +    mod_compress_cmd = $(ZSTD) -T0 --rm -f -q
12131 +  endif # CONFIG_MODULE_COMPRESS_ZSTD
12132  endif # CONFIG_MODULE_COMPRESS
12133  export mod_compress_cmd
12135 @@ -1513,7 +1516,7 @@ endif # CONFIG_MODULES
12136  # make distclean Remove editor backup files, patch leftover files and the like
12138  # Directories & files removed with 'make clean'
12139 -CLEAN_FILES += include/ksym vmlinux.symvers \
12140 +CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
12141                modules.builtin modules.builtin.modinfo modules.nsdeps \
12142                compile_commands.json .thinlto-cache
12144 diff --git a/arch/Kconfig b/arch/Kconfig
12145 index ecfd3520b676..cbd7f66734ee 100644
12146 --- a/arch/Kconfig
12147 +++ b/arch/Kconfig
12148 @@ -782,6 +782,15 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
12149  config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
12150         bool
12152 +config HAVE_ARCH_PARENT_PMD_YOUNG
12153 +       bool
12154 +       depends on PGTABLE_LEVELS > 2
12155 +       help
12156 +         Architectures that select this are able to set the accessed bit on
12157 +         non-leaf PMD entries in addition to leaf PTE entries where pages are
12158 +         mapped. For them, page table walkers that clear the accessed bit may
12159 +         stop at non-leaf PMD entries when they do not see the accessed bit.
12161  config HAVE_ARCH_HUGE_VMAP
12162         bool
12164 diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
12165 index ad9b7fe4dba3..4a9d33372fe2 100644
12166 --- a/arch/arc/include/asm/page.h
12167 +++ b/arch/arc/include/asm/page.h
12168 @@ -7,6 +7,18 @@
12170  #include <uapi/asm/page.h>
12172 +#ifdef CONFIG_ARC_HAS_PAE40
12174 +#define MAX_POSSIBLE_PHYSMEM_BITS      40
12175 +#define PAGE_MASK_PHYS                 (0xff00000000ull | PAGE_MASK)
12177 +#else /* CONFIG_ARC_HAS_PAE40 */
12179 +#define MAX_POSSIBLE_PHYSMEM_BITS      32
12180 +#define PAGE_MASK_PHYS                 PAGE_MASK
12182 +#endif /* CONFIG_ARC_HAS_PAE40 */
12184  #ifndef __ASSEMBLY__
12186  #define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
12187 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
12188 index 163641726a2b..5878846f00cf 100644
12189 --- a/arch/arc/include/asm/pgtable.h
12190 +++ b/arch/arc/include/asm/pgtable.h
12191 @@ -107,8 +107,8 @@
12192  #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
12194  /* Set of bits not changed in pte_modify */
12195 -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
12197 +#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
12198 +                                                          _PAGE_SPECIAL)
12199  /* More Abbrevaited helpers */
12200  #define PAGE_U_NONE     __pgprot(___DEF)
12201  #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
12202 @@ -132,13 +132,7 @@
12203  #define PTE_BITS_IN_PD0                (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
12204  #define PTE_BITS_RWX           (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
12206 -#ifdef CONFIG_ARC_HAS_PAE40
12207 -#define PTE_BITS_NON_RWX_IN_PD1        (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
12208 -#define MAX_POSSIBLE_PHYSMEM_BITS 40
12209 -#else
12210 -#define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK | _PAGE_CACHEABLE)
12211 -#define MAX_POSSIBLE_PHYSMEM_BITS 32
12212 -#endif
12213 +#define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
12215  /**************************************************************************
12216   * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
12217 diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
12218 index 2a97e2718a21..2a4ad619abfb 100644
12219 --- a/arch/arc/include/uapi/asm/page.h
12220 +++ b/arch/arc/include/uapi/asm/page.h
12221 @@ -33,5 +33,4 @@
12223  #define PAGE_MASK      (~(PAGE_SIZE-1))
12226  #endif /* _UAPI__ASM_ARC_PAGE_H */
12227 diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
12228 index 1743506081da..2cb8dfe866b6 100644
12229 --- a/arch/arc/kernel/entry.S
12230 +++ b/arch/arc/kernel/entry.S
12231 @@ -177,7 +177,7 @@ tracesys:
12233         ; Do the Sys Call as we normally would.
12234         ; Validate the Sys Call number
12235 -       cmp     r8,  NR_syscalls
12236 +       cmp     r8,  NR_syscalls - 1
12237         mov.hi  r0, -ENOSYS
12238         bhi     tracesys_exit
12240 @@ -255,7 +255,7 @@ ENTRY(EV_Trap)
12241         ;============ Normal syscall case
12243         ; syscall num shd not exceed the total system calls avail
12244 -       cmp     r8,  NR_syscalls
12245 +       cmp     r8,  NR_syscalls - 1
12246         mov.hi  r0, -ENOSYS
12247         bhi     .Lret_from_system_call
12249 diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
12250 index ce07e697916c..1bcc6985b9a0 100644
12251 --- a/arch/arc/mm/init.c
12252 +++ b/arch/arc/mm/init.c
12253 @@ -157,7 +157,16 @@ void __init setup_arch_memory(void)
12254         min_high_pfn = PFN_DOWN(high_mem_start);
12255         max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
12257 -       max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
12258 +       /*
12259 +        * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
12260 +        * For HIGHMEM without PAE max_high_pfn should be less than
12261 +        * min_low_pfn to guarantee that these two regions don't overlap.
12262 +        * For PAE case highmem is greater than lowmem, so it is natural
12263 +        * to use max_high_pfn.
12264 +        *
12265 +        * In both cases, holes should be handled by pfn_valid().
12266 +        */
12267 +       max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
12269         high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
12271 diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
12272 index fac4adc90204..95c649fbc95a 100644
12273 --- a/arch/arc/mm/ioremap.c
12274 +++ b/arch/arc/mm/ioremap.c
12275 @@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
12276  void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
12277                            unsigned long flags)
12279 +       unsigned int off;
12280         unsigned long vaddr;
12281         struct vm_struct *area;
12282 -       phys_addr_t off, end;
12283 +       phys_addr_t end;
12284         pgprot_t prot = __pgprot(flags);
12286         /* Don't allow wraparound, zero size */
12287 @@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
12289         /* Mappings have to be page-aligned */
12290         off = paddr & ~PAGE_MASK;
12291 -       paddr &= PAGE_MASK;
12292 +       paddr &= PAGE_MASK_PHYS;
12293         size = PAGE_ALIGN(end + 1) - paddr;
12295         /*
12296 diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
12297 index 9bb3c24f3677..9c7c68247289 100644
12298 --- a/arch/arc/mm/tlb.c
12299 +++ b/arch/arc/mm/tlb.c
12300 @@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
12301                       pte_t *ptep)
12303         unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
12304 -       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
12305 +       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
12306         struct page *page = pfn_to_page(pte_pfn(*ptep));
12308         create_tlb(vma, vaddr, ptep);
12309 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
12310 index fd94e27ba4fa..c1f804768621 100644
12311 --- a/arch/arm/boot/compressed/Makefile
12312 +++ b/arch/arm/boot/compressed/Makefile
12313 @@ -118,8 +118,8 @@ asflags-y := -DZIMAGE
12315  # Supply kernel BSS size to the decompressor via a linker symbol.
12316  KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
12317 -               sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
12318 -                      -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
12319 +               sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
12320 +                      -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
12321  LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
12322  # Supply ZRELADDR to the decompressor via a linker symbol.
12323  ifneq ($(CONFIG_AUTO_ZRELADDR),y)
12324 diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12325 index 6c9804d2f3b4..6df1ce545061 100644
12326 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12327 +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12328 @@ -713,9 +713,9 @@ &i2c7 {
12329         multi-master;
12330         status = "okay";
12332 -       si7021-a20@20 {
12333 +       si7021-a20@40 {
12334                 compatible = "silabs,si7020";
12335 -               reg = <0x20>;
12336 +               reg = <0x40>;
12337         };
12339         tmp275@48 {
12340 diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
12341 index 775ceb3acb6c..edca66c232c1 100644
12342 --- a/arch/arm/boot/dts/at91-sam9x60ek.dts
12343 +++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
12344 @@ -8,6 +8,7 @@
12345   */
12346  /dts-v1/;
12347  #include "sam9x60.dtsi"
12348 +#include <dt-bindings/input/input.h>
12350  / {
12351         model = "Microchip SAM9X60-EK";
12352 @@ -84,7 +85,7 @@ gpio_keys {
12353                 sw1 {
12354                         label = "SW1";
12355                         gpios = <&pioD 18 GPIO_ACTIVE_LOW>;
12356 -                       linux,code=<0x104>;
12357 +                       linux,code=<KEY_PROG1>;
12358                         wakeup-source;
12359                 };
12360         };
12361 diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12362 index 84e1180f3e89..a9e6fee55a2a 100644
12363 --- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12364 +++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12365 @@ -11,6 +11,7 @@
12366  #include "at91-sama5d27_som1.dtsi"
12367  #include <dt-bindings/mfd/atmel-flexcom.h>
12368  #include <dt-bindings/gpio/gpio.h>
12369 +#include <dt-bindings/input/input.h>
12371  / {
12372         model = "Atmel SAMA5D27 SOM1 EK";
12373 @@ -466,7 +467,7 @@ gpio_keys {
12374                 pb4 {
12375                         label = "USER";
12376                         gpios = <&pioA PIN_PA29 GPIO_ACTIVE_LOW>;
12377 -                       linux,code = <0x104>;
12378 +                       linux,code = <KEY_PROG1>;
12379                         wakeup-source;
12380                 };
12381         };
12382 diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12383 index 180a08765cb8..ff83967fd008 100644
12384 --- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12385 +++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12386 @@ -8,6 +8,7 @@
12387   */
12388  /dts-v1/;
12389  #include "at91-sama5d27_wlsom1.dtsi"
12390 +#include <dt-bindings/input/input.h>
12392  / {
12393         model = "Microchip SAMA5D27 WLSOM1 EK";
12394 @@ -35,7 +36,7 @@ gpio_keys {
12395                 sw4 {
12396                         label = "USER BUTTON";
12397                         gpios = <&pioA PIN_PB2 GPIO_ACTIVE_LOW>;
12398 -                       linux,code = <0x104>;
12399 +                       linux,code = <KEY_PROG1>;
12400                         wakeup-source;
12401                 };
12402         };
12403 diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
12404 index 46722a163184..bd64721fa23c 100644
12405 --- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
12406 +++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
12407 @@ -12,6 +12,7 @@
12408  #include "sama5d2.dtsi"
12409  #include "sama5d2-pinfunc.h"
12410  #include <dt-bindings/gpio/gpio.h>
12411 +#include <dt-bindings/input/input.h>
12412  #include <dt-bindings/mfd/atmel-flexcom.h>
12414  / {
12415 @@ -51,7 +52,7 @@ gpio_keys {
12416                 sw4 {
12417                         label = "USER_PB1";
12418                         gpios = <&pioA PIN_PD0 GPIO_ACTIVE_LOW>;
12419 -                       linux,code = <0x104>;
12420 +                       linux,code = <KEY_PROG1>;
12421                         wakeup-source;
12422                 };
12423         };
12424 diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12425 index 8de57d164acd..dfd150eb0fd8 100644
12426 --- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12427 +++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12428 @@ -11,6 +11,7 @@
12429  #include "sama5d2-pinfunc.h"
12430  #include <dt-bindings/mfd/atmel-flexcom.h>
12431  #include <dt-bindings/gpio/gpio.h>
12432 +#include <dt-bindings/input/input.h>
12433  #include <dt-bindings/pinctrl/at91.h>
12435  / {
12436 @@ -402,7 +403,7 @@ gpio_keys {
12437                 bp1 {
12438                         label = "PB_USER";
12439                         gpios = <&pioA PIN_PA10 GPIO_ACTIVE_LOW>;
12440 -                       linux,code = <0x104>;
12441 +                       linux,code = <KEY_PROG1>;
12442                         wakeup-source;
12443                 };
12444         };
12445 diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12446 index 4e7cf21f124c..509c732a0d8b 100644
12447 --- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12448 +++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12449 @@ -10,6 +10,7 @@
12450  #include "sama5d2-pinfunc.h"
12451  #include <dt-bindings/mfd/atmel-flexcom.h>
12452  #include <dt-bindings/gpio/gpio.h>
12453 +#include <dt-bindings/input/input.h>
12454  #include <dt-bindings/regulator/active-semi,8945a-regulator.h>
12456  / {
12457 @@ -712,7 +713,7 @@ gpio_keys {
12458                 bp1 {
12459                         label = "PB_USER";
12460                         gpios = <&pioA PIN_PB9 GPIO_ACTIVE_LOW>;
12461 -                       linux,code = <0x104>;
12462 +                       linux,code = <KEY_PROG1>;
12463                         wakeup-source;
12464                 };
12465         };
12466 diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12467 index 5179258f9247..9c55a921263b 100644
12468 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12469 +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12470 @@ -7,6 +7,7 @@
12471   */
12472  /dts-v1/;
12473  #include "sama5d36.dtsi"
12474 +#include <dt-bindings/input/input.h>
12476  / {
12477         model = "SAMA5D3 Xplained";
12478 @@ -354,7 +355,7 @@ gpio_keys {
12479                 bp3 {
12480                         label = "PB_USER";
12481                         gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
12482 -                       linux,code = <0x104>;
12483 +                       linux,code = <KEY_PROG1>;
12484                         wakeup-source;
12485                 };
12486         };
12487 diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
12488 index d3446e42b598..ce96345d28a3 100644
12489 --- a/arch/arm/boot/dts/at91sam9260ek.dts
12490 +++ b/arch/arm/boot/dts/at91sam9260ek.dts
12491 @@ -7,6 +7,7 @@
12492   */
12493  /dts-v1/;
12494  #include "at91sam9260.dtsi"
12495 +#include <dt-bindings/input/input.h>
12497  / {
12498         model = "Atmel at91sam9260ek";
12499 @@ -156,7 +157,7 @@ btn3 {
12500                 btn4 {
12501                         label = "Button 4";
12502                         gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
12503 -                       linux,code = <0x104>;
12504 +                       linux,code = <KEY_PROG1>;
12505                         wakeup-source;
12506                 };
12507         };
12508 diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12509 index 6e6e672c0b86..87bb39060e8b 100644
12510 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12511 +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12512 @@ -5,6 +5,7 @@
12513   * Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
12514   */
12515  #include "at91sam9g20.dtsi"
12516 +#include <dt-bindings/input/input.h>
12518  / {
12520 @@ -234,7 +235,7 @@ btn3 {
12521                 btn4 {
12522                         label = "Button 4";
12523                         gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
12524 -                       linux,code = <0x104>;
12525 +                       linux,code = <KEY_PROG1>;
12526                         wakeup-source;
12527                 };
12528         };
12529 diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12530 index 6a96655d8626..8ed403767540 100644
12531 --- a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12532 +++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12533 @@ -21,8 +21,8 @@ chosen {
12535         memory@0 {
12536                 device_type = "memory";
12537 -               reg = <0x00000000 0x08000000
12538 -                      0x88000000 0x08000000>;
12539 +               reg = <0x00000000 0x08000000>,
12540 +                     <0x88000000 0x08000000>;
12541         };
12543         leds {
12544 diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12545 index 3b0029e61b4c..667b118ba4ee 100644
12546 --- a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12547 +++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12548 @@ -21,8 +21,8 @@ chosen {
12550         memory@0 {
12551                 device_type = "memory";
12552 -               reg = <0x00000000 0x08000000
12553 -                      0x88000000 0x08000000>;
12554 +               reg = <0x00000000 0x08000000>,
12555 +                     <0x88000000 0x08000000>;
12556         };
12558         leds {
12559 diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12560 index 90f57bad6b24..ff31ce45831a 100644
12561 --- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12562 +++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12563 @@ -21,8 +21,8 @@ chosen {
12565         memory@0 {
12566                 device_type = "memory";
12567 -               reg = <0x00000000 0x08000000
12568 -                      0x88000000 0x18000000>;
12569 +               reg = <0x00000000 0x08000000>,
12570 +                     <0x88000000 0x18000000>;
12571         };
12573         spi {
12574 diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12575 index fed75e6ab58c..61c7b137607e 100644
12576 --- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12577 +++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12578 @@ -22,8 +22,8 @@ chosen {
12580         memory {
12581                 device_type = "memory";
12582 -               reg = <0x00000000 0x08000000
12583 -                      0x88000000 0x08000000>;
12584 +               reg = <0x00000000 0x08000000>,
12585 +                     <0x88000000 0x08000000>;
12586         };
12588         leds {
12589 diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12590 index 79542e18915c..4c60eda296d9 100644
12591 --- a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12592 +++ b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12593 @@ -21,8 +21,8 @@ chosen {
12595         memory@0 {
12596                 device_type = "memory";
12597 -               reg = <0x00000000 0x08000000
12598 -                      0x88000000 0x08000000>;
12599 +               reg = <0x00000000 0x08000000>,
12600 +                     <0x88000000 0x08000000>;
12601         };
12603         leds {
12604 diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12605 index 51c64f0b2560..9ca6d1b2590d 100644
12606 --- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12607 +++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12608 @@ -21,8 +21,8 @@ chosen {
12610         memory@0 {
12611                 device_type = "memory";
12612 -               reg = <0x00000000 0x08000000
12613 -                      0x88000000 0x08000000>;
12614 +               reg = <0x00000000 0x08000000>,
12615 +                     <0x88000000 0x08000000>;
12616         };
12618         leds {
12619 diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12620 index c29950b43a95..0e273c598732 100644
12621 --- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12622 +++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12623 @@ -21,8 +21,8 @@ chosen {
12625         memory@0 {
12626                 device_type = "memory";
12627 -               reg = <0x00000000 0x08000000
12628 -                      0x88000000 0x08000000>;
12629 +               reg = <0x00000000 0x08000000>,
12630 +                     <0x88000000 0x08000000>;
12631         };
12633         leds {
12634 diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12635 index 2f2d2b0a6893..d857751ec507 100644
12636 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12637 +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12638 @@ -21,8 +21,8 @@ chosen {
12640         memory@0 {
12641                 device_type = "memory";
12642 -               reg = <0x00000000 0x08000000
12643 -                      0x88000000 0x08000000>;
12644 +               reg = <0x00000000 0x08000000>,
12645 +                     <0x88000000 0x08000000>;
12646         };
12648         spi {
12649 diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12650 index 0e349e39f608..8b1a05a0f1a1 100644
12651 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12652 +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12653 @@ -21,8 +21,8 @@ chosen {
12655         memory@0 {
12656                 device_type = "memory";
12657 -               reg = <0x00000000 0x08000000
12658 -                      0x88000000 0x08000000>;
12659 +               reg = <0x00000000 0x08000000>,
12660 +                     <0x88000000 0x08000000>;
12661         };
12663         spi {
12664 diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12665 index 8f1e565c3db4..6c6bb7b17d27 100644
12666 --- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12667 +++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12668 @@ -21,8 +21,8 @@ chosen {
12670         memory {
12671                 device_type = "memory";
12672 -               reg = <0x00000000 0x08000000
12673 -                      0x88000000 0x08000000>;
12674 +               reg = <0x00000000 0x08000000>,
12675 +                     <0x88000000 0x08000000>;
12676         };
12678         leds {
12679 diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12680 index ce888b1835d1..d29e7f80ea6a 100644
12681 --- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12682 +++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12683 @@ -21,8 +21,8 @@ chosen {
12685         memory {
12686                 device_type = "memory";
12687 -               reg = <0x00000000 0x08000000
12688 -                      0x88000000 0x18000000>;
12689 +               reg = <0x00000000 0x08000000>,
12690 +                     <0x88000000 0x18000000>;
12691         };
12693         leds {
12694 diff --git a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12695 index ed8619b54d69..38fbefdf2e4e 100644
12696 --- a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12697 +++ b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12698 @@ -18,8 +18,8 @@ chosen {
12700         memory {
12701                 device_type = "memory";
12702 -               reg = <0x00000000 0x08000000
12703 -                      0x88000000 0x08000000>;
12704 +               reg = <0x00000000 0x08000000>,
12705 +                     <0x88000000 0x08000000>;
12706         };
12708         gpio-keys {
12709 diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12710 index 1f87993eae1d..7989a53597d4 100644
12711 --- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12712 +++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12713 @@ -21,8 +21,8 @@ chosen {
12715         memory {
12716                 device_type = "memory";
12717 -               reg = <0x00000000 0x08000000
12718 -                      0x88000000 0x08000000>;
12719 +               reg = <0x00000000 0x08000000>,
12720 +                     <0x88000000 0x08000000>;
12721         };
12723         leds {
12724 diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12725 index 6c6199a53d09..87b655be674c 100644
12726 --- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12727 +++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12728 @@ -32,8 +32,8 @@ chosen {
12730         memory {
12731                 device_type = "memory";
12732 -               reg = <0x00000000 0x08000000
12733 -                      0x88000000 0x08000000>;
12734 +               reg = <0x00000000 0x08000000>,
12735 +                     <0x88000000 0x08000000>;
12736         };
12738         leds {
12739 diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12740 index 911c65fbf251..e635a15041dd 100644
12741 --- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12742 +++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12743 @@ -21,8 +21,8 @@ chosen {
12745         memory@0 {
12746                 device_type = "memory";
12747 -               reg = <0x00000000 0x08000000
12748 -                      0x88000000 0x08000000>;
12749 +               reg = <0x00000000 0x08000000>,
12750 +                     <0x88000000 0x08000000>;
12751         };
12753         nand: nand@18028000 {
12754 diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12755 index 3725f2b0d60b..4b24b25389b5 100644
12756 --- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12757 +++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12758 @@ -18,8 +18,8 @@ chosen {
12760         memory@0 {
12761                 device_type = "memory";
12762 -               reg = <0x00000000 0x08000000
12763 -                      0x88000000 0x08000000>;
12764 +               reg = <0x00000000 0x08000000>,
12765 +                     <0x88000000 0x08000000>;
12766         };
12768         gpio-keys {
12769 diff --git a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12770 index 50f7cd08cfbb..a6dc99955e19 100644
12771 --- a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12772 +++ b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12773 @@ -18,8 +18,8 @@ chosen {
12775         memory@0 {
12776                 device_type = "memory";
12777 -               reg = <0x00000000 0x08000000
12778 -                      0x88000000 0x18000000>;
12779 +               reg = <0x00000000 0x08000000>,
12780 +                     <0x88000000 0x18000000>;
12781         };
12783         leds {
12784 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12785 index bcc420f85b56..ff98837bc0db 100644
12786 --- a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12787 +++ b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12788 @@ -18,8 +18,8 @@ chosen {
12790         memory@0 {
12791                 device_type = "memory";
12792 -               reg = <0x00000000 0x08000000
12793 -                      0x88000000 0x18000000>;
12794 +               reg = <0x00000000 0x08000000>,
12795 +                     <0x88000000 0x18000000>;
12796         };
12798         leds {
12799 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12800 index 4f8d777ae18d..452b8d0ab180 100644
12801 --- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12802 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12803 @@ -18,8 +18,8 @@ chosen {
12805         memory {
12806                 device_type = "memory";
12807 -               reg = <0x00000000 0x08000000
12808 -                      0x88000000 0x18000000>;
12809 +               reg = <0x00000000 0x08000000>,
12810 +                     <0x88000000 0x18000000>;
12811         };
12813         leds {
12814 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12815 index e17e9a17fb00..b76bfe6efcd4 100644
12816 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12817 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12818 @@ -18,8 +18,8 @@ chosen {
12820         memory@0 {
12821                 device_type = "memory";
12822 -               reg = <0x00000000 0x08000000
12823 -                      0x88000000 0x08000000>;
12824 +               reg = <0x00000000 0x08000000>,
12825 +                     <0x88000000 0x08000000>;
12826         };
12828         leds {
12829 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12830 index 60cc87ecc7ec..32d5a50578ec 100644
12831 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12832 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12833 @@ -18,8 +18,8 @@ chosen {
12835         memory@0 {
12836                 device_type = "memory";
12837 -               reg = <0x00000000 0x08000000
12838 -                      0x88000000 0x18000000>;
12839 +               reg = <0x00000000 0x08000000>,
12840 +                     <0x88000000 0x18000000>;
12841         };
12843         leds {
12844 diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12845 index f42a1703f4ab..42097a4c2659 100644
12846 --- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12847 +++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12848 @@ -18,8 +18,8 @@ chosen {
12850         memory@0 {
12851                 device_type = "memory";
12852 -               reg = <0x00000000 0x08000000
12853 -                      0x88000000 0x18000000>;
12854 +               reg = <0x00000000 0x08000000>,
12855 +                     <0x88000000 0x18000000>;
12856         };
12858         leds {
12859 diff --git a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12860 index ac3a4483dcb3..a2566ad4619c 100644
12861 --- a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12862 +++ b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12863 @@ -15,8 +15,8 @@ / {
12865         memory@0 {
12866                 device_type = "memory";
12867 -               reg = <0x00000000 0x08000000
12868 -                      0x88000000 0x18000000>;
12869 +               reg = <0x00000000 0x08000000>,
12870 +                     <0x88000000 0x18000000>;
12871         };
12873         gpio-keys {
12874 diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
12875 index 3bf90d9e3335..a294a02f2d23 100644
12876 --- a/arch/arm/boot/dts/dra7-l4.dtsi
12877 +++ b/arch/arm/boot/dts/dra7-l4.dtsi
12878 @@ -1168,7 +1168,7 @@ timer2: timer@0 {
12879                         };
12880                 };
12882 -               target-module@34000 {                   /* 0x48034000, ap 7 46.0 */
12883 +               timer3_target: target-module@34000 {    /* 0x48034000, ap 7 46.0 */
12884                         compatible = "ti,sysc-omap4-timer", "ti,sysc";
12885                         reg = <0x34000 0x4>,
12886                               <0x34010 0x4>;
12887 @@ -1195,7 +1195,7 @@ timer3: timer@0 {
12888                         };
12889                 };
12891 -               target-module@36000 {                   /* 0x48036000, ap 9 4e.0 */
12892 +               timer4_target: target-module@36000 {    /* 0x48036000, ap 9 4e.0 */
12893                         compatible = "ti,sysc-omap4-timer", "ti,sysc";
12894                         reg = <0x36000 0x4>,
12895                               <0x36010 0x4>;
12896 diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
12897 index ce1194744f84..53d68786a61f 100644
12898 --- a/arch/arm/boot/dts/dra7.dtsi
12899 +++ b/arch/arm/boot/dts/dra7.dtsi
12900 @@ -46,6 +46,7 @@ aliases {
12902         timer {
12903                 compatible = "arm,armv7-timer";
12904 +               status = "disabled";    /* See ARM architected timer wrap erratum i940 */
12905                 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
12906                              <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
12907                              <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
12908 @@ -1241,3 +1242,22 @@ timer@0 {
12909                 assigned-clock-parents = <&sys_32k_ck>;
12910         };
12911  };
12913 +/* Local timers, see ARM architected timer wrap erratum i940 */
12914 +&timer3_target {
12915 +       ti,no-reset-on-init;
12916 +       ti,no-idle;
12917 +       timer@0 {
12918 +               assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>;
12919 +               assigned-clock-parents = <&timer_sys_clk_div>;
12920 +       };
12923 +&timer4_target {
12924 +       ti,no-reset-on-init;
12925 +       ti,no-idle;
12926 +       timer@0 {
12927 +               assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>;
12928 +               assigned-clock-parents = <&timer_sys_clk_div>;
12929 +       };
12931 diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
12932 index 304a8ee2364c..d98c78207aaf 100644
12933 --- a/arch/arm/boot/dts/exynos4210-i9100.dts
12934 +++ b/arch/arm/boot/dts/exynos4210-i9100.dts
12935 @@ -136,7 +136,7 @@ battery@36 {
12936                         compatible = "maxim,max17042";
12938                         interrupt-parent = <&gpx2>;
12939 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
12940 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
12942                         pinctrl-0 = <&max17042_fuel_irq>;
12943                         pinctrl-names = "default";
12944 diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
12945 index 111c32bae02c..fc77c1bfd844 100644
12946 --- a/arch/arm/boot/dts/exynos4412-midas.dtsi
12947 +++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
12948 @@ -173,7 +173,7 @@ i2c_max77693: i2c-gpio-1 {
12949                 pmic@66 {
12950                         compatible = "maxim,max77693";
12951                         interrupt-parent = <&gpx1>;
12952 -                       interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
12953 +                       interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
12954                         pinctrl-names = "default";
12955                         pinctrl-0 = <&max77693_irq>;
12956                         reg = <0x66>;
12957 @@ -221,7 +221,7 @@ i2c_max77693_fuel: i2c-gpio-3 {
12958                 fuel-gauge@36 {
12959                         compatible = "maxim,max17047";
12960                         interrupt-parent = <&gpx2>;
12961 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
12962 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
12963                         pinctrl-names = "default";
12964                         pinctrl-0 = <&max77693_fuel_irq>;
12965                         reg = <0x36>;
12966 @@ -665,7 +665,7 @@ &i2c_7 {
12967         max77686: pmic@9 {
12968                 compatible = "maxim,max77686";
12969                 interrupt-parent = <&gpx0>;
12970 -               interrupts = <7 IRQ_TYPE_NONE>;
12971 +               interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
12972                 pinctrl-0 = <&max77686_irq>;
12973                 pinctrl-names = "default";
12974                 reg = <0x09>;
12975 diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
12976 index 2b20d9095d9f..eebe6a3952ce 100644
12977 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
12978 +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
12979 @@ -278,7 +278,7 @@ usb3503: usb-hub@8 {
12980         max77686: pmic@9 {
12981                 compatible = "maxim,max77686";
12982                 interrupt-parent = <&gpx3>;
12983 -               interrupts = <2 IRQ_TYPE_NONE>;
12984 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
12985                 pinctrl-names = "default";
12986                 pinctrl-0 = <&max77686_irq>;
12987                 reg = <0x09>;
12988 diff --git a/arch/arm/boot/dts/exynos4412-p4note.dtsi b/arch/arm/boot/dts/exynos4412-p4note.dtsi
12989 index b2f9d5448a18..9e750890edb8 100644
12990 --- a/arch/arm/boot/dts/exynos4412-p4note.dtsi
12991 +++ b/arch/arm/boot/dts/exynos4412-p4note.dtsi
12992 @@ -146,7 +146,7 @@ fuel-gauge@36 {
12993                         pinctrl-0 = <&fuel_alert_irq>;
12994                         pinctrl-names = "default";
12995                         interrupt-parent = <&gpx2>;
12996 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
12997 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
12998                         maxim,rsns-microohm = <10000>;
12999                         maxim,over-heat-temp = <600>;
13000                         maxim,over-volt = <4300>;
13001 @@ -322,7 +322,7 @@ &i2c_7 {
13002         max77686: pmic@9 {
13003                 compatible = "maxim,max77686";
13004                 interrupt-parent = <&gpx0>;
13005 -               interrupts = <7 IRQ_TYPE_NONE>;
13006 +               interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
13007                 pinctrl-0 = <&max77686_irq>;
13008                 pinctrl-names = "default";
13009                 reg = <0x09>;
13010 diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
13011 index 8b5a79a8720c..39bbe18145cf 100644
13012 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
13013 +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
13014 @@ -134,7 +134,7 @@ max77686: pmic@9 {
13015                 compatible = "maxim,max77686";
13016                 reg = <0x09>;
13017                 interrupt-parent = <&gpx3>;
13018 -               interrupts = <2 IRQ_TYPE_NONE>;
13019 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
13020                 pinctrl-names = "default";
13021                 pinctrl-0 = <&max77686_irq>;
13022                 #clock-cells = <1>;
13023 diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13024 index 6635f6184051..2335c4687349 100644
13025 --- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13026 +++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13027 @@ -292,7 +292,7 @@ &i2c_0 {
13028         max77686: pmic@9 {
13029                 compatible = "maxim,max77686";
13030                 interrupt-parent = <&gpx3>;
13031 -               interrupts = <2 IRQ_TYPE_NONE>;
13032 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
13033                 pinctrl-names = "default";
13034                 pinctrl-0 = <&max77686_irq>;
13035                 wakeup-source;
13036 diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13037 index 0cda654371ae..56ee02ceba7d 100644
13038 --- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13039 +++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13040 @@ -575,7 +575,7 @@ fuelgauge: max17048@36 {
13041                         maxim,rcomp = /bits/ 8 <0x4d>;
13043                         interrupt-parent = <&msmgpio>;
13044 -                       interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
13045 +                       interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
13047                         pinctrl-names = "default";
13048                         pinctrl-0 = <&fuelgauge_pin>;
13049 diff --git a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13050 index a0f7f461f48c..2dadb836c5fe 100644
13051 --- a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13052 +++ b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13053 @@ -717,7 +717,7 @@ fuelgauge@36 {
13054                         maxim,rcomp = /bits/ 8 <0x56>;
13056                         interrupt-parent = <&pma8084_gpios>;
13057 -                       interrupts = <21 IRQ_TYPE_EDGE_FALLING>;
13058 +                       interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
13060                         pinctrl-names = "default";
13061                         pinctrl-0 = <&fuelgauge_pin>;
13062 diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
13063 index 09a152b91557..1d6f0c5d02e9 100644
13064 --- a/arch/arm/boot/dts/r8a7790-lager.dts
13065 +++ b/arch/arm/boot/dts/r8a7790-lager.dts
13066 @@ -53,6 +53,9 @@ aliases {
13067                 i2c11 = &i2cexio1;
13068                 i2c12 = &i2chdmi;
13069                 i2c13 = &i2cpwr;
13070 +               mmc0 = &mmcif1;
13071 +               mmc1 = &sdhi0;
13072 +               mmc2 = &sdhi2;
13073         };
13075         chosen {
13076 diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
13077 index f603cba5441f..6af1727b8269 100644
13078 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts
13079 +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
13080 @@ -53,6 +53,9 @@ aliases {
13081                 i2c12 = &i2cexio1;
13082                 i2c13 = &i2chdmi;
13083                 i2c14 = &i2cexio4;
13084 +               mmc0 = &sdhi0;
13085 +               mmc1 = &sdhi1;
13086 +               mmc2 = &sdhi2;
13087         };
13089         chosen {
13090 diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
13091 index c6d563fb7ec7..bf51e29c793a 100644
13092 --- a/arch/arm/boot/dts/r8a7791-porter.dts
13093 +++ b/arch/arm/boot/dts/r8a7791-porter.dts
13094 @@ -28,6 +28,8 @@ aliases {
13095                 serial0 = &scif0;
13096                 i2c9 = &gpioi2c2;
13097                 i2c10 = &i2chdmi;
13098 +               mmc0 = &sdhi0;
13099 +               mmc1 = &sdhi2;
13100         };
13102         chosen {
13103 diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
13104 index abf487e8fe0f..2b59a0491350 100644
13105 --- a/arch/arm/boot/dts/r8a7793-gose.dts
13106 +++ b/arch/arm/boot/dts/r8a7793-gose.dts
13107 @@ -49,6 +49,9 @@ aliases {
13108                 i2c10 = &gpioi2c4;
13109                 i2c11 = &i2chdmi;
13110                 i2c12 = &i2cexio4;
13111 +               mmc0 = &sdhi0;
13112 +               mmc1 = &sdhi1;
13113 +               mmc2 = &sdhi2;
13114         };
13116         chosen {
13117 diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
13118 index 3f1cc5bbf329..32025986b3b9 100644
13119 --- a/arch/arm/boot/dts/r8a7794-alt.dts
13120 +++ b/arch/arm/boot/dts/r8a7794-alt.dts
13121 @@ -19,6 +19,9 @@ aliases {
13122                 i2c10 = &gpioi2c4;
13123                 i2c11 = &i2chdmi;
13124                 i2c12 = &i2cexio4;
13125 +               mmc0 = &mmcif0;
13126 +               mmc1 = &sdhi0;
13127 +               mmc2 = &sdhi1;
13128         };
13130         chosen {
13131 diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
13132 index 677596f6c9c9..af066ee5e275 100644
13133 --- a/arch/arm/boot/dts/r8a7794-silk.dts
13134 +++ b/arch/arm/boot/dts/r8a7794-silk.dts
13135 @@ -31,6 +31,8 @@ aliases {
13136                 serial0 = &scif2;
13137                 i2c9 = &gpioi2c1;
13138                 i2c10 = &i2chdmi;
13139 +               mmc0 = &mmcif0;
13140 +               mmc1 = &sdhi1;
13141         };
13143         chosen {
13144 diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13145 index ca064359dd30..b47d8300e536 100644
13146 --- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13147 +++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13148 @@ -115,7 +115,7 @@ &fg {
13149         compatible = "maxim,max77836-battery";
13151         interrupt-parent = <&gph3>;
13152 -       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
13153 +       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
13155         pinctrl-names = "default";
13156         pinctrl-0 = <&fg_irq>;
13157 diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13158 index cb3677f0a1cb..b580397ede83 100644
13159 --- a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13160 +++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13161 @@ -8,37 +8,43 @@
13162  / {
13163         soc {
13164                 i2c@80128000 {
13165 -                       /* Marked:
13166 -                        * 129
13167 -                        * M35
13168 -                        * L3GD20
13169 -                        */
13170 -                       l3gd20@6a {
13171 -                               /* Gyroscope */
13172 -                               compatible = "st,l3gd20";
13173 -                               status = "disabled";
13174 +                       accelerometer@19 {
13175 +                               compatible = "st,lsm303dlhc-accel";
13176                                 st,drdy-int-pin = <1>;
13177 -                               drive-open-drain;
13178 -                               reg = <0x6a>; // 0x6a or 0x6b
13179 +                               reg = <0x19>;
13180                                 vdd-supply = <&ab8500_ldo_aux1_reg>;
13181                                 vddio-supply = <&db8500_vsmps2_reg>;
13182 +                               interrupt-parent = <&gpio2>;
13183 +                               interrupts = <18 IRQ_TYPE_EDGE_RISING>,
13184 +                                            <19 IRQ_TYPE_EDGE_RISING>;
13185 +                               pinctrl-names = "default";
13186 +                               pinctrl-0 = <&accel_tvk_mode>;
13187                         };
13188 -                       /*
13189 -                        * Marked:
13190 -                        * 2122
13191 -                        * C3H
13192 -                        * DQEEE
13193 -                        * LIS3DH?
13194 -                        */
13195 -                       lis3dh@18 {
13196 -                               /* Accelerometer */
13197 -                               compatible = "st,lis3dh-accel";
13198 +                       magnetometer@1e {
13199 +                               compatible = "st,lsm303dlm-magn";
13200                                 st,drdy-int-pin = <1>;
13201 -                               reg = <0x18>;
13202 +                               reg = <0x1e>;
13203                                 vdd-supply = <&ab8500_ldo_aux1_reg>;
13204                                 vddio-supply = <&db8500_vsmps2_reg>;
13205 +                               // This interrupt is not properly working with the driver
13206 +                               // interrupt-parent = <&gpio1>;
13207 +                               // interrupts = <0 IRQ_TYPE_EDGE_RISING>;
13208                                 pinctrl-names = "default";
13209 -                               pinctrl-0 = <&accel_tvk_mode>;
13210 +                               pinctrl-0 = <&magn_tvk_mode>;
13211 +                       };
13212 +                       gyroscope@68 {
13213 +                               /* Gyroscope */
13214 +                               compatible = "st,l3g4200d-gyro";
13215 +                               reg = <0x68>;
13216 +                               vdd-supply = <&ab8500_ldo_aux1_reg>;
13217 +                               vddio-supply = <&db8500_vsmps2_reg>;
13218 +                       };
13219 +                       pressure@5c {
13220 +                               /* Barometer/pressure sensor */
13221 +                               compatible = "st,lps001wp-press";
13222 +                               reg = <0x5c>;
13223 +                               vdd-supply = <&ab8500_ldo_aux1_reg>;
13224 +                               vddio-supply = <&db8500_vsmps2_reg>;
13225                         };
13226                 };
13228 @@ -54,5 +60,26 @@ panel {
13229                                 };
13230                         };
13231                 };
13233 +               pinctrl {
13234 +                       accelerometer {
13235 +                               accel_tvk_mode: accel_tvk {
13236 +                                       /* Accelerometer interrupt lines 1 & 2 */
13237 +                                       tvk_cfg {
13238 +                                               pins = "GPIO82_C1", "GPIO83_D3";
13239 +                                               ste,config = <&gpio_in_pd>;
13240 +                                       };
13241 +                               };
13242 +                       };
13243 +                       magnetometer {
13244 +                               magn_tvk_mode: magn_tvk {
13245 +                                       /* GPIO 32 used for DRDY, pull this down */
13246 +                                       tvk_cfg {
13247 +                                               pins = "GPIO32_V2";
13248 +                                               ste,config = <&gpio_in_pd>;
13249 +                                       };
13250 +                               };
13251 +                       };
13252 +               };
13253         };
13254  };
13255 diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13256 index 7b4249ed1983..060baa8b7e9d 100644
13257 --- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13258 +++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13259 @@ -1891,10 +1891,15 @@ pins2 {
13260         usart2_idle_pins_c: usart2-idle-2 {
13261                 pins1 {
13262                         pinmux = <STM32_PINMUX('D', 5, ANALOG)>, /* USART2_TX */
13263 -                                <STM32_PINMUX('D', 4, ANALOG)>, /* USART2_RTS */
13264                                  <STM32_PINMUX('D', 3, ANALOG)>; /* USART2_CTS_NSS */
13265                 };
13266                 pins2 {
13267 +                       pinmux = <STM32_PINMUX('D', 4, AF7)>; /* USART2_RTS */
13268 +                       bias-disable;
13269 +                       drive-push-pull;
13270 +                       slew-rate = <3>;
13271 +               };
13272 +               pins3 {
13273                         pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */
13274                         bias-disable;
13275                 };
13276 @@ -1940,10 +1945,15 @@ pins2 {
13277         usart3_idle_pins_b: usart3-idle-1 {
13278                 pins1 {
13279                         pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
13280 -                                <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
13281                                  <STM32_PINMUX('I', 10, ANALOG)>; /* USART3_CTS_NSS */
13282                 };
13283                 pins2 {
13284 +                       pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
13285 +                       bias-disable;
13286 +                       drive-push-pull;
13287 +                       slew-rate = <0>;
13288 +               };
13289 +               pins3 {
13290                         pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
13291                         bias-disable;
13292                 };
13293 @@ -1976,10 +1986,15 @@ pins2 {
13294         usart3_idle_pins_c: usart3-idle-2 {
13295                 pins1 {
13296                         pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
13297 -                                <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
13298                                  <STM32_PINMUX('B', 13, ANALOG)>; /* USART3_CTS_NSS */
13299                 };
13300                 pins2 {
13301 +                       pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
13302 +                       bias-disable;
13303 +                       drive-push-pull;
13304 +                       slew-rate = <0>;
13305 +               };
13306 +               pins3 {
13307                         pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
13308                         bias-disable;
13309                 };
13310 diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13311 index d3b99535d755..f9c0f6884cc1 100644
13312 --- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13313 +++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13314 @@ -448,7 +448,7 @@ touchscreen@4c {
13316                         reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_LOW>;
13318 -                       avdd-supply = <&vdd_3v3_sys>;
13319 +                       vdda-supply = <&vdd_3v3_sys>;
13320                         vdd-supply  = <&vdd_3v3_sys>;
13321                 };
13323 diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
13324 index b0b15c97306b..e81e5937a60a 100644
13325 --- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
13326 +++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
13327 @@ -583,7 +583,7 @@ eth: ethernet@65000000 {
13328                         clocks = <&sys_clk 6>;
13329                         reset-names = "ether";
13330                         resets = <&sys_rst 6>;
13331 -                       phy-mode = "rgmii";
13332 +                       phy-mode = "rgmii-id";
13333                         local-mac-address = [00 00 00 00 00 00];
13334                         socionext,syscon-phy-mode = <&soc_glue 0>;
13336 diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S
13337 index bed897e9a181..86345751bbf3 100644
13338 --- a/arch/arm/crypto/blake2s-core.S
13339 +++ b/arch/arm/crypto/blake2s-core.S
13340 @@ -8,6 +8,7 @@
13341   */
13343  #include <linux/linkage.h>
13344 +#include <asm/assembler.h>
13346         // Registers used to hold message words temporarily.  There aren't
13347         // enough ARM registers to hold the whole message block, so we have to
13348 @@ -38,6 +39,23 @@
13349  #endif
13350  .endm
13352 +.macro _le32_bswap     a, tmp
13353 +#ifdef __ARMEB__
13354 +       rev_l           \a, \tmp
13355 +#endif
13356 +.endm
13358 +.macro _le32_bswap_8x  a, b, c, d, e, f, g, h,  tmp
13359 +       _le32_bswap     \a, \tmp
13360 +       _le32_bswap     \b, \tmp
13361 +       _le32_bswap     \c, \tmp
13362 +       _le32_bswap     \d, \tmp
13363 +       _le32_bswap     \e, \tmp
13364 +       _le32_bswap     \f, \tmp
13365 +       _le32_bswap     \g, \tmp
13366 +       _le32_bswap     \h, \tmp
13367 +.endm
13369  // Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals.
13370  // (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two
13371  // columns/diagonals.  s0-s1 are the word offsets to the message words the first
13372 @@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch)
13373         tst             r1, #3
13374         bne             .Lcopy_block_misaligned
13375         ldmia           r1!, {r2-r9}
13376 +       _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
13377         stmia           r12!, {r2-r9}
13378         ldmia           r1!, {r2-r9}
13379 +       _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
13380         stmia           r12, {r2-r9}
13381  .Lcopy_block_done:
13382         str             r1, [sp, #68]           // Update message pointer
13383 @@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch)
13384  1:
13385  #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13386         ldr             r3, [r1], #4
13387 +       _le32_bswap     r3, r4
13388  #else
13389         ldrb            r3, [r1, #0]
13390         ldrb            r4, [r1, #1]
13391 diff --git a/arch/arm/crypto/curve25519-core.S b/arch/arm/crypto/curve25519-core.S
13392 index be18af52e7dc..b697fa5d059a 100644
13393 --- a/arch/arm/crypto/curve25519-core.S
13394 +++ b/arch/arm/crypto/curve25519-core.S
13395 @@ -10,8 +10,8 @@
13396  #include <linux/linkage.h>
13398  .text
13399 -.fpu neon
13400  .arch armv7-a
13401 +.fpu neon
13402  .align 4
13404  ENTRY(curve25519_neon)
13405 diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
13406 index 3023c1acfa19..c31bd8f7c092 100644
13407 --- a/arch/arm/crypto/poly1305-glue.c
13408 +++ b/arch/arm/crypto/poly1305-glue.c
13409 @@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
13411  static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
13413 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
13414 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
13416         poly1305_init_arm(&dctx->h, key);
13417         dctx->s[0] = get_unaligned_le32(key + 16);
13418 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
13419 index 08660ae9dcbc..b1423fb130ea 100644
13420 --- a/arch/arm/kernel/hw_breakpoint.c
13421 +++ b/arch/arm/kernel/hw_breakpoint.c
13422 @@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
13423                         info->trigger = addr;
13424                         pr_debug("breakpoint fired: address = 0x%x\n", addr);
13425                         perf_bp_event(bp, regs);
13426 -                       if (!bp->overflow_handler)
13427 +                       if (is_default_overflow_handler(bp))
13428                                 enable_single_step(bp, addr);
13429                         goto unlock;
13430                 }
13431 diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
13432 index dcc1191291a2..24a700535747 100644
13433 --- a/arch/arm/tools/syscall.tbl
13434 +++ b/arch/arm/tools/syscall.tbl
13435 @@ -456,3 +456,7 @@
13436  440    common  process_madvise                 sys_process_madvise
13437  441    common  epoll_pwait2                    sys_epoll_pwait2
13438  442    common  mount_setattr                   sys_mount_setattr
13439 +443    common  futex_wait                      sys_futex_wait
13440 +444    common  futex_wake                      sys_futex_wake
13441 +445    common  futex_waitv                     sys_futex_waitv
13442 +446    common  futex_requeue                   sys_futex_requeue
13443 diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13444 index 6e4ad66ff536..8d5d368dbe90 100644
13445 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13446 +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13447 @@ -65,6 +65,7 @@ port@3 {
13448         port@7 {
13449                 label = "sw";
13450                 reg = <7>;
13451 +               phy-mode = "rgmii";
13453                 fixed-link {
13454                         speed = <1000>;
13455 diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13456 index 9354077f74cd..9e799328c6db 100644
13457 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13458 +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13459 @@ -131,7 +131,7 @@ usb@d000 {
13460                         status = "disabled";
13461                 };
13463 -               ethernet-switch@80000 {
13464 +               bus@80000 {
13465                         compatible = "simple-bus";
13466                         #size-cells = <1>;
13467                         #address-cells = <1>;
13468 diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13469 index 0d38327043f8..cd3c3edd48fa 100644
13470 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13471 +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13472 @@ -28,6 +28,10 @@ &bq25895 {
13473         ti,termination-current = <144000>;  /* uA */
13474  };
13476 +&buck3_reg {
13477 +       regulator-always-on;
13480  &proximity {
13481         proximity-near-level = <25>;
13482  };
13483 diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13484 index 7a2df148c6a3..456dcd4a7793 100644
13485 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13486 +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13487 @@ -156,7 +156,8 @@ uart1: serial@12200 {
13488                         };
13490                         nb_periph_clk: nb-periph-clk@13000 {
13491 -                               compatible = "marvell,armada-3700-periph-clock-nb";
13492 +                               compatible = "marvell,armada-3700-periph-clock-nb",
13493 +                                            "syscon";
13494                                 reg = <0x13000 0x100>;
13495                                 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
13496                                 <&tbg 3>, <&xtalclk>;
13497 diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13498 index 6dffada2e66b..28aa634c9780 100644
13499 --- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13500 +++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13501 @@ -294,7 +294,7 @@ &pwm0 {
13503  &pwrap {
13504         /* Only MT8173 E1 needs USB power domain */
13505 -       power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
13506 +       power-domains = <&spm MT8173_POWER_DOMAIN_USB>;
13508         pmic: mt6397 {
13509                 compatible = "mediatek,mt6397";
13510 diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13511 index 7fa870e4386a..ecb37a7e6870 100644
13512 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13513 +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13514 @@ -1235,7 +1235,7 @@ dsi1: dsi@1401c000 {
13515                                  <&mmsys CLK_MM_DSI1_DIGITAL>,
13516                                  <&mipi_tx1>;
13517                         clock-names = "engine", "digital", "hs";
13518 -                       phy = <&mipi_tx1>;
13519 +                       phys = <&mipi_tx1>;
13520                         phy-names = "dphy";
13521                         status = "disabled";
13522                 };
13523 diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13524 index 80519a145f13..16f4b1fc0fb9 100644
13525 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13526 +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13527 @@ -983,6 +983,9 @@ mmsys: syscon@14000000 {
13528                         compatible = "mediatek,mt8183-mmsys", "syscon";
13529                         reg = <0 0x14000000 0 0x1000>;
13530                         #clock-cells = <1>;
13531 +                       mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>,
13532 +                                <&gce 1 CMDQ_THR_PRIO_HIGHEST>;
13533 +                       mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
13534                 };
13536                 ovl0: ovl@14008000 {
13537 @@ -1058,6 +1061,7 @@ ccorr0: ccorr@1400f000 {
13538                         interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_LOW>;
13539                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13540                         clocks = <&mmsys CLK_MM_DISP_CCORR0>;
13541 +                       mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xf000 0x1000>;
13542                 };
13544                 aal0: aal@14010000 {
13545 @@ -1067,6 +1071,7 @@ aal0: aal@14010000 {
13546                         interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_LOW>;
13547                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13548                         clocks = <&mmsys CLK_MM_DISP_AAL0>;
13549 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0 0x1000>;
13550                 };
13552                 gamma0: gamma@14011000 {
13553 @@ -1075,6 +1080,7 @@ gamma0: gamma@14011000 {
13554                         interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_LOW>;
13555                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13556                         clocks = <&mmsys CLK_MM_DISP_GAMMA0>;
13557 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x1000 0x1000>;
13558                 };
13560                 dither0: dither@14012000 {
13561 @@ -1083,6 +1089,7 @@ dither0: dither@14012000 {
13562                         interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_LOW>;
13563                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13564                         clocks = <&mmsys CLK_MM_DISP_DITHER0>;
13565 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x2000 0x1000>;
13566                 };
13568                 dsi0: dsi@14014000 {
13569 diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13570 index 63fd70086bb8..9f27e7ed5e22 100644
13571 --- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13572 +++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13573 @@ -56,7 +56,7 @@ &i2c0 {
13574         tca6416: gpio@20 {
13575                 compatible = "ti,tca6416";
13576                 reg = <0x20>;
13577 -               reset-gpios = <&pio 65 GPIO_ACTIVE_HIGH>;
13578 +               reset-gpios = <&pio 65 GPIO_ACTIVE_LOW>;
13579                 pinctrl-names = "default";
13580                 pinctrl-0 = <&tca6416_pins>;
13582 diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13583 index 07c8b2c926c0..b8f7cf5cbdab 100644
13584 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13585 +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13586 @@ -22,9 +22,11 @@ charger-thermal {
13587                         thermal-sensors = <&pm6150_adc_tm 1>;
13589                         trips {
13590 -                               temperature = <125000>;
13591 -                               hysteresis = <1000>;
13592 -                               type = "critical";
13593 +                               charger-crit {
13594 +                                       temperature = <125000>;
13595 +                                       hysteresis = <1000>;
13596 +                                       type = "critical";
13597 +                               };
13598                         };
13599                 };
13600         };
13601 @@ -768,17 +770,17 @@ &sdhc_2 {
13602  };
13604  &spi0 {
13605 -       pinctrl-0 = <&qup_spi0_cs_gpio>;
13606 +       pinctrl-0 = <&qup_spi0_cs_gpio_init_high>, <&qup_spi0_cs_gpio>;
13607         cs-gpios = <&tlmm 37 GPIO_ACTIVE_LOW>;
13608  };
13610  &spi6 {
13611 -       pinctrl-0 = <&qup_spi6_cs_gpio>;
13612 +       pinctrl-0 = <&qup_spi6_cs_gpio_init_high>, <&qup_spi6_cs_gpio>;
13613         cs-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>;
13614  };
13616  ap_spi_fp: &spi10 {
13617 -       pinctrl-0 = <&qup_spi10_cs_gpio>;
13618 +       pinctrl-0 = <&qup_spi10_cs_gpio_init_high>, <&qup_spi10_cs_gpio>;
13619         cs-gpios = <&tlmm 89 GPIO_ACTIVE_LOW>;
13621         cros_ec_fp: ec@0 {
13622 @@ -1339,6 +1341,27 @@ pinconf {
13623                 };
13624         };
13626 +       qup_spi0_cs_gpio_init_high: qup-spi0-cs-gpio-init-high {
13627 +               pinconf {
13628 +                       pins = "gpio37";
13629 +                       output-high;
13630 +               };
13631 +       };
13633 +       qup_spi6_cs_gpio_init_high: qup-spi6-cs-gpio-init-high {
13634 +               pinconf {
13635 +                       pins = "gpio62";
13636 +                       output-high;
13637 +               };
13638 +       };
13640 +       qup_spi10_cs_gpio_init_high: qup-spi10-cs-gpio-init-high {
13641 +               pinconf {
13642 +                       pins = "gpio89";
13643 +                       output-high;
13644 +               };
13645 +       };
13647         qup_uart3_sleep: qup-uart3-sleep {
13648                 pinmux {
13649                         pins = "gpio38", "gpio39",
13650 diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13651 index c4ac6f5dc008..96d36b38f269 100644
13652 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13653 +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13654 @@ -1015,7 +1015,7 @@ swm: swm@c85 {
13655                 left_spkr: wsa8810-left{
13656                         compatible = "sdw10217201000";
13657                         reg = <0 1>;
13658 -                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
13659 +                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
13660                         #thermal-sensor-cells = <0>;
13661                         sound-name-prefix = "SpkrLeft";
13662                         #sound-dai-cells = <0>;
13663 @@ -1023,7 +1023,7 @@ left_spkr: wsa8810-left{
13665                 right_spkr: wsa8810-right{
13666                         compatible = "sdw10217201000";
13667 -                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
13668 +                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
13669                         reg = <0 2>;
13670                         #thermal-sensor-cells = <0>;
13671                         sound-name-prefix = "SpkrRight";
13672 diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
13673 index 454f794af547..6a2ed02d383d 100644
13674 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
13675 +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
13676 @@ -2382,7 +2382,7 @@ tlmm: pinctrl@3400000 {
13677                         #gpio-cells = <2>;
13678                         interrupt-controller;
13679                         #interrupt-cells = <2>;
13680 -                       gpio-ranges = <&tlmm 0 0 150>;
13681 +                       gpio-ranges = <&tlmm 0 0 151>;
13682                         wakeup-parent = <&pdc_intc>;
13684                         cci0_default: cci0-default {
13685 diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
13686 index e5bb17bc2f46..778613d3410b 100644
13687 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
13688 +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
13689 @@ -914,7 +914,7 @@ tlmm: pinctrl@3100000 {
13690                               <0x0 0x03D00000 0x0 0x300000>;
13691                         reg-names = "west", "east", "north", "south";
13692                         interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
13693 -                       gpio-ranges = <&tlmm 0 0 175>;
13694 +                       gpio-ranges = <&tlmm 0 0 176>;
13695                         gpio-controller;
13696                         #gpio-cells = <2>;
13697                         interrupt-controller;
13698 diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
13699 index 947e1accae3a..46a6c18cea91 100644
13700 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
13701 +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
13702 @@ -279,7 +279,7 @@ mmcx_reg: mmcx-reg {
13704         pmu {
13705                 compatible = "arm,armv8-pmuv3";
13706 -               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
13707 +               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
13708         };
13710         psci {
13711 @@ -2327,10 +2327,9 @@ mdss: mdss@ae00000 {
13712                         reg = <0 0x0ae00000 0 0x1000>;
13713                         reg-names = "mdss";
13715 -                       interconnects = <&gem_noc MASTER_AMPSS_M0 &config_noc SLAVE_DISPLAY_CFG>,
13716 -                                       <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
13717 +                       interconnects = <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
13718                                         <&mmss_noc MASTER_MDP_PORT1 &mc_virt SLAVE_EBI_CH0>;
13719 -                       interconnect-names = "notused", "mdp0-mem", "mdp1-mem";
13720 +                       interconnect-names = "mdp0-mem", "mdp1-mem";
13722                         power-domains = <&dispcc MDSS_GDSC>;
13724 @@ -2580,7 +2579,7 @@ opp-358000000 {
13726                 dispcc: clock-controller@af00000 {
13727                         compatible = "qcom,sm8250-dispcc";
13728 -                       reg = <0 0x0af00000 0 0x20000>;
13729 +                       reg = <0 0x0af00000 0 0x10000>;
13730                         mmcx-supply = <&mmcx_reg>;
13731                         clocks = <&rpmhcc RPMH_CXO_CLK>,
13732                                  <&dsi0_phy 0>,
13733 @@ -2588,28 +2587,14 @@ dispcc: clock-controller@af00000 {
13734                                  <&dsi1_phy 0>,
13735                                  <&dsi1_phy 1>,
13736                                  <0>,
13737 -                                <0>,
13738 -                                <0>,
13739 -                                <0>,
13740 -                                <0>,
13741 -                                <0>,
13742 -                                <0>,
13743 -                                <0>,
13744 -                                <&sleep_clk>;
13745 +                                <0>;
13746                         clock-names = "bi_tcxo",
13747                                       "dsi0_phy_pll_out_byteclk",
13748                                       "dsi0_phy_pll_out_dsiclk",
13749                                       "dsi1_phy_pll_out_byteclk",
13750                                       "dsi1_phy_pll_out_dsiclk",
13751 -                                     "dp_link_clk_divsel_ten",
13752 -                                     "dp_vco_divided_clk_src_mux",
13753 -                                     "dptx1_phy_pll_link_clk",
13754 -                                     "dptx1_phy_pll_vco_div_clk",
13755 -                                     "dptx2_phy_pll_link_clk",
13756 -                                     "dptx2_phy_pll_vco_div_clk",
13757 -                                     "edp_phy_pll_link_clk",
13758 -                                     "edp_phy_pll_vco_div_clk",
13759 -                                     "sleep_clk";
13760 +                                     "dp_phy_pll_link_clk",
13761 +                                     "dp_phy_pll_vco_div_clk";
13762                         #clock-cells = <1>;
13763                         #reset-cells = <1>;
13764                         #power-domain-cells = <1>;
13765 @@ -2689,7 +2674,7 @@ tlmm: pinctrl@f100000 {
13766                         #gpio-cells = <2>;
13767                         interrupt-controller;
13768                         #interrupt-cells = <2>;
13769 -                       gpio-ranges = <&tlmm 0 0 180>;
13770 +                       gpio-ranges = <&tlmm 0 0 181>;
13771                         wakeup-parent = <&pdc>;
13773                         pri_mi2s_active: pri-mi2s-active {
13774 @@ -3754,7 +3739,7 @@ timer {
13775                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
13776                              <GIC_PPI 11
13777                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
13778 -                            <GIC_PPI 12
13779 +                            <GIC_PPI 10
13780                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
13781         };
13783 diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
13784 index 5ef460458f5c..e2fca420e518 100644
13785 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
13786 +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
13787 @@ -153,7 +153,7 @@ memory@80000000 {
13789         pmu {
13790                 compatible = "arm,armv8-pmuv3";
13791 -               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
13792 +               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
13793         };
13795         psci {
13796 @@ -382,7 +382,7 @@ tlmm: pinctrl@f100000 {
13797                         #gpio-cells = <2>;
13798                         interrupt-controller;
13799                         #interrupt-cells = <2>;
13800 -                       gpio-ranges = <&tlmm 0 0 203>;
13801 +                       gpio-ranges = <&tlmm 0 0 204>;
13803                         qup_uart3_default_state: qup-uart3-default-state {
13804                                 rx {
13805 diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
13806 index 7a3da9b06f67..0c7e6f790590 100644
13807 --- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
13808 +++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
13809 @@ -12,6 +12,9 @@ / {
13810         aliases {
13811                 serial0 = &scif2;
13812                 serial1 = &hscif0;
13813 +               mmc0 = &sdhi3;
13814 +               mmc1 = &sdhi0;
13815 +               mmc2 = &sdhi2;
13816         };
13818         chosen {
13819 diff --git a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
13820 index 501cb05da228..3cf2e076940f 100644
13821 --- a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
13822 +++ b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
13823 @@ -21,6 +21,9 @@ aliases {
13824                 serial4 = &hscif2;
13825                 serial5 = &scif5;
13826                 ethernet0 = &avb;
13827 +               mmc0 = &sdhi3;
13828 +               mmc1 = &sdhi0;
13829 +               mmc2 = &sdhi2;
13830         };
13832         chosen {
13833 diff --git a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
13834 index 71763f4402a7..3c0d59def8ee 100644
13835 --- a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
13836 +++ b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
13837 @@ -22,6 +22,9 @@ aliases {
13838                 serial5 = &scif5;
13839                 serial6 = &scif4;
13840                 ethernet0 = &avb;
13841 +               mmc0 = &sdhi3;
13842 +               mmc1 = &sdhi0;
13843 +               mmc2 = &sdhi2;
13844         };
13846         chosen {
13847 diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
13848 index ea87cb5a459c..33257c6440b2 100644
13849 --- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
13850 +++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
13851 @@ -17,6 +17,8 @@ / {
13852         aliases {
13853                 serial0 = &scif2;
13854                 serial1 = &hscif2;
13855 +               mmc0 = &sdhi0;
13856 +               mmc1 = &sdhi3;
13857         };
13859         chosen {
13860 diff --git a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
13861 index 273f062f2909..7b6649a3ded0 100644
13862 --- a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
13863 +++ b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
13864 @@ -22,6 +22,9 @@ aliases {
13865                 serial5 = &scif5;
13866                 serial6 = &scif4;
13867                 ethernet0 = &avb;
13868 +               mmc0 = &sdhi3;
13869 +               mmc1 = &sdhi0;
13870 +               mmc2 = &sdhi2;
13871         };
13873         chosen {
13874 diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
13875 index ec7ca72399ec..1ffa4a995a7a 100644
13876 --- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
13877 +++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
13878 @@ -992,8 +992,8 @@ port@1 {
13880                                         reg = <1>;
13882 -                                       vin4csi41: endpoint@2 {
13883 -                                               reg = <2>;
13884 +                                       vin4csi41: endpoint@3 {
13885 +                                               reg = <3>;
13886                                                 remote-endpoint = <&csi41vin4>;
13887                                         };
13888                                 };
13889 @@ -1020,8 +1020,8 @@ port@1 {
13891                                         reg = <1>;
13893 -                                       vin5csi41: endpoint@2 {
13894 -                                               reg = <2>;
13895 +                                       vin5csi41: endpoint@3 {
13896 +                                               reg = <3>;
13897                                                 remote-endpoint = <&csi41vin5>;
13898                                         };
13899                                 };
13900 @@ -1048,8 +1048,8 @@ port@1 {
13902                                         reg = <1>;
13904 -                                       vin6csi41: endpoint@2 {
13905 -                                               reg = <2>;
13906 +                                       vin6csi41: endpoint@3 {
13907 +                                               reg = <3>;
13908                                                 remote-endpoint = <&csi41vin6>;
13909                                         };
13910                                 };
13911 @@ -1076,8 +1076,8 @@ port@1 {
13913                                         reg = <1>;
13915 -                                       vin7csi41: endpoint@2 {
13916 -                                               reg = <2>;
13917 +                                       vin7csi41: endpoint@3 {
13918 +                                               reg = <3>;
13919                                                 remote-endpoint = <&csi41vin7>;
13920                                         };
13921                                 };
13922 diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
13923 index f74f8b9993f1..6d6cdc4c324b 100644
13924 --- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
13925 +++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
13926 @@ -16,6 +16,9 @@ / {
13927         aliases {
13928                 serial0 = &scif2;
13929                 ethernet0 = &avb;
13930 +               mmc0 = &sdhi3;
13931 +               mmc1 = &sdhi0;
13932 +               mmc2 = &sdhi1;
13933         };
13935         chosen {
13936 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
13937 index fa284a7260d6..e202e8aa6941 100644
13938 --- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
13939 +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
13940 @@ -12,6 +12,14 @@ / {
13941         model = "Renesas Falcon CPU board";
13942         compatible = "renesas,falcon-cpu", "renesas,r8a779a0";
13944 +       aliases {
13945 +               serial0 = &scif0;
13946 +       };
13948 +       chosen {
13949 +               stdout-path = "serial0:115200n8";
13950 +       };
13952         memory@48000000 {
13953                 device_type = "memory";
13954                 /* first 128MB is reserved for secure area. */
13955 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
13956 index 5617b81dd7dc..273857ae38f3 100644
13957 --- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
13958 +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
13959 @@ -14,11 +14,6 @@ / {
13961         aliases {
13962                 ethernet0 = &avb0;
13963 -               serial0 = &scif0;
13964 -       };
13966 -       chosen {
13967 -               stdout-path = "serial0:115200n8";
13968         };
13969  };
13971 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
13972 index dfd6ae8b564f..86ac48e2c849 100644
13973 --- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
13974 +++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
13975 @@ -60,10 +60,7 @@ extalr_clk: extalr {
13977         pmu_a76 {
13978                 compatible = "arm,cortex-a76-pmu";
13979 -               interrupts-extended = <&gic GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
13980 -                                     <&gic GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
13981 -                                     <&gic GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
13982 -                                     <&gic GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
13983 +               interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
13984         };
13986         /* External SCIF clock - to be overridden by boards that provide it */
13987 diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
13988 index c22bb38994e8..15bb1eeb6601 100644
13989 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
13990 +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
13991 @@ -36,6 +36,9 @@ aliases {
13992                 serial0 = &scif2;
13993                 serial1 = &hscif1;
13994                 ethernet0 = &avb;
13995 +               mmc0 = &sdhi2;
13996 +               mmc1 = &sdhi0;
13997 +               mmc2 = &sdhi3;
13998         };
14000         chosen {
14001 diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14002 index e9ed2597f1c2..61bd4df09df0 100644
14003 --- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14004 +++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14005 @@ -16,6 +16,7 @@ / {
14006         aliases {
14007                 serial1 = &hscif0;
14008                 serial2 = &scif1;
14009 +               mmc2 = &sdhi3;
14010         };
14012         clksndsel: clksndsel {
14013 diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
14014 index a04eae55dd6c..3d88e95c65a5 100644
14015 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
14016 +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
14017 @@ -23,6 +23,8 @@ / {
14018         aliases {
14019                 serial0 = &scif2;
14020                 ethernet0 = &avb;
14021 +               mmc0 = &sdhi2;
14022 +               mmc1 = &sdhi0;
14023         };
14025         chosen {
14026 diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14027 index a87b8a678719..8f2c1c1e2c64 100644
14028 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14029 +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14030 @@ -734,7 +734,7 @@ eth: ethernet@65000000 {
14031                         clocks = <&sys_clk 6>;
14032                         reset-names = "ether";
14033                         resets = <&sys_rst 6>;
14034 -                       phy-mode = "rgmii";
14035 +                       phy-mode = "rgmii-id";
14036                         local-mac-address = [00 00 00 00 00 00];
14037                         socionext,syscon-phy-mode = <&soc_glue 0>;
14039 diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14040 index 0e52dadf54b3..be97da132258 100644
14041 --- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14042 +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14043 @@ -564,7 +564,7 @@ eth0: ethernet@65000000 {
14044                         clocks = <&sys_clk 6>;
14045                         reset-names = "ether";
14046                         resets = <&sys_rst 6>;
14047 -                       phy-mode = "rgmii";
14048 +                       phy-mode = "rgmii-id";
14049                         local-mac-address = [00 00 00 00 00 00];
14050                         socionext,syscon-phy-mode = <&soc_glue 0>;
14052 @@ -585,7 +585,7 @@ eth1: ethernet@65200000 {
14053                         clocks = <&sys_clk 7>;
14054                         reset-names = "ether";
14055                         resets = <&sys_rst 7>;
14056 -                       phy-mode = "rgmii";
14057 +                       phy-mode = "rgmii-id";
14058                         local-mac-address = [00 00 00 00 00 00];
14059                         socionext,syscon-phy-mode = <&soc_glue 1>;
14061 diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14062 index 8c84dafb7125..f1e7da3dfa27 100644
14063 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14064 +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14065 @@ -1042,13 +1042,16 @@ main_sdhci0: mmc@4f80000 {
14066                 assigned-clocks = <&k3_clks 91 1>;
14067                 assigned-clock-parents = <&k3_clks 91 2>;
14068                 bus-width = <8>;
14069 -               mmc-hs400-1_8v;
14070 +               mmc-hs200-1_8v;
14071                 mmc-ddr-1_8v;
14072                 ti,otap-del-sel-legacy = <0xf>;
14073                 ti,otap-del-sel-mmc-hs = <0xf>;
14074                 ti,otap-del-sel-ddr52 = <0x5>;
14075                 ti,otap-del-sel-hs200 = <0x6>;
14076                 ti,otap-del-sel-hs400 = <0x0>;
14077 +               ti,itap-del-sel-legacy = <0x10>;
14078 +               ti,itap-del-sel-mmc-hs = <0xa>;
14079 +               ti,itap-del-sel-ddr52 = <0x3>;
14080                 ti,trm-icp = <0x8>;
14081                 ti,strobe-sel = <0x77>;
14082                 dma-coherent;
14083 @@ -1069,9 +1072,15 @@ main_sdhci1: mmc@4fb0000 {
14084                 ti,otap-del-sel-sdr25 = <0xf>;
14085                 ti,otap-del-sel-sdr50 = <0xc>;
14086                 ti,otap-del-sel-ddr50 = <0xc>;
14087 +               ti,itap-del-sel-legacy = <0x0>;
14088 +               ti,itap-del-sel-sd-hs = <0x0>;
14089 +               ti,itap-del-sel-sdr12 = <0x0>;
14090 +               ti,itap-del-sel-sdr25 = <0x0>;
14091 +               ti,itap-del-sel-ddr50 = <0x2>;
14092                 ti,trm-icp = <0x8>;
14093                 ti,clkbuf-sel = <0x7>;
14094                 dma-coherent;
14095 +               sdhci-caps-mask = <0x2 0x0>;
14096         };
14098         main_sdhci2: mmc@4f98000 {
14099 @@ -1089,9 +1098,15 @@ main_sdhci2: mmc@4f98000 {
14100                 ti,otap-del-sel-sdr25 = <0xf>;
14101                 ti,otap-del-sel-sdr50 = <0xc>;
14102                 ti,otap-del-sel-ddr50 = <0xc>;
14103 +               ti,itap-del-sel-legacy = <0x0>;
14104 +               ti,itap-del-sel-sd-hs = <0x0>;
14105 +               ti,itap-del-sel-sdr12 = <0x0>;
14106 +               ti,itap-del-sel-sdr25 = <0x0>;
14107 +               ti,itap-del-sel-ddr50 = <0x2>;
14108                 ti,trm-icp = <0x8>;
14109                 ti,clkbuf-sel = <0x7>;
14110                 dma-coherent;
14111 +               sdhci-caps-mask = <0x2 0x0>;
14112         };
14114         usbss0: cdns-usb@4104000 {
14115 diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
14116 index bbdb54702aa7..247011356d11 100644
14117 --- a/arch/arm64/crypto/aes-modes.S
14118 +++ b/arch/arm64/crypto/aes-modes.S
14119 @@ -359,6 +359,7 @@ ST5(        mov             v4.16b, vctr.16b                )
14120         ins             vctr.d[0], x8
14122         /* apply carry to N counter blocks for N := x12 */
14123 +       cbz             x12, 2f
14124         adr             x16, 1f
14125         sub             x16, x16, x12, lsl #3
14126         br              x16
14127 diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
14128 index 683de671741a..9c3d86e397bf 100644
14129 --- a/arch/arm64/crypto/poly1305-glue.c
14130 +++ b/arch/arm64/crypto/poly1305-glue.c
14131 @@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
14133  static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
14135 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
14136 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
14138         poly1305_init_arm64(&dctx->h, key);
14139         dctx->s[0] = get_unaligned_le32(key + 16);
14140 diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
14141 index 1c26d7baa67f..cfdde3a56805 100644
14142 --- a/arch/arm64/include/asm/daifflags.h
14143 +++ b/arch/arm64/include/asm/daifflags.h
14144 @@ -131,6 +131,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
14145         if (interrupts_enabled(regs))
14146                 trace_hardirqs_on();
14148 +       if (system_uses_irq_prio_masking())
14149 +               gic_write_pmr(regs->pmr_save);
14151         /*
14152          * We can't use local_daif_restore(regs->pstate) here as
14153          * system_has_prio_mask_debugging() won't restore the I bit if it can
14154 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
14155 index 3d10e6527f7d..858c2fcfc043 100644
14156 --- a/arch/arm64/include/asm/kvm_host.h
14157 +++ b/arch/arm64/include/asm/kvm_host.h
14158 @@ -713,6 +713,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
14159  static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
14161  void kvm_arm_init_debug(void);
14162 +void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
14163  void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
14164  void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
14165  void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
14166 diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
14167 index 949788f5ba40..727bfc3be99b 100644
14168 --- a/arch/arm64/include/asm/unistd.h
14169 +++ b/arch/arm64/include/asm/unistd.h
14170 @@ -38,7 +38,7 @@
14171  #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE + 5)
14172  #define __ARM_NR_COMPAT_END            (__ARM_NR_COMPAT_BASE + 0x800)
14174 -#define __NR_compat_syscalls           443
14175 +#define __NR_compat_syscalls           447
14176  #endif
14178  #define __ARCH_WANT_SYS_CLONE
14179 diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
14180 index 3d874f624056..e5015a2b9c94 100644
14181 --- a/arch/arm64/include/asm/unistd32.h
14182 +++ b/arch/arm64/include/asm/unistd32.h
14183 @@ -893,6 +893,14 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
14184  __SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
14185  #define __NR_mount_setattr 442
14186  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
14187 +#define __NR_futex_wait 443
14188 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
14189 +#define __NR_futex_wake 444
14190 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
14191 +#define __NR_futex_waitv 445
14192 +__SYSCALL(__NR_futex_waitv, compat_sys_futex_waitv)
14193 +#define __NR_futex_requeue 446
14194 +__SYSCALL(__NR_futex_requeue, compat_sys_futex_requeue)
14196  /*
14197   * Please add new compat syscalls above this comment and update
14198 diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
14199 index 9d3588450473..117412bae915 100644
14200 --- a/arch/arm64/kernel/entry-common.c
14201 +++ b/arch/arm64/kernel/entry-common.c
14202 @@ -226,14 +226,6 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
14204         unsigned long far = read_sysreg(far_el1);
14206 -       /*
14207 -        * The CPU masked interrupts, and we are leaving them masked during
14208 -        * do_debug_exception(). Update PMR as if we had called
14209 -        * local_daif_mask().
14210 -        */
14211 -       if (system_uses_irq_prio_masking())
14212 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14214         arm64_enter_el1_dbg(regs);
14215         if (!cortex_a76_erratum_1463225_debug_handler(regs))
14216                 do_debug_exception(far, esr, regs);
14217 @@ -398,9 +390,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
14218         /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
14219         unsigned long far = read_sysreg(far_el1);
14221 -       if (system_uses_irq_prio_masking())
14222 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14224         enter_from_user_mode();
14225         do_debug_exception(far, esr, regs);
14226         local_daif_restore(DAIF_PROCCTX_NOIRQ);
14227 @@ -408,9 +397,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
14229  static void noinstr el0_svc(struct pt_regs *regs)
14231 -       if (system_uses_irq_prio_masking())
14232 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14234         enter_from_user_mode();
14235         cortex_a76_erratum_1463225_svc_handler();
14236         do_el0_svc(regs);
14237 @@ -486,9 +472,6 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
14239  static void noinstr el0_svc_compat(struct pt_regs *regs)
14241 -       if (system_uses_irq_prio_masking())
14242 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14244         enter_from_user_mode();
14245         cortex_a76_erratum_1463225_svc_handler();
14246         do_el0_svc_compat(regs);
14247 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
14248 index 6acfc5e6b5e0..e03fba3ae2a0 100644
14249 --- a/arch/arm64/kernel/entry.S
14250 +++ b/arch/arm64/kernel/entry.S
14251 @@ -263,16 +263,16 @@ alternative_else_nop_endif
14252         stp     lr, x21, [sp, #S_LR]
14254         /*
14255 -        * For exceptions from EL0, terminate the callchain here.
14256 +        * For exceptions from EL0, create a terminal frame record.
14257          * For exceptions from EL1, create a synthetic frame record so the
14258          * interrupted code shows up in the backtrace.
14259          */
14260         .if \el == 0
14261 -       mov     x29, xzr
14262 +       stp     xzr, xzr, [sp, #S_STACKFRAME]
14263         .else
14264         stp     x29, x22, [sp, #S_STACKFRAME]
14265 -       add     x29, sp, #S_STACKFRAME
14266         .endif
14267 +       add     x29, sp, #S_STACKFRAME
14269  #ifdef CONFIG_ARM64_SW_TTBR0_PAN
14270  alternative_if_not ARM64_HAS_PAN
14271 @@ -292,6 +292,8 @@ alternative_else_nop_endif
14272  alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14273         mrs_s   x20, SYS_ICC_PMR_EL1
14274         str     x20, [sp, #S_PMR_SAVE]
14275 +       mov     x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
14276 +       msr_s   SYS_ICC_PMR_EL1, x20
14277  alternative_else_nop_endif
14279         /* Re-enable tag checking (TCO set on exception entry) */
14280 @@ -493,8 +495,8 @@ tsk .req    x28             // current thread_info
14281  /*
14282   * Interrupt handling.
14283   */
14284 -       .macro  irq_handler
14285 -       ldr_l   x1, handle_arch_irq
14286 +       .macro  irq_handler, handler:req
14287 +       ldr_l   x1, \handler
14288         mov     x0, sp
14289         irq_stack_entry
14290         blr     x1
14291 @@ -524,13 +526,41 @@ alternative_endif
14292  #endif
14293         .endm
14295 -       .macro  gic_prio_irq_setup, pmr:req, tmp:req
14296 -#ifdef CONFIG_ARM64_PSEUDO_NMI
14297 -       alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14298 -       orr     \tmp, \pmr, #GIC_PRIO_PSR_I_SET
14299 -       msr_s   SYS_ICC_PMR_EL1, \tmp
14300 -       alternative_else_nop_endif
14301 +       .macro el1_interrupt_handler, handler:req
14302 +       enable_da_f
14304 +       mov     x0, sp
14305 +       bl      enter_el1_irq_or_nmi
14307 +       irq_handler     \handler
14309 +#ifdef CONFIG_PREEMPTION
14310 +       ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
14311 +alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14312 +       /*
14313 +        * DA_F were cleared at start of handling. If anything is set in DAIF,
14314 +        * we come back from an NMI, so skip preemption
14315 +        */
14316 +       mrs     x0, daif
14317 +       orr     x24, x24, x0
14318 +alternative_else_nop_endif
14319 +       cbnz    x24, 1f                         // preempt count != 0 || NMI return path
14320 +       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
14322  #endif
14324 +       mov     x0, sp
14325 +       bl      exit_el1_irq_or_nmi
14326 +       .endm
14328 +       .macro el0_interrupt_handler, handler:req
14329 +       user_exit_irqoff
14330 +       enable_da_f
14332 +       tbz     x22, #55, 1f
14333 +       bl      do_el0_irq_bp_hardening
14335 +       irq_handler     \handler
14336         .endm
14338         .text
14339 @@ -662,32 +692,7 @@ SYM_CODE_END(el1_sync)
14340         .align  6
14341  SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
14342         kernel_entry 1
14343 -       gic_prio_irq_setup pmr=x20, tmp=x1
14344 -       enable_da_f
14346 -       mov     x0, sp
14347 -       bl      enter_el1_irq_or_nmi
14349 -       irq_handler
14351 -#ifdef CONFIG_PREEMPTION
14352 -       ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
14353 -alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14354 -       /*
14355 -        * DA_F were cleared at start of handling. If anything is set in DAIF,
14356 -        * we come back from an NMI, so skip preemption
14357 -        */
14358 -       mrs     x0, daif
14359 -       orr     x24, x24, x0
14360 -alternative_else_nop_endif
14361 -       cbnz    x24, 1f                         // preempt count != 0 || NMI return path
14362 -       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
14364 -#endif
14366 -       mov     x0, sp
14367 -       bl      exit_el1_irq_or_nmi
14369 +       el1_interrupt_handler handle_arch_irq
14370         kernel_exit 1
14371  SYM_CODE_END(el1_irq)
14373 @@ -727,22 +732,13 @@ SYM_CODE_END(el0_error_compat)
14374  SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
14375         kernel_entry 0
14376  el0_irq_naked:
14377 -       gic_prio_irq_setup pmr=x20, tmp=x0
14378 -       user_exit_irqoff
14379 -       enable_da_f
14381 -       tbz     x22, #55, 1f
14382 -       bl      do_el0_irq_bp_hardening
14384 -       irq_handler
14386 +       el0_interrupt_handler handle_arch_irq
14387         b       ret_to_user
14388  SYM_CODE_END(el0_irq)
14390  SYM_CODE_START_LOCAL(el1_error)
14391         kernel_entry 1
14392         mrs     x1, esr_el1
14393 -       gic_prio_kentry_setup tmp=x2
14394         enable_dbg
14395         mov     x0, sp
14396         bl      do_serror
14397 @@ -753,7 +749,6 @@ SYM_CODE_START_LOCAL(el0_error)
14398         kernel_entry 0
14399  el0_error_naked:
14400         mrs     x25, esr_el1
14401 -       gic_prio_kentry_setup tmp=x2
14402         user_exit_irqoff
14403         enable_dbg
14404         mov     x0, sp
14405 diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
14406 index d55bdfb7789c..7032a5f9e624 100644
14407 --- a/arch/arm64/kernel/stacktrace.c
14408 +++ b/arch/arm64/kernel/stacktrace.c
14409 @@ -44,10 +44,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
14410         unsigned long fp = frame->fp;
14411         struct stack_info info;
14413 -       /* Terminal record; nothing to unwind */
14414 -       if (!fp)
14415 -               return -ENOENT;
14417         if (fp & 0xf)
14418                 return -EINVAL;
14420 @@ -108,6 +104,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
14422         frame->pc = ptrauth_strip_insn_pac(frame->pc);
14424 +       /*
14425 +        * This is a terminal record, so we have finished unwinding.
14426 +        */
14427 +       if (!frame->fp && !frame->pc)
14428 +               return -ENOENT;
14430         return 0;
14432  NOKPROBE_SYMBOL(unwind_frame);
14433 diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
14434 index 61dbb4c838ef..a5e61e09ea92 100644
14435 --- a/arch/arm64/kernel/vdso/vdso.lds.S
14436 +++ b/arch/arm64/kernel/vdso/vdso.lds.S
14437 @@ -31,6 +31,13 @@ SECTIONS
14438         .gnu.version_d  : { *(.gnu.version_d) }
14439         .gnu.version_r  : { *(.gnu.version_r) }
14441 +       /*
14442 +        * Discard .note.gnu.property sections which are unused and have
14443 +        * different alignment requirement from vDSO note sections.
14444 +        */
14445 +       /DISCARD/       : {
14446 +               *(.note.GNU-stack .note.gnu.property)
14447 +       }
14448         .note           : { *(.note.*) }                :text   :note
14450         . = ALIGN(16);
14451 @@ -48,7 +55,6 @@ SECTIONS
14452         PROVIDE(end = .);
14454         /DISCARD/       : {
14455 -               *(.note.GNU-stack)
14456                 *(.data .data.* .gnu.linkonce.d.* .sdata*)
14457                 *(.bss .sbss .dynbss .dynsbss)
14458                 *(.eh_frame .eh_frame_hdr)
14459 diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
14460 index 7f06ba76698d..84b5f79c9eab 100644
14461 --- a/arch/arm64/kvm/arm.c
14462 +++ b/arch/arm64/kvm/arm.c
14463 @@ -580,6 +580,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
14465         vcpu->arch.has_run_once = true;
14467 +       kvm_arm_vcpu_init_debug(vcpu);
14469         if (likely(irqchip_in_kernel(kvm))) {
14470                 /*
14471                  * Map the VGIC hardware resources before running a vcpu the
14472 @@ -1808,8 +1810,10 @@ static int init_hyp_mode(void)
14473         if (is_protected_kvm_enabled()) {
14474                 init_cpu_logical_map();
14476 -               if (!init_psci_relay())
14477 +               if (!init_psci_relay()) {
14478 +                       err = -ENODEV;
14479                         goto out_err;
14480 +               }
14481         }
14483         return 0;
14484 diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
14485 index dbc890511631..2484b2cca74b 100644
14486 --- a/arch/arm64/kvm/debug.c
14487 +++ b/arch/arm64/kvm/debug.c
14488 @@ -68,6 +68,64 @@ void kvm_arm_init_debug(void)
14489         __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
14492 +/**
14493 + * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
14494 + *
14495 + * @vcpu:      the vcpu pointer
14496 + *
14497 + * This ensures we will trap access to:
14498 + *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
14499 + *  - Debug ROM Address (MDCR_EL2_TDRA)
14500 + *  - OS related registers (MDCR_EL2_TDOSA)
14501 + *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
14502 + *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
14503 + */
14504 +static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
14506 +       /*
14507 +        * This also clears MDCR_EL2_E2PB_MASK to disable guest access
14508 +        * to the profiling buffer.
14509 +        */
14510 +       vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
14511 +       vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
14512 +                               MDCR_EL2_TPMS |
14513 +                               MDCR_EL2_TTRF |
14514 +                               MDCR_EL2_TPMCR |
14515 +                               MDCR_EL2_TDRA |
14516 +                               MDCR_EL2_TDOSA);
14518 +       /* Is the VM being debugged by userspace? */
14519 +       if (vcpu->guest_debug)
14520 +               /* Route all software debug exceptions to EL2 */
14521 +               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
14523 +       /*
14524 +        * Trap debug register access when one of the following is true:
14525 +        *  - Userspace is using the hardware to debug the guest
14526 +        *  (KVM_GUESTDBG_USE_HW is set).
14527 +        *  - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
14528 +        */
14529 +       if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
14530 +           !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
14531 +               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
14533 +       trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
14536 +/**
14537 + * kvm_arm_vcpu_init_debug - setup vcpu debug traps
14538 + *
14539 + * @vcpu:      the vcpu pointer
14540 + *
14541 + * Set vcpu initial mdcr_el2 value.
14542 + */
14543 +void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
14545 +       preempt_disable();
14546 +       kvm_arm_setup_mdcr_el2(vcpu);
14547 +       preempt_enable();
14550  /**
14551   * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
14552   */
14553 @@ -83,13 +141,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
14554   * @vcpu:      the vcpu pointer
14555   *
14556   * This is called before each entry into the hypervisor to setup any
14557 - * debug related registers. Currently this just ensures we will trap
14558 - * access to:
14559 - *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
14560 - *  - Debug ROM Address (MDCR_EL2_TDRA)
14561 - *  - OS related registers (MDCR_EL2_TDOSA)
14562 - *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
14563 - *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
14564 + * debug related registers.
14565   *
14566   * Additionally, KVM only traps guest accesses to the debug registers if
14567   * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
14568 @@ -101,28 +153,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
14570  void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14572 -       bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
14573         unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
14575         trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
14577 -       /*
14578 -        * This also clears MDCR_EL2_E2PB_MASK to disable guest access
14579 -        * to the profiling buffer.
14580 -        */
14581 -       vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
14582 -       vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
14583 -                               MDCR_EL2_TPMS |
14584 -                               MDCR_EL2_TTRF |
14585 -                               MDCR_EL2_TPMCR |
14586 -                               MDCR_EL2_TDRA |
14587 -                               MDCR_EL2_TDOSA);
14588 +       kvm_arm_setup_mdcr_el2(vcpu);
14590         /* Is Guest debugging in effect? */
14591         if (vcpu->guest_debug) {
14592 -               /* Route all software debug exceptions to EL2 */
14593 -               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
14595                 /* Save guest debug state */
14596                 save_guest_debug_regs(vcpu);
14598 @@ -176,7 +214,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14600                         vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
14601                         vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
14602 -                       trap_debug = true;
14604                         trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
14605                                                 &vcpu->arch.debug_ptr->dbg_bcr[0],
14606 @@ -191,10 +228,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14607         BUG_ON(!vcpu->guest_debug &&
14608                 vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
14610 -       /* Trap debug register access */
14611 -       if (trap_debug)
14612 -               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
14614         /* If KDE or MDE are set, perform a full save/restore cycle. */
14615         if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
14616                 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
14617 @@ -203,7 +236,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14618         if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
14619                 write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
14621 -       trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
14622         trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
14625 diff --git a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14626 index ead02c6a7628..6bc88a756cb7 100644
14627 --- a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14628 +++ b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14629 @@ -50,6 +50,18 @@
14630  #ifndef R_AARCH64_ABS64
14631  #define R_AARCH64_ABS64                        257
14632  #endif
14633 +#ifndef R_AARCH64_PREL64
14634 +#define R_AARCH64_PREL64               260
14635 +#endif
14636 +#ifndef R_AARCH64_PREL32
14637 +#define R_AARCH64_PREL32               261
14638 +#endif
14639 +#ifndef R_AARCH64_PREL16
14640 +#define R_AARCH64_PREL16               262
14641 +#endif
14642 +#ifndef R_AARCH64_PLT32
14643 +#define R_AARCH64_PLT32                        314
14644 +#endif
14645  #ifndef R_AARCH64_LD_PREL_LO19
14646  #define R_AARCH64_LD_PREL_LO19         273
14647  #endif
14648 @@ -371,6 +383,12 @@ static void emit_rela_section(Elf64_Shdr *sh_rela)
14649                 case R_AARCH64_ABS64:
14650                         emit_rela_abs64(rela, sh_orig_name);
14651                         break;
14652 +               /* Allow position-relative data relocations. */
14653 +               case R_AARCH64_PREL64:
14654 +               case R_AARCH64_PREL32:
14655 +               case R_AARCH64_PREL16:
14656 +               case R_AARCH64_PLT32:
14657 +                       break;
14658                 /* Allow relocations to generate PC-relative addressing. */
14659                 case R_AARCH64_LD_PREL_LO19:
14660                 case R_AARCH64_ADR_PREL_LO21:
14661 diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
14662 index bd354cd45d28..4b5acd84b8c8 100644
14663 --- a/arch/arm64/kvm/reset.c
14664 +++ b/arch/arm64/kvm/reset.c
14665 @@ -242,6 +242,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
14667         /* Reset core registers */
14668         memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
14669 +       memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
14670 +       vcpu->arch.ctxt.spsr_abt = 0;
14671 +       vcpu->arch.ctxt.spsr_und = 0;
14672 +       vcpu->arch.ctxt.spsr_irq = 0;
14673 +       vcpu->arch.ctxt.spsr_fiq = 0;
14674         vcpu_gp_regs(vcpu)->pstate = pstate;
14676         /* Reset system registers */
14677 diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
14678 index 44419679f91a..7740995de982 100644
14679 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
14680 +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
14681 @@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
14682                         r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
14683                         goto out;
14684                 }
14685 -               rdreg = list_first_entry(&vgic->rd_regions,
14686 -                                        struct vgic_redist_region, list);
14687 +               rdreg = list_first_entry_or_null(&vgic->rd_regions,
14688 +                                                struct vgic_redist_region, list);
14689                 if (!rdreg)
14690                         addr_ptr = &undef_value;
14691                 else
14692 @@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
14693                 u64 addr;
14694                 unsigned long type = (unsigned long)attr->attr;
14696 +               if (copy_from_user(&addr, uaddr, sizeof(addr)))
14697 +                       return -EFAULT;
14699                 r = kvm_vgic_addr(dev->kvm, type, &addr, false);
14700                 if (r)
14701                         return (r == -ENODEV) ? -ENXIO : r;
14702 diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
14703 index ac485163a4a7..6d44c028d1c9 100644
14704 --- a/arch/arm64/mm/flush.c
14705 +++ b/arch/arm64/mm/flush.c
14706 @@ -55,8 +55,10 @@ void __sync_icache_dcache(pte_t pte)
14708         struct page *page = pte_page(pte);
14710 -       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
14711 +       if (!test_bit(PG_dcache_clean, &page->flags)) {
14712                 sync_icache_aliases(page_address(page), page_size(page));
14713 +               set_bit(PG_dcache_clean, &page->flags);
14714 +       }
14716  EXPORT_SYMBOL_GPL(__sync_icache_dcache);
14718 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
14719 index c967bfd30d2b..b183216a591c 100644
14720 --- a/arch/arm64/mm/proc.S
14721 +++ b/arch/arm64/mm/proc.S
14722 @@ -444,6 +444,18 @@ SYM_FUNC_START(__cpu_setup)
14723         mov     x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
14724         msr_s   SYS_GCR_EL1, x10
14726 +       /*
14727 +        * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
14728 +        * RGSR_EL1.SEED must be non-zero for IRG to produce
14729 +        * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
14730 +        * must initialize it.
14731 +        */
14732 +       mrs     x10, CNTVCT_EL0
14733 +       ands    x10, x10, #SYS_RGSR_EL1_SEED_MASK
14734 +       csinc   x10, x10, xzr, ne
14735 +       lsl     x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
14736 +       msr_s   SYS_RGSR_EL1, x10
14738         /* clear any pending tag check faults in TFSR*_EL1 */
14739         msr_s   SYS_TFSR_EL1, xzr
14740         msr_s   SYS_TFSRE0_EL1, xzr
14741 diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
14742 index 5a29652e6def..7271b9c5fc76 100644
14743 --- a/arch/ia64/include/asm/module.h
14744 +++ b/arch/ia64/include/asm/module.h
14745 @@ -14,16 +14,20 @@
14746  struct elf64_shdr;                     /* forward declration */
14748  struct mod_arch_specific {
14749 +       /* Used only at module load time. */
14750         struct elf64_shdr *core_plt;    /* core PLT section */
14751         struct elf64_shdr *init_plt;    /* init PLT section */
14752         struct elf64_shdr *got;         /* global offset table */
14753         struct elf64_shdr *opd;         /* official procedure descriptors */
14754         struct elf64_shdr *unwind;      /* unwind-table section */
14755         unsigned long gp;               /* global-pointer for module */
14756 +       unsigned int next_got_entry;    /* index of next available got entry */
14758 +       /* Used at module run and cleanup time. */
14759         void *core_unw_table;           /* core unwind-table cookie returned by unwinder */
14760         void *init_unw_table;           /* init unwind-table cookie returned by unwinder */
14761 -       unsigned int next_got_entry;    /* index of next available got entry */
14762 +       void *opd_addr;                 /* symbolize uses .opd to get to actual function */
14763 +       unsigned long opd_size;
14764  };
14766  #define ARCH_SHF_SMALL SHF_IA_64_SHORT
14767 diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
14768 index a5636524af76..e2af6b172200 100644
14769 --- a/arch/ia64/kernel/acpi.c
14770 +++ b/arch/ia64/kernel/acpi.c
14771 @@ -446,7 +446,8 @@ void __init acpi_numa_fixup(void)
14772         if (srat_num_cpus == 0) {
14773                 node_set_online(0);
14774                 node_cpuid[0].phys_id = hard_smp_processor_id();
14775 -               return;
14776 +               slit_distance(0, 0) = LOCAL_DISTANCE;
14777 +               goto out;
14778         }
14780         /*
14781 @@ -489,7 +490,7 @@ void __init acpi_numa_fixup(void)
14782                         for (j = 0; j < MAX_NUMNODES; j++)
14783                                 slit_distance(i, j) = i == j ?
14784                                         LOCAL_DISTANCE : REMOTE_DISTANCE;
14785 -               return;
14786 +               goto out;
14787         }
14789         memset(numa_slit, -1, sizeof(numa_slit));
14790 @@ -514,6 +515,8 @@ void __init acpi_numa_fixup(void)
14791                 printk("\n");
14792         }
14793  #endif
14794 +out:
14795 +       node_possible_map = node_online_map;
14797  #endif                         /* CONFIG_ACPI_NUMA */
14799 diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
14800 index c5fe21de46a8..31149e41f9be 100644
14801 --- a/arch/ia64/kernel/efi.c
14802 +++ b/arch/ia64/kernel/efi.c
14803 @@ -415,10 +415,10 @@ efi_get_pal_addr (void)
14804                 mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
14806                 printk(KERN_INFO "CPU %d: mapping PAL code "
14807 -                       "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
14808 -                       smp_processor_id(), md->phys_addr,
14809 -                       md->phys_addr + efi_md_size(md),
14810 -                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
14811 +                       "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
14812 +                       smp_processor_id(), md->phys_addr,
14813 +                       md->phys_addr + efi_md_size(md),
14814 +                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
14815  #endif
14816                 return __va(md->phys_addr);
14817         }
14818 @@ -560,6 +560,7 @@ efi_init (void)
14819         {
14820                 efi_memory_desc_t *md;
14821                 void *p;
14822 +               unsigned int i;
14824                 for (i = 0, p = efi_map_start; p < efi_map_end;
14825                      ++i, p += efi_desc_size)
14826 @@ -586,7 +587,7 @@ efi_init (void)
14827                         }
14829                         printk("mem%02d: %s "
14830 -                              "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
14831 +                              "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
14832                                i, efi_md_typeattr_format(buf, sizeof(buf), md),
14833                                md->phys_addr,
14834                                md->phys_addr + efi_md_size(md), size, unit);
14835 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
14836 index 00a496cb346f..2cba53c1da82 100644
14837 --- a/arch/ia64/kernel/module.c
14838 +++ b/arch/ia64/kernel/module.c
14839 @@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
14840  int
14841  module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
14843 +       struct mod_arch_specific *mas = &mod->arch;
14845         DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
14846 -       if (mod->arch.unwind)
14847 +       if (mas->unwind)
14848                 register_unwind_table(mod);
14850 +       /*
14851 +        * ".opd" was already relocated to the final destination. Store
14852 +        * it's address for use in symbolizer.
14853 +        */
14854 +       mas->opd_addr = (void *)mas->opd->sh_addr;
14855 +       mas->opd_size = mas->opd->sh_size;
14857 +       /*
14858 +        * Module relocation was already done at this point. Section
14859 +        * headers are about to be deleted. Wipe out load-time context.
14860 +        */
14861 +       mas->core_plt = NULL;
14862 +       mas->init_plt = NULL;
14863 +       mas->got = NULL;
14864 +       mas->opd = NULL;
14865 +       mas->unwind = NULL;
14866 +       mas->gp = 0;
14867 +       mas->next_got_entry = 0;
14869         return 0;
14872 @@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
14874  void *dereference_module_function_descriptor(struct module *mod, void *ptr)
14876 -       Elf64_Shdr *opd = mod->arch.opd;
14877 +       struct mod_arch_specific *mas = &mod->arch;
14879 -       if (ptr < (void *)opd->sh_addr ||
14880 -                       ptr >= (void *)(opd->sh_addr + opd->sh_size))
14881 +       if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
14882                 return ptr;
14884         return dereference_function_descriptor(ptr);
14885 diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h
14886 index 257b29184af9..e28eb1c0e0bf 100644
14887 --- a/arch/m68k/include/asm/mvme147hw.h
14888 +++ b/arch/m68k/include/asm/mvme147hw.h
14889 @@ -66,6 +66,9 @@ struct pcc_regs {
14890  #define PCC_INT_ENAB           0x08
14892  #define PCC_TIMER_INT_CLR      0x80
14894 +#define PCC_TIMER_TIC_EN       0x01
14895 +#define PCC_TIMER_COC_EN       0x02
14896  #define PCC_TIMER_CLR_OVF      0x04
14898  #define PCC_LEVEL_ABORT                0x07
14899 diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
14900 index 1c235d8f53f3..f55bdcb8e4f1 100644
14901 --- a/arch/m68k/kernel/sys_m68k.c
14902 +++ b/arch/m68k/kernel/sys_m68k.c
14903 @@ -388,6 +388,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
14904                 ret = -EPERM;
14905                 if (!capable(CAP_SYS_ADMIN))
14906                         goto out;
14908 +               mmap_read_lock(current->mm);
14909         } else {
14910                 struct vm_area_struct *vma;
14912 diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
14913 index cfdc7f912e14..e1e90c49a496 100644
14914 --- a/arch/m68k/mvme147/config.c
14915 +++ b/arch/m68k/mvme147/config.c
14916 @@ -114,8 +114,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
14917         unsigned long flags;
14919         local_irq_save(flags);
14920 -       m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
14921 -       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
14922 +       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
14923 +                            PCC_TIMER_TIC_EN;
14924 +       m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
14925 +                                PCC_LEVEL_TIMER1;
14926         clk_total += PCC_TIMER_CYCLES;
14927         legacy_timer_tick(1);
14928         local_irq_restore(flags);
14929 @@ -133,10 +135,10 @@ void mvme147_sched_init (void)
14930         /* Init the clock with a value */
14931         /* The clock counter increments until 0xFFFF then reloads */
14932         m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
14933 -       m147_pcc->t1_cntrl = 0x0;       /* clear timer */
14934 -       m147_pcc->t1_cntrl = 0x3;       /* start timer */
14935 -       m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;  /* clear pending ints */
14936 -       m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
14937 +       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
14938 +                            PCC_TIMER_TIC_EN;
14939 +       m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
14940 +                                PCC_LEVEL_TIMER1;
14942         clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
14944 diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
14945 index 30357fe4ba6c..b59593c7cfb9 100644
14946 --- a/arch/m68k/mvme16x/config.c
14947 +++ b/arch/m68k/mvme16x/config.c
14948 @@ -366,6 +366,7 @@ static u32 clk_total;
14949  #define PCCTOVR1_COC_EN      0x02
14950  #define PCCTOVR1_OVR_CLR     0x04
14952 +#define PCCTIC1_INT_LEVEL    6
14953  #define PCCTIC1_INT_CLR      0x08
14954  #define PCCTIC1_INT_EN       0x10
14956 @@ -374,8 +375,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
14957         unsigned long flags;
14959         local_irq_save(flags);
14960 -       out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
14961 -       out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
14962 +       out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
14963 +       out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
14964         clk_total += PCC_TIMER_CYCLES;
14965         legacy_timer_tick(1);
14966         local_irq_restore(flags);
14967 @@ -389,14 +390,15 @@ void mvme16x_sched_init(void)
14968      int irq;
14970      /* Using PCCchip2 or MC2 chip tick timer 1 */
14971 -    out_be32(PCCTCNT1, 0);
14972 -    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
14973 -    out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
14974 -    out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
14975      if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
14976                      NULL))
14977         panic ("Couldn't register timer int");
14979 +    out_be32(PCCTCNT1, 0);
14980 +    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
14981 +    out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
14982 +    out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
14984      clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
14986      if (brdno == 0x0162 || brdno == 0x172)
14987 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
14988 index d89efba3d8a4..e89d63cd92d1 100644
14989 --- a/arch/mips/Kconfig
14990 +++ b/arch/mips/Kconfig
14991 @@ -6,6 +6,7 @@ config MIPS
14992         select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
14993         select ARCH_HAS_FORTIFY_SOURCE
14994         select ARCH_HAS_KCOV
14995 +       select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
14996         select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
14997         select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
14998         select ARCH_HAS_UBSAN_SANITIZE_ALL
14999 diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
15000 index 69cbef472377..d4b2b430dad0 100644
15001 --- a/arch/mips/boot/dts/brcm/bcm3368.dtsi
15002 +++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
15003 @@ -59,7 +59,7 @@ clkctl: clock-controller@fff8c004 {
15005                 periph_cntl: syscon@fff8c008 {
15006                         compatible = "syscon";
15007 -                       reg = <0xfff8c000 0x4>;
15008 +                       reg = <0xfff8c008 0x4>;
15009                         native-endian;
15010                 };
15012 diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
15013 index e0021ff9f144..940594436872 100644
15014 --- a/arch/mips/boot/dts/brcm/bcm63268.dtsi
15015 +++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
15016 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15018                 periph_cntl: syscon@10000008 {
15019                         compatible = "syscon";
15020 -                       reg = <0x10000000 0xc>;
15021 +                       reg = <0x10000008 0x4>;
15022                         native-endian;
15023                 };
15025 diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
15026 index 9d93e7f5e6fc..d79c88c2fc9c 100644
15027 --- a/arch/mips/boot/dts/brcm/bcm6358.dtsi
15028 +++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
15029 @@ -59,7 +59,7 @@ clkctl: clock-controller@fffe0004 {
15031                 periph_cntl: syscon@fffe0008 {
15032                         compatible = "syscon";
15033 -                       reg = <0xfffe0000 0x4>;
15034 +                       reg = <0xfffe0008 0x4>;
15035                         native-endian;
15036                 };
15038 diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
15039 index eb10341b75ba..8a21cb761ffd 100644
15040 --- a/arch/mips/boot/dts/brcm/bcm6362.dtsi
15041 +++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
15042 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15044                 periph_cntl: syscon@10000008 {
15045                         compatible = "syscon";
15046 -                       reg = <0x10000000 0xc>;
15047 +                       reg = <0x10000008 0x4>;
15048                         native-endian;
15049                 };
15051 diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
15052 index 52c19f40b9cc..8e87867ebc04 100644
15053 --- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
15054 +++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
15055 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15057                 periph_cntl: syscon@100000008 {
15058                         compatible = "syscon";
15059 -                       reg = <0x10000000 0xc>;
15060 +                       reg = <0x10000008 0x4>;
15061                         native-endian;
15062                 };
15064 diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
15065 index fc881b46d911..bc6110fb98e0 100644
15066 --- a/arch/mips/crypto/poly1305-glue.c
15067 +++ b/arch/mips/crypto/poly1305-glue.c
15068 @@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
15069  asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
15070  asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
15072 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
15073 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
15075         poly1305_init_mips(&dctx->h, key);
15076         dctx->s[0] = get_unaligned_le32(key + 16);
15077 diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S
15078 index a7f51f97b910..c45ad2759421 100644
15079 --- a/arch/mips/generic/board-boston.its.S
15080 +++ b/arch/mips/generic/board-boston.its.S
15081 @@ -1,22 +1,22 @@
15082  / {
15083         images {
15084 -               fdt@boston {
15085 +               fdt-boston {
15086                         description = "img,boston Device Tree";
15087                         data = /incbin/("boot/dts/img/boston.dtb");
15088                         type = "flat_dt";
15089                         arch = "mips";
15090                         compression = "none";
15091 -                       hash@0 {
15092 +                       hash {
15093                                 algo = "sha1";
15094                         };
15095                 };
15096         };
15098         configurations {
15099 -               conf@boston {
15100 +               conf-boston {
15101                         description = "Boston Linux kernel";
15102 -                       kernel = "kernel@0";
15103 -                       fdt = "fdt@boston";
15104 +                       kernel = "kernel";
15105 +                       fdt = "fdt-boston";
15106                 };
15107         };
15108  };
15109 diff --git a/arch/mips/generic/board-jaguar2.its.S b/arch/mips/generic/board-jaguar2.its.S
15110 index fb0e589eeff7..c2b8d479b26c 100644
15111 --- a/arch/mips/generic/board-jaguar2.its.S
15112 +++ b/arch/mips/generic/board-jaguar2.its.S
15113 @@ -1,23 +1,23 @@
15114  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15115  / {
15116         images {
15117 -               fdt@jaguar2_pcb110 {
15118 +               fdt-jaguar2_pcb110 {
15119                         description = "MSCC Jaguar2 PCB110 Device Tree";
15120                         data = /incbin/("boot/dts/mscc/jaguar2_pcb110.dtb");
15121                         type = "flat_dt";
15122                         arch = "mips";
15123                         compression = "none";
15124 -                       hash@0 {
15125 +                       hash {
15126                                 algo = "sha1";
15127                         };
15128                 };
15129 -               fdt@jaguar2_pcb111 {
15130 +               fdt-jaguar2_pcb111 {
15131                         description = "MSCC Jaguar2 PCB111 Device Tree";
15132                         data = /incbin/("boot/dts/mscc/jaguar2_pcb111.dtb");
15133                         type = "flat_dt";
15134                         arch = "mips";
15135                         compression = "none";
15136 -                       hash@0 {
15137 +                       hash {
15138                                 algo = "sha1";
15139                         };
15140                 };
15141 @@ -26,14 +26,14 @@
15142         configurations {
15143                 pcb110 {
15144                         description = "Jaguar2 Linux kernel";
15145 -                       kernel = "kernel@0";
15146 -                       fdt = "fdt@jaguar2_pcb110";
15147 +                       kernel = "kernel";
15148 +                       fdt = "fdt-jaguar2_pcb110";
15149                         ramdisk = "ramdisk";
15150                 };
15151                 pcb111 {
15152                         description = "Jaguar2 Linux kernel";
15153 -                       kernel = "kernel@0";
15154 -                       fdt = "fdt@jaguar2_pcb111";
15155 +                       kernel = "kernel";
15156 +                       fdt = "fdt-jaguar2_pcb111";
15157                         ramdisk = "ramdisk";
15158                 };
15159         };
15160 diff --git a/arch/mips/generic/board-luton.its.S b/arch/mips/generic/board-luton.its.S
15161 index 39a543f62f25..bd9837c9af97 100644
15162 --- a/arch/mips/generic/board-luton.its.S
15163 +++ b/arch/mips/generic/board-luton.its.S
15164 @@ -1,13 +1,13 @@
15165  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15166  / {
15167         images {
15168 -               fdt@luton_pcb091 {
15169 +               fdt-luton_pcb091 {
15170                         description = "MSCC Luton PCB091 Device Tree";
15171                         data = /incbin/("boot/dts/mscc/luton_pcb091.dtb");
15172                         type = "flat_dt";
15173                         arch = "mips";
15174                         compression = "none";
15175 -                       hash@0 {
15176 +                       hash {
15177                                 algo = "sha1";
15178                         };
15179                 };
15180 @@ -16,8 +16,8 @@
15181         configurations {
15182                 pcb091 {
15183                         description = "Luton Linux kernel";
15184 -                       kernel = "kernel@0";
15185 -                       fdt = "fdt@luton_pcb091";
15186 +                       kernel = "kernel";
15187 +                       fdt = "fdt-luton_pcb091";
15188                 };
15189         };
15190  };
15191 diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S
15192 index e4cb4f95a8cc..0a2e8f7a8526 100644
15193 --- a/arch/mips/generic/board-ni169445.its.S
15194 +++ b/arch/mips/generic/board-ni169445.its.S
15195 @@ -1,22 +1,22 @@
15196  / {
15197         images {
15198 -               fdt@ni169445 {
15199 +               fdt-ni169445 {
15200                         description = "NI 169445 device tree";
15201                         data = /incbin/("boot/dts/ni/169445.dtb");
15202                         type = "flat_dt";
15203                         arch = "mips";
15204                         compression = "none";
15205 -                       hash@0 {
15206 +                       hash {
15207                                 algo = "sha1";
15208                         };
15209                 };
15210         };
15212         configurations {
15213 -               conf@ni169445 {
15214 +               conf-ni169445 {
15215                         description = "NI 169445 Linux Kernel";
15216 -                       kernel = "kernel@0";
15217 -                       fdt = "fdt@ni169445";
15218 +                       kernel = "kernel";
15219 +                       fdt = "fdt-ni169445";
15220                 };
15221         };
15222  };
15223 diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S
15224 index 3da23988149a..8c7e3a1b68d3 100644
15225 --- a/arch/mips/generic/board-ocelot.its.S
15226 +++ b/arch/mips/generic/board-ocelot.its.S
15227 @@ -1,40 +1,40 @@
15228  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15229  / {
15230         images {
15231 -               fdt@ocelot_pcb123 {
15232 +               fdt-ocelot_pcb123 {
15233                         description = "MSCC Ocelot PCB123 Device Tree";
15234                         data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
15235                         type = "flat_dt";
15236                         arch = "mips";
15237                         compression = "none";
15238 -                       hash@0 {
15239 +                       hash {
15240                                 algo = "sha1";
15241                         };
15242                 };
15244 -               fdt@ocelot_pcb120 {
15245 +               fdt-ocelot_pcb120 {
15246                         description = "MSCC Ocelot PCB120 Device Tree";
15247                         data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
15248                         type = "flat_dt";
15249                         arch = "mips";
15250                         compression = "none";
15251 -                       hash@0 {
15252 +                       hash {
15253                                 algo = "sha1";
15254                         };
15255                 };
15256         };
15258         configurations {
15259 -               conf@ocelot_pcb123 {
15260 +               conf-ocelot_pcb123 {
15261                         description = "Ocelot Linux kernel";
15262 -                       kernel = "kernel@0";
15263 -                       fdt = "fdt@ocelot_pcb123";
15264 +                       kernel = "kernel";
15265 +                       fdt = "fdt-ocelot_pcb123";
15266                 };
15268 -               conf@ocelot_pcb120 {
15269 +               conf-ocelot_pcb120 {
15270                         description = "Ocelot Linux kernel";
15271 -                       kernel = "kernel@0";
15272 -                       fdt = "fdt@ocelot_pcb120";
15273 +                       kernel = "kernel";
15274 +                       fdt = "fdt-ocelot_pcb120";
15275                 };
15276         };
15277  };
15278 diff --git a/arch/mips/generic/board-serval.its.S b/arch/mips/generic/board-serval.its.S
15279 index 4ea4fc9d757f..dde833efe980 100644
15280 --- a/arch/mips/generic/board-serval.its.S
15281 +++ b/arch/mips/generic/board-serval.its.S
15282 @@ -1,13 +1,13 @@
15283  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15284  / {
15285         images {
15286 -               fdt@serval_pcb105 {
15287 +               fdt-serval_pcb105 {
15288                         description = "MSCC Serval PCB105 Device Tree";
15289                         data = /incbin/("boot/dts/mscc/serval_pcb105.dtb");
15290                         type = "flat_dt";
15291                         arch = "mips";
15292                         compression = "none";
15293 -                       hash@0 {
15294 +                       hash {
15295                                 algo = "sha1";
15296                         };
15297                 };
15298 @@ -16,8 +16,8 @@
15299         configurations {
15300                 pcb105 {
15301                         description = "Serval Linux kernel";
15302 -                       kernel = "kernel@0";
15303 -                       fdt = "fdt@serval_pcb105";
15304 +                       kernel = "kernel";
15305 +                       fdt = "fdt-serval_pcb105";
15306                         ramdisk = "ramdisk";
15307                 };
15308         };
15309 diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S
15310 index a2e773d3f14f..08c1e900eb4e 100644
15311 --- a/arch/mips/generic/board-xilfpga.its.S
15312 +++ b/arch/mips/generic/board-xilfpga.its.S
15313 @@ -1,22 +1,22 @@
15314  / {
15315         images {
15316 -               fdt@xilfpga {
15317 +               fdt-xilfpga {
15318                         description = "MIPSfpga (xilfpga) Device Tree";
15319                         data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb");
15320                         type = "flat_dt";
15321                         arch = "mips";
15322                         compression = "none";
15323 -                       hash@0 {
15324 +                       hash {
15325                                 algo = "sha1";
15326                         };
15327                 };
15328         };
15330         configurations {
15331 -               conf@xilfpga {
15332 +               conf-xilfpga {
15333                         description = "MIPSfpga Linux kernel";
15334 -                       kernel = "kernel@0";
15335 -                       fdt = "fdt@xilfpga";
15336 +                       kernel = "kernel";
15337 +                       fdt = "fdt-xilfpga";
15338                 };
15339         };
15340  };
15341 diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
15342 index 1a08438fd893..3e254676540f 100644
15343 --- a/arch/mips/generic/vmlinux.its.S
15344 +++ b/arch/mips/generic/vmlinux.its.S
15345 @@ -6,7 +6,7 @@
15346         #address-cells = <ADDR_CELLS>;
15348         images {
15349 -               kernel@0 {
15350 +               kernel {
15351                         description = KERNEL_NAME;
15352                         data = /incbin/(VMLINUX_BINARY);
15353                         type = "kernel";
15354 @@ -15,18 +15,18 @@
15355                         compression = VMLINUX_COMPRESSION;
15356                         load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
15357                         entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
15358 -                       hash@0 {
15359 +                       hash {
15360                                 algo = "sha1";
15361                         };
15362                 };
15363         };
15365         configurations {
15366 -               default = "conf@default";
15367 +               default = "conf-default";
15369 -               conf@default {
15370 +               conf-default {
15371                         description = "Generic Linux kernel";
15372 -                       kernel = "kernel@0";
15373 +                       kernel = "kernel";
15374                 };
15375         };
15376  };
15377 diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
15378 index 86f2323ebe6b..ca83ada7015f 100644
15379 --- a/arch/mips/include/asm/asmmacro.h
15380 +++ b/arch/mips/include/asm/asmmacro.h
15381 @@ -44,8 +44,7 @@
15382         .endm
15383  #endif
15385 -#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
15386 -    defined(CONFIG_CPU_MIPSR6)
15387 +#ifdef CONFIG_CPU_HAS_DIEI
15388         .macro  local_irq_enable reg=t0
15389         ei
15390         irq_enable_hazard
15391 diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h
15392 index dc5ea5736440..ceece76fc971 100644
15393 --- a/arch/mips/include/asm/div64.h
15394 +++ b/arch/mips/include/asm/div64.h
15395 @@ -1,5 +1,5 @@
15396  /*
15397 - * Copyright (C) 2000, 2004  Maciej W. Rozycki
15398 + * Copyright (C) 2000, 2004, 2021  Maciej W. Rozycki
15399   * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
15400   *
15401   * This file is subject to the terms and conditions of the GNU General Public
15402 @@ -9,25 +9,18 @@
15403  #ifndef __ASM_DIV64_H
15404  #define __ASM_DIV64_H
15406 -#include <asm-generic/div64.h>
15408 -#if BITS_PER_LONG == 64
15409 +#include <asm/bitsperlong.h>
15411 -#include <linux/types.h>
15412 +#if BITS_PER_LONG == 32
15414  /*
15415   * No traps on overflows for any of these...
15416   */
15418 -#define __div64_32(n, base)                                            \
15419 -({                                                                     \
15420 +#define do_div64_32(res, high, low, base) ({                           \
15421         unsigned long __cf, __tmp, __tmp2, __i;                         \
15422         unsigned long __quot32, __mod32;                                \
15423 -       unsigned long __high, __low;                                    \
15424 -       unsigned long long __n;                                         \
15425                                                                         \
15426 -       __high = *__n >> 32;                                            \
15427 -       __low = __n;                                                    \
15428         __asm__(                                                        \
15429         "       .set    push                                    \n"     \
15430         "       .set    noat                                    \n"     \
15431 @@ -51,18 +44,48 @@
15432         "       subu    %0, %0, %z6                             \n"     \
15433         "       addiu   %2, %2, 1                               \n"     \
15434         "3:                                                     \n"     \
15435 -       "       bnez    %4, 0b\n\t"                                     \
15436 -       "        srl    %5, %1, 0x1f\n\t"                               \
15437 +       "       bnez    %4, 0b                                  \n"     \
15438 +       "        srl    %5, %1, 0x1f                            \n"     \
15439         "       .set    pop"                                            \
15440         : "=&r" (__mod32), "=&r" (__tmp),                               \
15441           "=&r" (__quot32), "=&r" (__cf),                               \
15442           "=&r" (__i), "=&r" (__tmp2)                                   \
15443 -       : "Jr" (base), "0" (__high), "1" (__low));                      \
15444 +       : "Jr" (base), "0" (high), "1" (low));                          \
15445                                                                         \
15446 -       (__n) = __quot32;                                               \
15447 +       (res) = __quot32;                                               \
15448         __mod32;                                                        \
15449  })
15451 -#endif /* BITS_PER_LONG == 64 */
15452 +#define __div64_32(n, base) ({                                         \
15453 +       unsigned long __upper, __low, __high, __radix;                  \
15454 +       unsigned long long __quot;                                      \
15455 +       unsigned long long __div;                                       \
15456 +       unsigned long __mod;                                            \
15457 +                                                                       \
15458 +       __div = (*n);                                                   \
15459 +       __radix = (base);                                               \
15460 +                                                                       \
15461 +       __high = __div >> 32;                                           \
15462 +       __low = __div;                                                  \
15463 +                                                                       \
15464 +       if (__high < __radix) {                                         \
15465 +               __upper = __high;                                       \
15466 +               __high = 0;                                             \
15467 +       } else {                                                        \
15468 +               __upper = __high % __radix;                             \
15469 +               __high /= __radix;                                      \
15470 +       }                                                               \
15471 +                                                                       \
15472 +       __mod = do_div64_32(__low, __upper, __low, __radix);            \
15473 +                                                                       \
15474 +       __quot = __high;                                                \
15475 +       __quot = __quot << 32 | __low;                                  \
15476 +       (*n) = __quot;                                                  \
15477 +       __mod;                                                          \
15480 +#endif /* BITS_PER_LONG == 32 */
15482 +#include <asm-generic/div64.h>
15484  #endif /* __ASM_DIV64_H */
15485 diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
15486 index 2203e2d0ae2a..44a45f3fa4b0 100644
15487 --- a/arch/mips/include/asm/vdso/gettimeofday.h
15488 +++ b/arch/mips/include/asm/vdso/gettimeofday.h
15489 @@ -20,6 +20,12 @@
15491  #define VDSO_HAS_CLOCK_GETRES          1
15493 +#if MIPS_ISA_REV < 6
15494 +#define VDSO_SYSCALL_CLOBBERS "hi", "lo",
15495 +#else
15496 +#define VDSO_SYSCALL_CLOBBERS
15497 +#endif
15499  static __always_inline long gettimeofday_fallback(
15500                                 struct __kernel_old_timeval *_tv,
15501                                 struct timezone *_tz)
15502 @@ -35,7 +41,9 @@ static __always_inline long gettimeofday_fallback(
15503         : "=r" (ret), "=r" (error)
15504         : "r" (tv), "r" (tz), "r" (nr)
15505         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15506 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15507 +         "$14", "$15", "$24", "$25",
15508 +         VDSO_SYSCALL_CLOBBERS
15509 +         "memory");
15511         return error ? -ret : ret;
15513 @@ -59,7 +67,9 @@ static __always_inline long clock_gettime_fallback(
15514         : "=r" (ret), "=r" (error)
15515         : "r" (clkid), "r" (ts), "r" (nr)
15516         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15517 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15518 +         "$14", "$15", "$24", "$25",
15519 +         VDSO_SYSCALL_CLOBBERS
15520 +         "memory");
15522         return error ? -ret : ret;
15524 @@ -83,7 +93,9 @@ static __always_inline int clock_getres_fallback(
15525         : "=r" (ret), "=r" (error)
15526         : "r" (clkid), "r" (ts), "r" (nr)
15527         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15528 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15529 +         "$14", "$15", "$24", "$25",
15530 +         VDSO_SYSCALL_CLOBBERS
15531 +         "memory");
15533         return error ? -ret : ret;
15535 @@ -105,7 +117,9 @@ static __always_inline long clock_gettime32_fallback(
15536         : "=r" (ret), "=r" (error)
15537         : "r" (clkid), "r" (ts), "r" (nr)
15538         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15539 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15540 +         "$14", "$15", "$24", "$25",
15541 +         VDSO_SYSCALL_CLOBBERS
15542 +         "memory");
15544         return error ? -ret : ret;
15546 @@ -125,7 +139,9 @@ static __always_inline int clock_getres32_fallback(
15547         : "=r" (ret), "=r" (error)
15548         : "r" (clkid), "r" (ts), "r" (nr)
15549         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15550 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15551 +         "$14", "$15", "$24", "$25",
15552 +         VDSO_SYSCALL_CLOBBERS
15553 +         "memory");
15555         return error ? -ret : ret;
15557 diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
15558 index b71892064f27..0ef240adefb5 100644
15559 --- a/arch/mips/kernel/cpu-probe.c
15560 +++ b/arch/mips/kernel/cpu-probe.c
15561 @@ -1752,7 +1752,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15562                         set_isa(c, MIPS_CPU_ISA_M64R2);
15563                         break;
15564                 }
15565 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15566                 c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
15567                                 MIPS_ASE_LOONGSON_EXT2);
15568                 break;
15569 @@ -1782,7 +1781,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15570                  * register, we correct it here.
15571                  */
15572                 c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
15573 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15574                 c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
15575                         MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
15576                 c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
15577 @@ -1793,7 +1791,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15578                 set_elf_platform(cpu, "loongson3a");
15579                 set_isa(c, MIPS_CPU_ISA_M64R2);
15580                 decode_cpucfg(c);
15581 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15582                 break;
15583         default:
15584                 panic("Unknown Loongson Processor ID!");
15585 diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
15586 index cfa788bca871..1c664b23c0f9 100644
15587 --- a/arch/mips/loongson64/init.c
15588 +++ b/arch/mips/loongson64/init.c
15589 @@ -126,7 +126,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
15590                 return -ENOMEM;
15592         range->fwnode = fwnode;
15593 -       range->size = size;
15594 +       range->size = size = round_up(size, PAGE_SIZE);
15595         range->hw_start = hw_start;
15596         range->flags = LOGIC_PIO_CPU_MMIO;
15598 diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
15599 index 39052de915f3..3a909194284a 100644
15600 --- a/arch/mips/pci/pci-legacy.c
15601 +++ b/arch/mips/pci/pci-legacy.c
15602 @@ -166,8 +166,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
15603                         res = hose->mem_resource;
15604                         break;
15605                 }
15606 -               if (res != NULL)
15607 -                       of_pci_range_to_resource(&range, node, res);
15608 +               if (res != NULL) {
15609 +                       res->name = node->full_name;
15610 +                       res->flags = range.flags;
15611 +                       res->start = range.cpu_addr;
15612 +                       res->end = range.cpu_addr + range.size - 1;
15613 +                       res->parent = res->child = res->sibling = NULL;
15614 +               }
15615         }
15618 diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
15619 index d36061603752..e032932348d6 100644
15620 --- a/arch/mips/pci/pci-mt7620.c
15621 +++ b/arch/mips/pci/pci-mt7620.c
15622 @@ -30,6 +30,7 @@
15623  #define RALINK_GPIOMODE                        0x60
15625  #define PPLL_CFG1                      0x9c
15626 +#define PPLL_LD                                BIT(23)
15628  #define PPLL_DRV                       0xa0
15629  #define PDRV_SW_SET                    BIT(31)
15630 @@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev)
15631         rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
15632         mdelay(100);
15634 -       if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
15635 -               dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
15636 +       if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
15637 +               dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
15638                 reset_control_assert(rstpcie0);
15639                 rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
15640                 return -1;
15641 diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
15642 index e1f12e398136..f1538d2be89e 100644
15643 --- a/arch/mips/pci/pci-rt2880.c
15644 +++ b/arch/mips/pci/pci-rt2880.c
15645 @@ -180,7 +180,6 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
15647  int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15649 -       u16 cmd;
15650         int irq = -1;
15652         if (dev->bus->number != 0)
15653 @@ -188,8 +187,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15655         switch (PCI_SLOT(dev->devfn)) {
15656         case 0x00:
15657 -               rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
15658 -               (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
15659                 break;
15660         case 0x11:
15661                 irq = RT288X_CPU_IRQ_PCI;
15662 @@ -201,16 +198,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15663                 break;
15664         }
15666 -       pci_write_config_byte((struct pci_dev *) dev,
15667 -               PCI_CACHE_LINE_SIZE, 0x14);
15668 -       pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
15669 -       pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
15670 -       cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
15671 -               PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
15672 -               PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
15673 -       pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
15674 -       pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
15675 -                             dev->irq);
15676         return irq;
15679 @@ -251,6 +238,30 @@ static int rt288x_pci_probe(struct platform_device *pdev)
15681  int pcibios_plat_dev_init(struct pci_dev *dev)
15683 +       static bool slot0_init;
15685 +       /*
15686 +        * Nobody seems to initialize slot 0, but this platform requires it, so
15687 +        * do it once when some other slot is being enabled. The PCI subsystem
15688 +        * should configure other slots properly, so no need to do anything
15689 +        * special for those.
15690 +        */
15691 +       if (!slot0_init && dev->bus->number == 0) {
15692 +               u16 cmd;
15693 +               u32 bar0;
15695 +               slot0_init = true;
15697 +               pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
15698 +                                          0x08000000);
15699 +               pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
15700 +                                         &bar0);
15702 +               pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
15703 +               cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
15704 +               pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
15705 +       }
15707         return 0;
15710 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
15711 index 386ae12d8523..57c0ab71d51e 100644
15712 --- a/arch/powerpc/Kconfig
15713 +++ b/arch/powerpc/Kconfig
15714 @@ -224,7 +224,7 @@ config PPC
15715         select HAVE_LIVEPATCH                   if HAVE_DYNAMIC_FTRACE_WITH_REGS
15716         select HAVE_MOD_ARCH_SPECIFIC
15717         select HAVE_NMI                         if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
15718 -       select HAVE_HARDLOCKUP_DETECTOR_ARCH    if (PPC64 && PPC_BOOK3S)
15719 +       select HAVE_HARDLOCKUP_DETECTOR_ARCH    if PPC64 && PPC_BOOK3S && SMP
15720         select HAVE_OPTPROBES                   if PPC64
15721         select HAVE_PERF_EVENTS
15722         select HAVE_PERF_EVENTS_NMI             if PPC64
15723 diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
15724 index ae084357994e..6342f9da4545 100644
15725 --- a/arch/powerpc/Kconfig.debug
15726 +++ b/arch/powerpc/Kconfig.debug
15727 @@ -353,6 +353,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR
15728  config FAIL_IOMMU
15729         bool "Fault-injection capability for IOMMU"
15730         depends on FAULT_INJECTION
15731 +       depends on PCI || IBMVIO
15732         help
15733           Provide fault-injection capability for IOMMU. Each device can
15734           be selectively enabled via the fail_iommu property.
15735 diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
15736 index 058601efbc8a..b703330459b8 100644
15737 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
15738 +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
15739 @@ -7,6 +7,7 @@
15740  #ifndef __ASSEMBLY__
15741  #include <linux/mmdebug.h>
15742  #include <linux/bug.h>
15743 +#include <linux/sizes.h>
15744  #endif
15746  /*
15747 @@ -323,7 +324,8 @@ extern unsigned long pci_io_base;
15748  #define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
15749  #define IOREMAP_BASE   (PHB_IO_END)
15750  #define IOREMAP_START  (ioremap_bot)
15751 -#define IOREMAP_END    (KERN_IO_END)
15752 +#define IOREMAP_END    (KERN_IO_END - FIXADDR_SIZE)
15753 +#define FIXADDR_SIZE   SZ_32M
15755  /* Advertise special mapping type for AGP */
15756  #define HAVE_PAGE_AGP
15757 diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
15758 index c7813dc628fc..59cab558e2f0 100644
15759 --- a/arch/powerpc/include/asm/book3s/64/radix.h
15760 +++ b/arch/powerpc/include/asm/book3s/64/radix.h
15761 @@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
15762          * from ptesync, it should probably go into update_mmu_cache, rather
15763          * than set_pte_at (which is used to set ptes unrelated to faults).
15764          *
15765 -        * Spurious faults to vmalloc region are not tolerated, so there is
15766 -        * a ptesync in flush_cache_vmap.
15767 +        * Spurious faults from the kernel memory are not tolerated, so there
15768 +        * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
15769 +        * the pte update sequence from ISA Book III 6.10 Translation Table
15770 +        * Update Synchronization Requirements.
15771          */
15774 diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
15775 index 8d03c16a3663..947b5b9c4424 100644
15776 --- a/arch/powerpc/include/asm/fixmap.h
15777 +++ b/arch/powerpc/include/asm/fixmap.h
15778 @@ -23,12 +23,17 @@
15779  #include <asm/kmap_size.h>
15780  #endif
15782 +#ifdef CONFIG_PPC64
15783 +#define FIXADDR_TOP    (IOREMAP_END + FIXADDR_SIZE)
15784 +#else
15785 +#define FIXADDR_SIZE   0
15786  #ifdef CONFIG_KASAN
15787  #include <asm/kasan.h>
15788  #define FIXADDR_TOP    (KASAN_SHADOW_START - PAGE_SIZE)
15789  #else
15790  #define FIXADDR_TOP    ((unsigned long)(-PAGE_SIZE))
15791  #endif
15792 +#endif
15794  /*
15795   * Here we define all the compile-time 'special' virtual
15796 @@ -50,6 +55,7 @@
15797   */
15798  enum fixed_addresses {
15799         FIX_HOLE,
15800 +#ifdef CONFIG_PPC32
15801         /* reserve the top 128K for early debugging purposes */
15802         FIX_EARLY_DEBUG_TOP = FIX_HOLE,
15803         FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
15804 @@ -72,6 +78,7 @@ enum fixed_addresses {
15805                        FIX_IMMR_SIZE,
15806  #endif
15807         /* FIX_PCIE_MCFG, */
15808 +#endif /* CONFIG_PPC32 */
15809         __end_of_permanent_fixed_addresses,
15811  #define NR_FIX_BTMAPS          (SZ_256K / PAGE_SIZE)
15812 @@ -98,6 +105,8 @@ enum fixed_addresses {
15813  static inline void __set_fixmap(enum fixed_addresses idx,
15814                                 phys_addr_t phys, pgprot_t flags)
15816 +       BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
15818         if (__builtin_constant_p(idx))
15819                 BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
15820         else if (WARN_ON(idx >= __end_of_fixed_addresses))
15821 diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
15822 index e8d09a841373..31ed5356590a 100644
15823 --- a/arch/powerpc/include/asm/interrupt.h
15824 +++ b/arch/powerpc/include/asm/interrupt.h
15825 @@ -138,6 +138,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
15826         local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
15827         local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
15829 +       if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
15830 +                               regs->nip < (unsigned long)__end_interrupts) {
15831 +               // Kernel code running below __end_interrupts is
15832 +               // implicitly soft-masked.
15833 +               regs->softe = IRQS_ALL_DISABLED;
15834 +       }
15836         /* Don't do any per-CPU operations until interrupt state is fixed */
15837  #endif
15838         /* Allow DEC and PMI to be traced when they are soft-NMI */
15839 diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
15840 index 652ce85f9410..4bc45d3ed8b0 100644
15841 --- a/arch/powerpc/include/asm/mmu_context.h
15842 +++ b/arch/powerpc/include/asm/mmu_context.h
15843 @@ -263,7 +263,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
15844  static inline void arch_unmap(struct mm_struct *mm,
15845                               unsigned long start, unsigned long end)
15847 -       unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
15848 +       unsigned long vdso_base = (unsigned long)mm->context.vdso;
15850         if (start <= vdso_base && vdso_base < end)
15851                 mm->context.vdso = NULL;
15852 diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
15853 index 6cb8aa357191..57cd3892bfe0 100644
15854 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h
15855 +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
15856 @@ -6,6 +6,8 @@
15857   * the ppc64 non-hashed page table.
15858   */
15860 +#include <linux/sizes.h>
15862  #include <asm/nohash/64/pgtable-4k.h>
15863  #include <asm/barrier.h>
15864  #include <asm/asm-const.h>
15865 @@ -54,7 +56,8 @@
15866  #define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
15867  #define IOREMAP_BASE   (PHB_IO_END)
15868  #define IOREMAP_START  (ioremap_bot)
15869 -#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE)
15870 +#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
15871 +#define FIXADDR_SIZE   SZ_32M
15874  /*
15875 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
15876 index da103e92c112..37d0b8c76a59 100644
15877 --- a/arch/powerpc/include/asm/reg.h
15878 +++ b/arch/powerpc/include/asm/reg.h
15879 @@ -441,6 +441,7 @@
15880  #define   LPCR_VRMA_LP1                ASM_CONST(0x0000800000000000)
15881  #define   LPCR_RMLS            0x1C000000      /* Implementation dependent RMO limit sel */
15882  #define   LPCR_RMLS_SH         26
15883 +#define   LPCR_HAIL            ASM_CONST(0x0000000004000000)   /* HV AIL (ISAv3.1) */
15884  #define   LPCR_ILE             ASM_CONST(0x0000000002000000)   /* !HV irqs set MSR:LE */
15885  #define   LPCR_AIL             ASM_CONST(0x0000000001800000)   /* Alternate interrupt location */
15886  #define   LPCR_AIL_0           ASM_CONST(0x0000000000000000)   /* MMU off exception offset 0x0 */
15887 diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
15888 index 7a13bc20f0a0..47081a9e13ca 100644
15889 --- a/arch/powerpc/include/asm/smp.h
15890 +++ b/arch/powerpc/include/asm/smp.h
15891 @@ -121,6 +121,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
15892         return per_cpu(cpu_sibling_map, cpu);
15895 +static inline struct cpumask *cpu_core_mask(int cpu)
15897 +       return per_cpu(cpu_core_map, cpu);
15900  static inline struct cpumask *cpu_l2_cache_mask(int cpu)
15902         return per_cpu(cpu_l2_cache_map, cpu);
15903 diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
15904 index cc79856896a1..4ba87de32be0 100644
15905 --- a/arch/powerpc/include/uapi/asm/errno.h
15906 +++ b/arch/powerpc/include/uapi/asm/errno.h
15907 @@ -2,6 +2,7 @@
15908  #ifndef _ASM_POWERPC_ERRNO_H
15909  #define _ASM_POWERPC_ERRNO_H
15911 +#undef EDEADLOCK
15912  #include <asm-generic/errno.h>
15914  #undef EDEADLOCK
15915 diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
15916 index cd60bc1c8701..7040e430a124 100644
15917 --- a/arch/powerpc/kernel/eeh.c
15918 +++ b/arch/powerpc/kernel/eeh.c
15919 @@ -362,14 +362,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
15920         pa = pte_pfn(*ptep);
15922         /* On radix we can do hugepage mappings for io, so handle that */
15923 -       if (hugepage_shift) {
15924 -               pa <<= hugepage_shift;
15925 -               pa |= token & ((1ul << hugepage_shift) - 1);
15926 -       } else {
15927 -               pa <<= PAGE_SHIFT;
15928 -               pa |= token & (PAGE_SIZE - 1);
15929 -       }
15930 +       if (!hugepage_shift)
15931 +               hugepage_shift = PAGE_SHIFT;
15933 +       pa <<= PAGE_SHIFT;
15934 +       pa |= token & ((1ul << hugepage_shift) - 1);
15935         return pa;
15938 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
15939 index 8482739d42f3..eddf362caedc 100644
15940 --- a/arch/powerpc/kernel/fadump.c
15941 +++ b/arch/powerpc/kernel/fadump.c
15942 @@ -292,7 +292,7 @@ static void fadump_show_config(void)
15943   * that is required for a kernel to boot successfully.
15944   *
15945   */
15946 -static inline u64 fadump_calculate_reserve_size(void)
15947 +static __init u64 fadump_calculate_reserve_size(void)
15949         u64 base, size, bootmem_min;
15950         int ret;
15951 diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
15952 index 5d4706c14572..cf8ca08295bf 100644
15953 --- a/arch/powerpc/kernel/head_32.h
15954 +++ b/arch/powerpc/kernel/head_32.h
15955 @@ -261,11 +261,7 @@
15956         lis     r1, emergency_ctx@ha
15957  #endif
15958         lwz     r1, emergency_ctx@l(r1)
15959 -       cmpwi   cr1, r1, 0
15960 -       bne     cr1, 1f
15961 -       lis     r1, init_thread_union@ha
15962 -       addi    r1, r1, init_thread_union@l
15963 -1:     addi    r1, r1, THREAD_SIZE - INT_FRAME_SIZE
15964 +       addi    r1, r1, THREAD_SIZE - INT_FRAME_SIZE
15965         EXCEPTION_PROLOG_2
15966         SAVE_NVGPRS(r11)
15967         addi    r3, r1, STACK_FRAME_OVERHEAD
15968 diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
15969 index c475a229a42a..352346e14a08 100644
15970 --- a/arch/powerpc/kernel/interrupt.c
15971 +++ b/arch/powerpc/kernel/interrupt.c
15972 @@ -34,11 +34,11 @@ notrace long system_call_exception(long r3, long r4, long r5,
15973         if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
15974                 BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
15976 +       trace_hardirqs_off(); /* finish reconciling */
15978         CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
15979         user_exit_irqoff();
15981 -       trace_hardirqs_off(); /* finish reconciling */
15983         if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
15984                 BUG_ON(!(regs->msr & MSR_RI));
15985         BUG_ON(!(regs->msr & MSR_PR));
15986 diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
15987 index c00214a4355c..4023f91defa6 100644
15988 --- a/arch/powerpc/kernel/iommu.c
15989 +++ b/arch/powerpc/kernel/iommu.c
15990 @@ -1096,7 +1096,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
15992         spin_lock_irqsave(&tbl->large_pool.lock, flags);
15993         for (i = 0; i < tbl->nr_pools; i++)
15994 -               spin_lock(&tbl->pools[i].lock);
15995 +               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
15997         iommu_table_release_pages(tbl);
15999 @@ -1124,7 +1124,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
16001         spin_lock_irqsave(&tbl->large_pool.lock, flags);
16002         for (i = 0; i < tbl->nr_pools; i++)
16003 -               spin_lock(&tbl->pools[i].lock);
16004 +               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
16006         memset(tbl->it_map, 0, sz);
16008 diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
16009 index 9a4797d1d40d..a8b2d6bfc1ca 100644
16010 --- a/arch/powerpc/kernel/prom.c
16011 +++ b/arch/powerpc/kernel/prom.c
16012 @@ -267,7 +267,7 @@ static struct feature_property {
16013  };
16015  #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
16016 -static inline void identical_pvr_fixup(unsigned long node)
16017 +static __init void identical_pvr_fixup(unsigned long node)
16019         unsigned int pvr;
16020         const char *model = of_get_flat_dt_prop(node, "model", NULL);
16021 diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
16022 index 8ba49a6bf515..d7c1f92152af 100644
16023 --- a/arch/powerpc/kernel/setup_32.c
16024 +++ b/arch/powerpc/kernel/setup_32.c
16025 @@ -164,7 +164,7 @@ void __init irqstack_early_init(void)
16028  #ifdef CONFIG_VMAP_STACK
16029 -void *emergency_ctx[NR_CPUS] __ro_after_init;
16030 +void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};
16032  void __init emergency_stack_init(void)
16034 diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
16035 index 560ed8b975e7..830fee91b2d9 100644
16036 --- a/arch/powerpc/kernel/setup_64.c
16037 +++ b/arch/powerpc/kernel/setup_64.c
16038 @@ -232,10 +232,23 @@ static void cpu_ready_for_interrupts(void)
16039          * If we are not in hypervisor mode the job is done once for
16040          * the whole partition in configure_exceptions().
16041          */
16042 -       if (cpu_has_feature(CPU_FTR_HVMODE) &&
16043 -           cpu_has_feature(CPU_FTR_ARCH_207S)) {
16044 +       if (cpu_has_feature(CPU_FTR_HVMODE)) {
16045                 unsigned long lpcr = mfspr(SPRN_LPCR);
16046 -               mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
16047 +               unsigned long new_lpcr = lpcr;
16049 +               if (cpu_has_feature(CPU_FTR_ARCH_31)) {
16050 +                       /* P10 DD1 does not have HAIL */
16051 +                       if (pvr_version_is(PVR_POWER10) &&
16052 +                                       (mfspr(SPRN_PVR) & 0xf00) == 0x100)
16053 +                               new_lpcr |= LPCR_AIL_3;
16054 +                       else
16055 +                               new_lpcr |= LPCR_HAIL;
16056 +               } else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
16057 +                       new_lpcr |= LPCR_AIL_3;
16058 +               }
16060 +               if (new_lpcr != lpcr)
16061 +                       mtspr(SPRN_LPCR, new_lpcr);
16062         }
16064         /*
16065 diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
16066 index 5a4d59a1070d..c2473e20f5f5 100644
16067 --- a/arch/powerpc/kernel/smp.c
16068 +++ b/arch/powerpc/kernel/smp.c
16069 @@ -1057,17 +1057,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
16070                                 local_memory_node(numa_cpu_lookup_table[cpu]));
16071                 }
16072  #endif
16073 -               /*
16074 -                * cpu_core_map is now more updated and exists only since
16075 -                * its been exported for long. It only will have a snapshot
16076 -                * of cpu_cpu_mask.
16077 -                */
16078 -               cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
16079         }
16081         /* Init the cpumasks so the boot CPU is related to itself */
16082         cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
16083         cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
16084 +       cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
16086         if (has_coregroup_support())
16087                 cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
16088 @@ -1408,6 +1403,9 @@ static void remove_cpu_from_masks(int cpu)
16089                         set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
16090         }
16092 +       for_each_cpu(i, cpu_core_mask(cpu))
16093 +               set_cpus_unrelated(cpu, i, cpu_core_mask);
16095         if (has_coregroup_support()) {
16096                 for_each_cpu(i, cpu_coregroup_mask(cpu))
16097                         set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
16098 @@ -1468,8 +1466,11 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
16100  static void add_cpu_to_masks(int cpu)
16102 +       struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
16103         int first_thread = cpu_first_thread_sibling(cpu);
16104 +       int chip_id = cpu_to_chip_id(cpu);
16105         cpumask_var_t mask;
16106 +       bool ret;
16107         int i;
16109         /*
16110 @@ -1485,12 +1486,36 @@ static void add_cpu_to_masks(int cpu)
16111         add_cpu_to_smallcore_masks(cpu);
16113         /* In CPU-hotplug path, hence use GFP_ATOMIC */
16114 -       alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
16115 +       ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
16116         update_mask_by_l2(cpu, &mask);
16118         if (has_coregroup_support())
16119                 update_coregroup_mask(cpu, &mask);
16121 +       if (chip_id == -1 || !ret) {
16122 +               cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
16123 +               goto out;
16124 +       }
16126 +       if (shared_caches)
16127 +               submask_fn = cpu_l2_cache_mask;
16129 +       /* Update core_mask with all the CPUs that are part of submask */
16130 +       or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
16132 +       /* Skip all CPUs already part of current CPU core mask */
16133 +       cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
16135 +       for_each_cpu(i, mask) {
16136 +               if (chip_id == cpu_to_chip_id(i)) {
16137 +                       or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
16138 +                       cpumask_andnot(mask, mask, submask_fn(i));
16139 +               } else {
16140 +                       cpumask_andnot(mask, mask, cpu_core_mask(i));
16141 +               }
16142 +       }
16144 +out:
16145         free_cpumask_var(mask);
16148 @@ -1521,6 +1546,9 @@ void start_secondary(void *unused)
16150         vdso_getcpu_init();
16151  #endif
16152 +       set_numa_node(numa_cpu_lookup_table[cpu]);
16153 +       set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
16155         /* Update topology CPU masks */
16156         add_cpu_to_masks(cpu);
16158 @@ -1539,9 +1567,6 @@ void start_secondary(void *unused)
16159                         shared_caches = true;
16160         }
16162 -       set_numa_node(numa_cpu_lookup_table[cpu]);
16163 -       set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
16165         smp_wmb();
16166         notify_cpu_starting(cpu);
16167         set_cpu_online(cpu, true);
16168 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
16169 index e839a906fdf2..b14907209822 100644
16170 --- a/arch/powerpc/kernel/vdso.c
16171 +++ b/arch/powerpc/kernel/vdso.c
16172 @@ -55,10 +55,10 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
16174         unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
16176 -       if (new_size != text_size + PAGE_SIZE)
16177 +       if (new_size != text_size)
16178                 return -EINVAL;
16180 -       current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE;
16181 +       current->mm->context.vdso = (void __user *)new_vma->vm_start;
16183         return 0;
16185 @@ -73,6 +73,10 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str
16186         return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
16189 +static struct vm_special_mapping vvar_spec __ro_after_init = {
16190 +       .name = "[vvar]",
16193  static struct vm_special_mapping vdso32_spec __ro_after_init = {
16194         .name = "[vdso]",
16195         .mremap = vdso32_mremap,
16196 @@ -89,11 +93,11 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = {
16197   */
16198  static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
16200 -       struct mm_struct *mm = current->mm;
16201 +       unsigned long vdso_size, vdso_base, mappings_size;
16202         struct vm_special_mapping *vdso_spec;
16203 +       unsigned long vvar_size = PAGE_SIZE;
16204 +       struct mm_struct *mm = current->mm;
16205         struct vm_area_struct *vma;
16206 -       unsigned long vdso_size;
16207 -       unsigned long vdso_base;
16209         if (is_32bit_task()) {
16210                 vdso_spec = &vdso32_spec;
16211 @@ -110,8 +114,8 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16212                 vdso_base = 0;
16213         }
16215 -       /* Add a page to the vdso size for the data page */
16216 -       vdso_size += PAGE_SIZE;
16217 +       mappings_size = vdso_size + vvar_size;
16218 +       mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
16220         /*
16221          * pick a base address for the vDSO in process space. We try to put it
16222 @@ -119,9 +123,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16223          * and end up putting it elsewhere.
16224          * Add enough to the size so that the result can be aligned.
16225          */
16226 -       vdso_base = get_unmapped_area(NULL, vdso_base,
16227 -                                     vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
16228 -                                     0, 0);
16229 +       vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
16230         if (IS_ERR_VALUE(vdso_base))
16231                 return vdso_base;
16233 @@ -133,7 +135,13 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16234          * install_special_mapping or the perf counter mmap tracking code
16235          * will fail to recognise it as a vDSO.
16236          */
16237 -       mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE;
16238 +       mm->context.vdso = (void __user *)vdso_base + vvar_size;
16240 +       vma = _install_special_mapping(mm, vdso_base, vvar_size,
16241 +                                      VM_READ | VM_MAYREAD | VM_IO |
16242 +                                      VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
16243 +       if (IS_ERR(vma))
16244 +               return PTR_ERR(vma);
16246         /*
16247          * our vma flags don't have VM_WRITE so by default, the process isn't
16248 @@ -145,9 +153,12 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16249          * It's fine to use that for setting breakpoints in the vDSO code
16250          * pages though.
16251          */
16252 -       vma = _install_special_mapping(mm, vdso_base, vdso_size,
16253 +       vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
16254                                        VM_READ | VM_EXEC | VM_MAYREAD |
16255                                        VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
16256 +       if (IS_ERR(vma))
16257 +               do_munmap(mm, vdso_base, vvar_size, NULL);
16259         return PTR_ERR_OR_ZERO(vma);
16262 @@ -249,11 +260,22 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
16263         if (!pagelist)
16264                 panic("%s: Cannot allocate page list for VDSO", __func__);
16266 -       pagelist[0] = virt_to_page(vdso_data);
16268         for (i = 0; i < pages; i++)
16269 -               pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE);
16270 +               pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
16272 +       return pagelist;
16275 +static struct page ** __init vvar_setup_pages(void)
16277 +       struct page **pagelist;
16279 +       /* .pages is NULL-terminated */
16280 +       pagelist = kcalloc(2, sizeof(struct page *), GFP_KERNEL);
16281 +       if (!pagelist)
16282 +               panic("%s: Cannot allocate page list for VVAR", __func__);
16284 +       pagelist[0] = virt_to_page(vdso_data);
16285         return pagelist;
16288 @@ -295,6 +317,8 @@ static int __init vdso_init(void)
16289         if (IS_ENABLED(CONFIG_PPC64))
16290                 vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
16292 +       vvar_spec.pages = vvar_setup_pages();
16294         smp_wmb();
16296         return 0;
16297 diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
16298 index 02b9e4d0dc40..a8a7cb71086b 100644
16299 --- a/arch/powerpc/kexec/file_load_64.c
16300 +++ b/arch/powerpc/kexec/file_load_64.c
16301 @@ -960,6 +960,93 @@ unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
16302         return fdt_size;
16305 +/**
16306 + * add_node_props - Reads node properties from device node structure and add
16307 + *                  them to fdt.
16308 + * @fdt:            Flattened device tree of the kernel
16309 + * @node_offset:    offset of the node to add a property at
16310 + * @dn:             device node pointer
16311 + *
16312 + * Returns 0 on success, negative errno on error.
16313 + */
16314 +static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
16316 +       int ret = 0;
16317 +       struct property *pp;
16319 +       if (!dn)
16320 +               return -EINVAL;
16322 +       for_each_property_of_node(dn, pp) {
16323 +               ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
16324 +               if (ret < 0) {
16325 +                       pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
16326 +                       return ret;
16327 +               }
16328 +       }
16329 +       return ret;
16332 +/**
16333 + * update_cpus_node - Update cpus node of flattened device tree using of_root
16334 + *                    device node.
16335 + * @fdt:              Flattened device tree of the kernel.
16336 + *
16337 + * Returns 0 on success, negative errno on error.
16338 + */
16339 +static int update_cpus_node(void *fdt)
16341 +       struct device_node *cpus_node, *dn;
16342 +       int cpus_offset, cpus_subnode_offset, ret = 0;
16344 +       cpus_offset = fdt_path_offset(fdt, "/cpus");
16345 +       if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
16346 +               pr_err("Malformed device tree: error reading /cpus node: %s\n",
16347 +                      fdt_strerror(cpus_offset));
16348 +               return cpus_offset;
16349 +       }
16351 +       if (cpus_offset > 0) {
16352 +               ret = fdt_del_node(fdt, cpus_offset);
16353 +               if (ret < 0) {
16354 +                       pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
16355 +                       return -EINVAL;
16356 +               }
16357 +       }
16359 +       /* Add cpus node to fdt */
16360 +       cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
16361 +       if (cpus_offset < 0) {
16362 +               pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
16363 +               return -EINVAL;
16364 +       }
16366 +       /* Add cpus node properties */
16367 +       cpus_node = of_find_node_by_path("/cpus");
16368 +       ret = add_node_props(fdt, cpus_offset, cpus_node);
16369 +       of_node_put(cpus_node);
16370 +       if (ret < 0)
16371 +               return ret;
16373 +       /* Loop through all subnodes of cpus and add them to fdt */
16374 +       for_each_node_by_type(dn, "cpu") {
16375 +               cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
16376 +               if (cpus_subnode_offset < 0) {
16377 +                       pr_err("Unable to add %s subnode: %s\n", dn->full_name,
16378 +                              fdt_strerror(cpus_subnode_offset));
16379 +                       ret = cpus_subnode_offset;
16380 +                       goto out;
16381 +               }
16383 +               ret = add_node_props(fdt, cpus_subnode_offset, dn);
16384 +               if (ret < 0)
16385 +                       goto out;
16386 +       }
16387 +out:
16388 +       of_node_put(dn);
16389 +       return ret;
16392  /**
16393   * setup_new_fdt_ppc64 - Update the flattend device-tree of the kernel
16394   *                       being loaded.
16395 @@ -1020,6 +1107,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
16396                 }
16397         }
16399 +       /* Update cpus nodes information to account hotplug CPUs. */
16400 +       ret =  update_cpus_node(fdt);
16401 +       if (ret < 0)
16402 +               goto out;
16404         /* Update memory reserve map */
16405         ret = get_reserved_memory_ranges(&rmem);
16406         if (ret)
16407 diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
16408 index e452158a18d7..c3e31fef0be1 100644
16409 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c
16410 +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
16411 @@ -8,6 +8,7 @@
16412   */
16414  #include <linux/kvm_host.h>
16415 +#include <linux/pkeys.h>
16417  #include <asm/kvm_ppc.h>
16418  #include <asm/kvm_book3s.h>
16419 @@ -133,6 +134,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
16420         else
16421                 kvmppc_mmu_flush_icache(pfn);
16423 +       rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
16424         rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
16426         /*
16427 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
16428 index 13bad6bf4c95..208a053c9adf 100644
16429 --- a/arch/powerpc/kvm/book3s_hv.c
16430 +++ b/arch/powerpc/kvm/book3s_hv.c
16431 @@ -3728,7 +3728,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
16432         vcpu->arch.dec_expires = dec + tb;
16433         vcpu->cpu = -1;
16434         vcpu->arch.thread_cpu = -1;
16435 +       /* Save guest CTRL register, set runlatch to 1 */
16436         vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
16437 +       if (!(vcpu->arch.ctrl & 1))
16438 +               mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
16440         vcpu->arch.iamr = mfspr(SPRN_IAMR);
16441         vcpu->arch.pspb = mfspr(SPRN_PSPB);
16442 diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
16443 index d4efc182662a..248f7c9e36fc 100644
16444 --- a/arch/powerpc/lib/Makefile
16445 +++ b/arch/powerpc/lib/Makefile
16446 @@ -5,6 +5,9 @@
16448  ccflags-$(CONFIG_PPC64)        := $(NO_MINIMAL_TOC)
16450 +CFLAGS_code-patching.o += -fno-stack-protector
16451 +CFLAGS_feature-fixups.o += -fno-stack-protector
16453  CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
16454  CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
16456 diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
16457 index 1fd31b4b0e13..0aefa6a4a259 100644
16458 --- a/arch/powerpc/lib/feature-fixups.c
16459 +++ b/arch/powerpc/lib/feature-fixups.c
16460 @@ -14,6 +14,7 @@
16461  #include <linux/string.h>
16462  #include <linux/init.h>
16463  #include <linux/sched/mm.h>
16464 +#include <linux/stop_machine.h>
16465  #include <asm/cputable.h>
16466  #include <asm/code-patching.h>
16467  #include <asm/page.h>
16468 @@ -227,11 +228,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
16469                                                            : "unknown");
16472 +static int __do_stf_barrier_fixups(void *data)
16474 +       enum stf_barrier_type *types = data;
16476 +       do_stf_entry_barrier_fixups(*types);
16477 +       do_stf_exit_barrier_fixups(*types);
16479 +       return 0;
16482  void do_stf_barrier_fixups(enum stf_barrier_type types)
16484 -       do_stf_entry_barrier_fixups(types);
16485 -       do_stf_exit_barrier_fixups(types);
16486 +       /*
16487 +        * The call to the fallback entry flush, and the fallback/sync-ori exit
16488 +        * flush can not be safely patched in/out while other CPUs are executing
16489 +        * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
16490 +        * spin in the stop machine core with interrupts hard disabled.
16491 +        */
16492 +       stop_machine(__do_stf_barrier_fixups, &types, NULL);
16495  void do_uaccess_flush_fixups(enum l1d_flush_type types)
16496 @@ -284,8 +299,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
16497                                                 : "unknown");
16500 -void do_entry_flush_fixups(enum l1d_flush_type types)
16501 +static int __do_entry_flush_fixups(void *data)
16503 +       enum l1d_flush_type types = *(enum l1d_flush_type *)data;
16504         unsigned int instrs[3], *dest;
16505         long *start, *end;
16506         int i;
16507 @@ -354,6 +370,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
16508                                                         : "ori type" :
16509                 (types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
16510                                                 : "unknown");
16512 +       return 0;
16515 +void do_entry_flush_fixups(enum l1d_flush_type types)
16517 +       /*
16518 +        * The call to the fallback flush can not be safely patched in/out while
16519 +        * other CPUs are executing it. So call __do_entry_flush_fixups() on one
16520 +        * CPU while all other CPUs spin in the stop machine core with interrupts
16521 +        * hard disabled.
16522 +        */
16523 +       stop_machine(__do_entry_flush_fixups, &types, NULL);
16526  void do_rfi_flush_fixups(enum l1d_flush_type types)
16527 diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
16528 index 567e0c6b3978..03819c259f0a 100644
16529 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c
16530 +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
16531 @@ -428,12 +428,14 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
16533  void hash__mark_rodata_ro(void)
16535 -       unsigned long start, end;
16536 +       unsigned long start, end, pp;
16538         start = (unsigned long)_stext;
16539         end = (unsigned long)__init_begin;
16541 -       WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
16542 +       pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);
16544 +       WARN_ON(!hash__change_memory_range(start, end, pp));
16547  void hash__mark_initmem_nx(void)
16548 diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
16549 index 581b20a2feaf..12de1906e97b 100644
16550 --- a/arch/powerpc/mm/book3s64/hash_utils.c
16551 +++ b/arch/powerpc/mm/book3s64/hash_utils.c
16552 @@ -338,7 +338,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
16553  int htab_remove_mapping(unsigned long vstart, unsigned long vend,
16554                       int psize, int ssize)
16556 -       unsigned long vaddr;
16557 +       unsigned long vaddr, time_limit;
16558         unsigned int step, shift;
16559         int rc;
16560         int ret = 0;
16561 @@ -351,8 +351,19 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
16563         /* Unmap the full range specificied */
16564         vaddr = ALIGN_DOWN(vstart, step);
16565 +       time_limit = jiffies + HZ;
16567         for (;vaddr < vend; vaddr += step) {
16568                 rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
16570 +               /*
16571 +                * For large number of mappings introduce a cond_resched()
16572 +                * to prevent softlockup warnings.
16573 +                */
16574 +               if (time_after(jiffies, time_limit)) {
16575 +                       cond_resched();
16576 +                       time_limit = jiffies + HZ;
16577 +               }
16578                 if (rc == -ENOENT) {
16579                         ret = -ENOENT;
16580                         continue;
16581 @@ -1545,10 +1556,10 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
16582         if (user_mode(regs) || (region_id == USER_REGION_ID))
16583                 access &= ~_PAGE_PRIVILEGED;
16585 -       if (regs->trap == 0x400)
16586 +       if (TRAP(regs) == 0x400)
16587                 access |= _PAGE_EXEC;
16589 -       err = hash_page_mm(mm, ea, access, regs->trap, flags);
16590 +       err = hash_page_mm(mm, ea, access, TRAP(regs), flags);
16591         if (unlikely(err < 0)) {
16592                 // failed to instert a hash PTE due to an hypervisor error
16593                 if (user_mode(regs)) {
16594 diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
16595 index 98f0b243c1ab..39d488a212a0 100644
16596 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
16597 +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
16598 @@ -108,7 +108,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
16600  set_the_pte:
16601         set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
16602 -       smp_wmb();
16603 +       asm volatile("ptesync": : :"memory");
16604         return 0;
16607 @@ -168,7 +168,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
16609  set_the_pte:
16610         set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
16611 -       smp_wmb();
16612 +       asm volatile("ptesync": : :"memory");
16613         return 0;
16616 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
16617 index 4e8ce6d85232..7a59a5c9aa5d 100644
16618 --- a/arch/powerpc/mm/mem.c
16619 +++ b/arch/powerpc/mm/mem.c
16620 @@ -54,7 +54,6 @@
16622  #include <mm/mmu_decl.h>
16624 -static DEFINE_MUTEX(linear_mapping_mutex);
16625  unsigned long long memory_limit;
16626  bool init_mem_is_free;
16628 @@ -72,6 +71,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
16629  EXPORT_SYMBOL(phys_mem_access_prot);
16631  #ifdef CONFIG_MEMORY_HOTPLUG
16632 +static DEFINE_MUTEX(linear_mapping_mutex);
16634  #ifdef CONFIG_NUMA
16635  int memory_add_physaddr_to_nid(u64 start)
16636 diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
16637 index e4f577da33d8..8b5eeb6fb2fb 100644
16638 --- a/arch/powerpc/perf/isa207-common.c
16639 +++ b/arch/powerpc/perf/isa207-common.c
16640 @@ -447,8 +447,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp,
16641          * EBB events are pinned & exclusive, so this should never actually
16642          * hit, but we leave it as a fallback in case.
16643          */
16644 -       mask  |= CNST_EBB_VAL(ebb);
16645 -       value |= CNST_EBB_MASK;
16646 +       mask  |= CNST_EBB_MASK;
16647 +       value |= CNST_EBB_VAL(ebb);
16649         *maskp = mask;
16650         *valp = value;
16651 diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h
16652 index e45dafe818ed..93be7197d250 100644
16653 --- a/arch/powerpc/perf/power10-events-list.h
16654 +++ b/arch/powerpc/perf/power10-events-list.h
16655 @@ -75,5 +75,5 @@ EVENT(PM_RUN_INST_CMPL_ALT,                   0x00002);
16656   *     thresh end (TE)
16657   */
16659 -EVENT(MEM_LOADS,                               0x34340401e0);
16660 -EVENT(MEM_STORES,                              0x343c0401e0);
16661 +EVENT(MEM_LOADS,                               0x35340401e0);
16662 +EVENT(MEM_STORES,                              0x353c0401e0);
16663 diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
16664 index 11475c58ea43..afee8b1515a8 100644
16665 --- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
16666 +++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
16667 @@ -181,7 +181,7 @@ sram_code:
16668    udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
16669         mullw   r12, r12, r11
16670         mftb    r13     /* start */
16671 -       addi    r12, r13, r12 /* end */
16672 +       add     r12, r13, r12 /* end */
16673      1:
16674         mftb    r13     /* current */
16675         cmp     cr0, r13, r12
16676 diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
16677 index 019669eb21d2..4ab7c3ef5826 100644
16678 --- a/arch/powerpc/platforms/powernv/memtrace.c
16679 +++ b/arch/powerpc/platforms/powernv/memtrace.c
16680 @@ -88,8 +88,8 @@ static void memtrace_clear_range(unsigned long start_pfn,
16681          * Before we go ahead and use this range as cache inhibited range
16682          * flush the cache.
16683          */
16684 -       flush_dcache_range_chunked(PFN_PHYS(start_pfn),
16685 -                                  PFN_PHYS(start_pfn + nr_pages),
16686 +       flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
16687 +                                  (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
16688                                    FLUSH_CHUNK_SIZE);
16691 diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
16692 index 12cbffd3c2e3..325f3b220f36 100644
16693 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
16694 +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
16695 @@ -47,9 +47,6 @@ static void rtas_stop_self(void)
16697         BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
16699 -       printk("cpu %u (hwid %u) Ready to die...\n",
16700 -              smp_processor_id(), hard_smp_processor_id());
16702         rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
16704         panic("Alas, I survived.\n");
16705 diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
16706 index 9fc5217f0c8e..836cbbe0ecc5 100644
16707 --- a/arch/powerpc/platforms/pseries/iommu.c
16708 +++ b/arch/powerpc/platforms/pseries/iommu.c
16709 @@ -1229,7 +1229,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
16710         if (pmem_present) {
16711                 if (query.largest_available_block >=
16712                     (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
16713 -                       len = MAX_PHYSMEM_BITS - page_shift;
16714 +                       len = MAX_PHYSMEM_BITS;
16715                 else
16716                         dev_info(&dev->dev, "Skipping ibm,pmemory");
16717         }
16718 diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
16719 index 3805519a6469..cd38bd421f38 100644
16720 --- a/arch/powerpc/platforms/pseries/lpar.c
16721 +++ b/arch/powerpc/platforms/pseries/lpar.c
16722 @@ -977,11 +977,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
16723         slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
16724         BUG_ON(slot == -1);
16726 -       flags = newpp & 7;
16727 +       flags = newpp & (HPTE_R_PP | HPTE_R_N);
16728         if (mmu_has_feature(MMU_FTR_KERNEL_RO))
16729                 /* Move pp0 into bit 8 (IBM 55) */
16730                 flags |= (newpp & HPTE_R_PP0) >> 55;
16732 +       flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);
16734         lpar_rc = plpar_pte_protect(flags, slot, 0);
16736         BUG_ON(lpar_rc != H_SUCCESS);
16737 diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
16738 index f9ae17e8a0f4..a8f9140a24fa 100644
16739 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c
16740 +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
16741 @@ -50,6 +50,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
16742  int remove_phb_dynamic(struct pci_controller *phb)
16744         struct pci_bus *b = phb->bus;
16745 +       struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
16746         struct resource *res;
16747         int rc, i;
16749 @@ -76,7 +77,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
16750         /* Remove the PCI bus and unregister the bridge device from sysfs */
16751         phb->bus = NULL;
16752         pci_remove_bus(b);
16753 -       device_unregister(b->bridge);
16754 +       host_bridge->bus = NULL;
16755 +       device_unregister(&host_bridge->dev);
16757         /* Now release the IO resource */
16758         if (res->flags & IORESOURCE_IO)
16759 diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
16760 index 9cb4fc839fd5..429053d0402a 100644
16761 --- a/arch/powerpc/platforms/pseries/vio.c
16762 +++ b/arch/powerpc/platforms/pseries/vio.c
16763 @@ -1285,6 +1285,10 @@ static int vio_bus_remove(struct device *dev)
16764  int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
16765                           const char *mod_name)
16767 +       // vio_bus_type is only initialised for pseries
16768 +       if (!machine_is(pseries))
16769 +               return -ENODEV;
16771         pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
16773         /* fill in 'struct driver' fields */
16774 diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
16775 index 595310e056f4..31b657c37735 100644
16776 --- a/arch/powerpc/sysdev/xive/common.c
16777 +++ b/arch/powerpc/sysdev/xive/common.c
16778 @@ -253,17 +253,20 @@ notrace void xmon_xive_do_dump(int cpu)
16779         xmon_printf("\n");
16782 +static struct irq_data *xive_get_irq_data(u32 hw_irq)
16784 +       unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
16786 +       return irq ? irq_get_irq_data(irq) : NULL;
16789  int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
16791 -       struct irq_chip *chip = irq_data_get_irq_chip(d);
16792         int rc;
16793         u32 target;
16794         u8 prio;
16795         u32 lirq;
16797 -       if (!is_xive_irq(chip))
16798 -               return -EINVAL;
16800         rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
16801         if (rc) {
16802                 xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
16803 @@ -273,6 +276,9 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
16804         xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
16805                     hw_irq, target, prio, lirq);
16807 +       if (!d)
16808 +               d = xive_get_irq_data(hw_irq);
16810         if (d) {
16811                 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
16812                 u64 val = xive_esb_read(xd, XIVE_ESB_GET);
16813 @@ -1335,17 +1341,14 @@ static int xive_prepare_cpu(unsigned int cpu)
16815         xc = per_cpu(xive_cpu, cpu);
16816         if (!xc) {
16817 -               struct device_node *np;
16819                 xc = kzalloc_node(sizeof(struct xive_cpu),
16820                                   GFP_KERNEL, cpu_to_node(cpu));
16821                 if (!xc)
16822                         return -ENOMEM;
16823 -               np = of_get_cpu_node(cpu, NULL);
16824 -               if (np)
16825 -                       xc->chip_id = of_get_ibm_chip_id(np);
16826 -               of_node_put(np);
16827                 xc->hw_ipi = XIVE_BAD_IRQ;
16828 +               xc->chip_id = XIVE_INVALID_CHIP_ID;
16829 +               if (xive_ops->prepare_cpu)
16830 +                       xive_ops->prepare_cpu(cpu, xc);
16832                 per_cpu(xive_cpu, cpu) = xc;
16833         }
16834 @@ -1599,6 +1602,8 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
16835         u32 target;
16836         u8 prio;
16837         u32 lirq;
16838 +       struct xive_irq_data *xd;
16839 +       u64 val;
16841         if (!is_xive_irq(chip))
16842                 return;
16843 @@ -1612,17 +1617,14 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
16844         seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
16845                    hw_irq, target, prio, lirq);
16847 -       if (d) {
16848 -               struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
16849 -               u64 val = xive_esb_read(xd, XIVE_ESB_GET);
16851 -               seq_printf(m, "flags=%c%c%c PQ=%c%c",
16852 -                          xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
16853 -                          xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
16854 -                          xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
16855 -                          val & XIVE_ESB_VAL_P ? 'P' : '-',
16856 -                          val & XIVE_ESB_VAL_Q ? 'Q' : '-');
16857 -       }
16858 +       xd = irq_data_get_irq_handler_data(d);
16859 +       val = xive_esb_read(xd, XIVE_ESB_GET);
16860 +       seq_printf(m, "flags=%c%c%c PQ=%c%c",
16861 +                  xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
16862 +                  xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
16863 +                  xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
16864 +                  val & XIVE_ESB_VAL_P ? 'P' : '-',
16865 +                  val & XIVE_ESB_VAL_Q ? 'Q' : '-');
16866         seq_puts(m, "\n");
16869 diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
16870 index 05a800a3104e..57e3f1540435 100644
16871 --- a/arch/powerpc/sysdev/xive/native.c
16872 +++ b/arch/powerpc/sysdev/xive/native.c
16873 @@ -380,6 +380,11 @@ static void xive_native_update_pending(struct xive_cpu *xc)
16874         }
16877 +static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
16879 +       xc->chip_id = cpu_to_chip_id(cpu);
16882  static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
16884         s64 rc;
16885 @@ -462,6 +467,7 @@ static const struct xive_ops xive_native_ops = {
16886         .match                  = xive_native_match,
16887         .shutdown               = xive_native_shutdown,
16888         .update_pending         = xive_native_update_pending,
16889 +       .prepare_cpu            = xive_native_prepare_cpu,
16890         .setup_cpu              = xive_native_setup_cpu,
16891         .teardown_cpu           = xive_native_teardown_cpu,
16892         .sync_source            = xive_native_sync_source,
16893 diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
16894 index 9cf57c722faa..6478be19b4d3 100644
16895 --- a/arch/powerpc/sysdev/xive/xive-internal.h
16896 +++ b/arch/powerpc/sysdev/xive/xive-internal.h
16897 @@ -46,6 +46,7 @@ struct xive_ops {
16898                                   u32 *sw_irq);
16899         int     (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
16900         void    (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
16901 +       void    (*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc);
16902         void    (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
16903         void    (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
16904         bool    (*match)(struct device_node *np);
16905 diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
16906 index 4515a10c5d22..d9522fc35ca5 100644
16907 --- a/arch/riscv/Kconfig
16908 +++ b/arch/riscv/Kconfig
16909 @@ -227,7 +227,7 @@ config ARCH_RV64I
16910         bool "RV64I"
16911         select 64BIT
16912         select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
16913 -       select HAVE_DYNAMIC_FTRACE if MMU
16914 +       select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
16915         select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
16916         select HAVE_FTRACE_MCOUNT_RECORD
16917         select HAVE_FUNCTION_GRAPH_TRACER
16918 diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
16919 index 7e2c78e2ca6b..d71f7c49a721 100644
16920 --- a/arch/riscv/kernel/probes/kprobes.c
16921 +++ b/arch/riscv/kernel/probes/kprobes.c
16922 @@ -260,8 +260,10 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
16924                 if (kcb->kprobe_status == KPROBE_REENTER)
16925                         restore_previous_kprobe(kcb);
16926 -               else
16927 +               else {
16928 +                       kprobes_restore_local_irqflag(kcb, regs);
16929                         reset_current_kprobe();
16930 +               }
16932                 break;
16933         case KPROBE_HIT_ACTIVE:
16934 diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
16935 index ea028d9e0d24..d44567490d91 100644
16936 --- a/arch/riscv/kernel/smp.c
16937 +++ b/arch/riscv/kernel/smp.c
16938 @@ -54,7 +54,7 @@ int riscv_hartid_to_cpuid(int hartid)
16939                         return i;
16941         pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
16942 -       return i;
16943 +       return -ENOENT;
16946  void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
16947 diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
16948 index 7b947728d57e..56007c763902 100644
16949 --- a/arch/s390/crypto/arch_random.c
16950 +++ b/arch/s390/crypto/arch_random.c
16951 @@ -54,6 +54,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
16953  bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
16955 +       /* max hunk is ARCH_RNG_BUF_SIZE */
16956 +       if (nbytes > ARCH_RNG_BUF_SIZE)
16957 +               return false;
16959         /* lock rng buffer */
16960         if (!spin_trylock(&arch_rng_lock))
16961                 return false;
16962 diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
16963 index d9215c7106f0..8fc52679543d 100644
16964 --- a/arch/s390/include/asm/qdio.h
16965 +++ b/arch/s390/include/asm/qdio.h
16966 @@ -246,21 +246,8 @@ struct slsb {
16967         u8 val[QDIO_MAX_BUFFERS_PER_Q];
16968  } __attribute__ ((packed, aligned(256)));
16970 -/**
16971 - * struct qdio_outbuf_state - SBAL related asynchronous operation information
16972 - *   (for communication with upper layer programs)
16973 - *   (only required for use with completion queues)
16974 - * @user: pointer to upper layer program's state information related to SBAL
16975 - *        (stored in user1 data of QAOB)
16976 - */
16977 -struct qdio_outbuf_state {
16978 -       void *user;
16981 -#define CHSC_AC1_INITIATE_INPUTQ       0x80
16984  /* qdio adapter-characteristics-1 flag */
16985 +#define CHSC_AC1_INITIATE_INPUTQ       0x80
16986  #define AC1_SIGA_INPUT_NEEDED          0x40    /* process input queues */
16987  #define AC1_SIGA_OUTPUT_NEEDED         0x20    /* process output queues */
16988  #define AC1_SIGA_SYNC_NEEDED           0x10    /* ask hypervisor to sync */
16989 @@ -338,7 +325,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
16990   * @int_parm: interruption parameter
16991   * @input_sbal_addr_array:  per-queue array, each element points to 128 SBALs
16992   * @output_sbal_addr_array: per-queue array, each element points to 128 SBALs
16993 - * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
16994   */
16995  struct qdio_initialize {
16996         unsigned char q_format;
16997 @@ -357,7 +343,6 @@ struct qdio_initialize {
16998         unsigned long int_parm;
16999         struct qdio_buffer ***input_sbal_addr_array;
17000         struct qdio_buffer ***output_sbal_addr_array;
17001 -       struct qdio_outbuf_state *output_sbal_state_array;
17002  };
17004  #define QDIO_STATE_INACTIVE            0x00000002 /* after qdio_cleanup */
17005 @@ -378,9 +363,10 @@ extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
17006  extern int qdio_establish(struct ccw_device *cdev,
17007                           struct qdio_initialize *init_data);
17008  extern int qdio_activate(struct ccw_device *);
17009 +extern struct qaob *qdio_allocate_aob(void);
17010  extern void qdio_release_aob(struct qaob *);
17011 -extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
17012 -                  unsigned int);
17013 +extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
17014 +                  unsigned int bufnr, unsigned int count, struct qaob *aob);
17015  extern int qdio_start_irq(struct ccw_device *cdev);
17016  extern int qdio_stop_irq(struct ccw_device *cdev);
17017  extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
17018 diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
17019 index a7eab7be4db0..5412efe328f8 100644
17020 --- a/arch/s390/kernel/dis.c
17021 +++ b/arch/s390/kernel/dis.c
17022 @@ -563,7 +563,7 @@ void show_code(struct pt_regs *regs)
17024  void print_fn_code(unsigned char *code, unsigned long len)
17026 -       char buffer[64], *ptr;
17027 +       char buffer[128], *ptr;
17028         int opsize, i;
17030         while (len) {
17031 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
17032 index 72134f9f6ff5..5aab59ad5688 100644
17033 --- a/arch/s390/kernel/setup.c
17034 +++ b/arch/s390/kernel/setup.c
17035 @@ -937,9 +937,9 @@ static int __init setup_hwcaps(void)
17036         if (MACHINE_HAS_VX) {
17037                 elf_hwcap |= HWCAP_S390_VXRS;
17038                 if (test_facility(134))
17039 -                       elf_hwcap |= HWCAP_S390_VXRS_EXT;
17040 -               if (test_facility(135))
17041                         elf_hwcap |= HWCAP_S390_VXRS_BCD;
17042 +               if (test_facility(135))
17043 +                       elf_hwcap |= HWCAP_S390_VXRS_EXT;
17044                 if (test_facility(148))
17045                         elf_hwcap |= HWCAP_S390_VXRS_EXT2;
17046                 if (test_facility(152))
17047 diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
17048 index 6d6b57059493..b9f85b2dc053 100644
17049 --- a/arch/s390/kvm/gaccess.c
17050 +++ b/arch/s390/kvm/gaccess.c
17051 @@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
17052   * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
17053   * @sg: pointer to the shadow guest address space structure
17054   * @saddr: faulting address in the shadow gmap
17055 - * @pgt: pointer to the page table address result
17056 + * @pgt: pointer to the beginning of the page table for the given address if
17057 + *      successful (return value 0), or to the first invalid DAT entry in
17058 + *      case of exceptions (return value > 0)
17059   * @fake: pgt references contiguous guest memory block, not a pgtable
17060   */
17061  static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17062 @@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17063                         rfte.val = ptr;
17064                         goto shadow_r2t;
17065                 }
17066 +               *pgt = ptr + vaddr.rfx * 8;
17067                 rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
17068                 if (rc)
17069                         return rc;
17070 @@ -1060,6 +1063,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17071                         rste.val = ptr;
17072                         goto shadow_r3t;
17073                 }
17074 +               *pgt = ptr + vaddr.rsx * 8;
17075                 rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
17076                 if (rc)
17077                         return rc;
17078 @@ -1087,6 +1091,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17079                         rtte.val = ptr;
17080                         goto shadow_sgt;
17081                 }
17082 +               *pgt = ptr + vaddr.rtx * 8;
17083                 rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
17084                 if (rc)
17085                         return rc;
17086 @@ -1123,6 +1128,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17087                         ste.val = ptr;
17088                         goto shadow_pgt;
17089                 }
17090 +               *pgt = ptr + vaddr.sx * 8;
17091                 rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
17092                 if (rc)
17093                         return rc;
17094 @@ -1157,6 +1163,8 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17095   * @vcpu: virtual cpu
17096   * @sg: pointer to the shadow guest address space structure
17097   * @saddr: faulting address in the shadow gmap
17098 + * @datptr: will contain the address of the faulting DAT table entry, or of
17099 + *         the valid leaf, plus some flags
17100   *
17101   * Returns: - 0 if the shadow fault was successfully resolved
17102   *         - > 0 (pgm exception code) on exceptions while faulting
17103 @@ -1165,11 +1173,11 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17104   *         - -ENOMEM if out of memory
17105   */
17106  int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
17107 -                         unsigned long saddr)
17108 +                         unsigned long saddr, unsigned long *datptr)
17110         union vaddress vaddr;
17111         union page_table_entry pte;
17112 -       unsigned long pgt;
17113 +       unsigned long pgt = 0;
17114         int dat_protection, fake;
17115         int rc;
17117 @@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
17118                 pte.val = pgt + vaddr.px * PAGE_SIZE;
17119                 goto shadow_page;
17120         }
17121 -       if (!rc)
17122 -               rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
17124 +       switch (rc) {
17125 +       case PGM_SEGMENT_TRANSLATION:
17126 +       case PGM_REGION_THIRD_TRANS:
17127 +       case PGM_REGION_SECOND_TRANS:
17128 +       case PGM_REGION_FIRST_TRANS:
17129 +               pgt |= PEI_NOT_PTE;
17130 +               break;
17131 +       case 0:
17132 +               pgt += vaddr.px * 8;
17133 +               rc = gmap_read_table(sg->parent, pgt, &pte.val);
17134 +       }
17135 +       if (datptr)
17136 +               *datptr = pgt | dat_protection * PEI_DAT_PROT;
17137         if (!rc && pte.i)
17138                 rc = PGM_PAGE_TRANSLATION;
17139         if (!rc && pte.z)
17140 diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
17141 index f4c51756c462..7c72a5e3449f 100644
17142 --- a/arch/s390/kvm/gaccess.h
17143 +++ b/arch/s390/kvm/gaccess.h
17144 @@ -18,17 +18,14 @@
17146  /**
17147   * kvm_s390_real_to_abs - convert guest real address to guest absolute address
17148 - * @vcpu - guest virtual cpu
17149 + * @prefix - guest prefix
17150   * @gra - guest real address
17151   *
17152   * Returns the guest absolute address that corresponds to the passed guest real
17153 - * address @gra of a virtual guest cpu by applying its prefix.
17154 + * address @gra of by applying the given prefix.
17155   */
17156 -static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17157 -                                                unsigned long gra)
17158 +static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
17160 -       unsigned long prefix  = kvm_s390_get_prefix(vcpu);
17162         if (gra < 2 * PAGE_SIZE)
17163                 gra += prefix;
17164         else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
17165 @@ -36,6 +33,43 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17166         return gra;
17169 +/**
17170 + * kvm_s390_real_to_abs - convert guest real address to guest absolute address
17171 + * @vcpu - guest virtual cpu
17172 + * @gra - guest real address
17173 + *
17174 + * Returns the guest absolute address that corresponds to the passed guest real
17175 + * address @gra of a virtual guest cpu by applying its prefix.
17176 + */
17177 +static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17178 +                                                unsigned long gra)
17180 +       return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
17183 +/**
17184 + * _kvm_s390_logical_to_effective - convert guest logical to effective address
17185 + * @psw: psw of the guest
17186 + * @ga: guest logical address
17187 + *
17188 + * Convert a guest logical address to an effective address by applying the
17189 + * rules of the addressing mode defined by bits 31 and 32 of the given PSW
17190 + * (extendended/basic addressing mode).
17191 + *
17192 + * Depending on the addressing mode, the upper 40 bits (24 bit addressing
17193 + * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
17194 + * mode) of @ga will be zeroed and the remaining bits will be returned.
17195 + */
17196 +static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
17197 +                                                          unsigned long ga)
17199 +       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
17200 +               return ga;
17201 +       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
17202 +               return ga & ((1UL << 31) - 1);
17203 +       return ga & ((1UL << 24) - 1);
17206  /**
17207   * kvm_s390_logical_to_effective - convert guest logical to effective address
17208   * @vcpu: guest virtual cpu
17209 @@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17210  static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
17211                                                           unsigned long ga)
17213 -       psw_t *psw = &vcpu->arch.sie_block->gpsw;
17215 -       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
17216 -               return ga;
17217 -       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
17218 -               return ga & ((1UL << 31) - 1);
17219 -       return ga & ((1UL << 24) - 1);
17220 +       return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
17223  /*
17224 @@ -359,7 +387,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
17225  int ipte_lock_held(struct kvm_vcpu *vcpu);
17226  int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
17228 +/* MVPG PEI indication bits */
17229 +#define PEI_DAT_PROT 2
17230 +#define PEI_NOT_PTE 4
17232  int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
17233 -                         unsigned long saddr);
17234 +                         unsigned long saddr, unsigned long *datptr);
17236  #endif /* __KVM_S390_GACCESS_H */
17237 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
17238 index 2f09e9d7dc95..24ad447e648c 100644
17239 --- a/arch/s390/kvm/kvm-s390.c
17240 +++ b/arch/s390/kvm/kvm-s390.c
17241 @@ -4307,16 +4307,16 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
17242         kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
17243         kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
17244         if (MACHINE_HAS_GS) {
17245 +               preempt_disable();
17246                 __ctl_set_bit(2, 4);
17247                 if (vcpu->arch.gs_enabled)
17248                         save_gs_cb(current->thread.gs_cb);
17249 -               preempt_disable();
17250                 current->thread.gs_cb = vcpu->arch.host_gscb;
17251                 restore_gs_cb(vcpu->arch.host_gscb);
17252 -               preempt_enable();
17253                 if (!vcpu->arch.host_gscb)
17254                         __ctl_clear_bit(2, 4);
17255                 vcpu->arch.host_gscb = NULL;
17256 +               preempt_enable();
17257         }
17258         /* SIE will save etoken directly into SDNX and therefore kvm_run */
17260 diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
17261 index bd803e091918..4002a24bc43a 100644
17262 --- a/arch/s390/kvm/vsie.c
17263 +++ b/arch/s390/kvm/vsie.c
17264 @@ -417,11 +417,6 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17265                 memcpy((void *)((u64)scb_o + 0xc0),
17266                        (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
17267                 break;
17268 -       case ICPT_PARTEXEC:
17269 -               /* MVPG only */
17270 -               memcpy((void *)((u64)scb_o + 0xc0),
17271 -                      (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
17272 -               break;
17273         }
17275         if (scb_s->ihcpu != 0xffffU)
17276 @@ -620,10 +615,10 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17277         /* with mso/msl, the prefix lies at offset *mso* */
17278         prefix += scb_s->mso;
17280 -       rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
17281 +       rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
17282         if (!rc && (scb_s->ecb & ECB_TE))
17283                 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17284 -                                          prefix + PAGE_SIZE);
17285 +                                          prefix + PAGE_SIZE, NULL);
17286         /*
17287          * We don't have to mprotect, we will be called for all unshadows.
17288          * SIE will detect if protection applies and trigger a validity.
17289 @@ -914,7 +909,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17290                                     current->thread.gmap_addr, 1);
17292         rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17293 -                                  current->thread.gmap_addr);
17294 +                                  current->thread.gmap_addr, NULL);
17295         if (rc > 0) {
17296                 rc = inject_fault(vcpu, rc,
17297                                   current->thread.gmap_addr,
17298 @@ -936,7 +931,7 @@ static void handle_last_fault(struct kvm_vcpu *vcpu,
17300         if (vsie_page->fault_addr)
17301                 kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17302 -                                     vsie_page->fault_addr);
17303 +                                     vsie_page->fault_addr, NULL);
17304         vsie_page->fault_addr = 0;
17307 @@ -983,6 +978,98 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17308         return 0;
17312 + * Get a register for a nested guest.
17313 + * @vcpu the vcpu of the guest
17314 + * @vsie_page the vsie_page for the nested guest
17315 + * @reg the register number, the upper 4 bits are ignored.
17316 + * returns: the value of the register.
17317 + */
17318 +static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
17320 +       /* no need to validate the parameter and/or perform error handling */
17321 +       reg &= 0xf;
17322 +       switch (reg) {
17323 +       case 15:
17324 +               return vsie_page->scb_s.gg15;
17325 +       case 14:
17326 +               return vsie_page->scb_s.gg14;
17327 +       default:
17328 +               return vcpu->run->s.regs.gprs[reg];
17329 +       }
17332 +static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17334 +       struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
17335 +       unsigned long pei_dest, pei_src, src, dest, mask, prefix;
17336 +       u64 *pei_block = &vsie_page->scb_o->mcic;
17337 +       int edat, rc_dest, rc_src;
17338 +       union ctlreg0 cr0;
17340 +       cr0.val = vcpu->arch.sie_block->gcr[0];
17341 +       edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
17342 +       mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
17343 +       prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
17345 +       dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
17346 +       dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
17347 +       src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
17348 +       src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
17350 +       rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
17351 +       rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
17352 +       /*
17353 +        * Either everything went well, or something non-critical went wrong
17354 +        * e.g. because of a race. In either case, simply retry.
17355 +        */
17356 +       if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
17357 +               retry_vsie_icpt(vsie_page);
17358 +               return -EAGAIN;
17359 +       }
17360 +       /* Something more serious went wrong, propagate the error */
17361 +       if (rc_dest < 0)
17362 +               return rc_dest;
17363 +       if (rc_src < 0)
17364 +               return rc_src;
17366 +       /* The only possible suppressing exception: just deliver it */
17367 +       if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
17368 +               clear_vsie_icpt(vsie_page);
17369 +               rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
17370 +               WARN_ON_ONCE(rc_dest);
17371 +               return 1;
17372 +       }
17374 +       /*
17375 +        * Forward the PEI intercept to the guest if it was a page fault, or
17376 +        * also for segment and region table faults if EDAT applies.
17377 +        */
17378 +       if (edat) {
17379 +               rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
17380 +               rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
17381 +       } else {
17382 +               rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
17383 +               rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
17384 +       }
17385 +       if (!rc_dest && !rc_src) {
17386 +               pei_block[0] = pei_dest;
17387 +               pei_block[1] = pei_src;
17388 +               return 1;
17389 +       }
17391 +       retry_vsie_icpt(vsie_page);
17393 +       /*
17394 +        * The host has edat, and the guest does not, or it was an ASCE type
17395 +        * exception. The host needs to inject the appropriate DAT interrupts
17396 +        * into the guest.
17397 +        */
17398 +       if (rc_dest)
17399 +               return inject_fault(vcpu, rc_dest, dest, 1);
17400 +       return inject_fault(vcpu, rc_src, src, 0);
17403  /*
17404   * Run the vsie on a shadow scb and a shadow gmap, without any further
17405   * sanity checks, handling SIE faults.
17406 @@ -1071,6 +1158,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17407                 if ((scb_s->ipa & 0xf000) != 0xf000)
17408                         scb_s->ipa += 0x1000;
17409                 break;
17410 +       case ICPT_PARTEXEC:
17411 +               if (scb_s->ipa == 0xb254)
17412 +                       rc = vsie_handle_mvpg(vcpu, vsie_page);
17413 +               break;
17414         }
17415         return rc;
17417 diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
17418 index f5beecdac693..e76b22157099 100644
17419 --- a/arch/sh/kernel/traps.c
17420 +++ b/arch/sh/kernel/traps.c
17421 @@ -180,7 +180,6 @@ static inline void arch_ftrace_nmi_exit(void) { }
17423  BUILD_TRAP_HANDLER(nmi)
17425 -       unsigned int cpu = smp_processor_id();
17426         TRAP_HANDLER_DECL;
17428         arch_ftrace_nmi_enter();
17429 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
17430 index 2792879d398e..ab2e8502c27c 100644
17431 --- a/arch/x86/Kconfig
17432 +++ b/arch/x86/Kconfig
17433 @@ -163,6 +163,7 @@ config X86
17434         select HAVE_ARCH_TRACEHOOK
17435         select HAVE_ARCH_TRANSPARENT_HUGEPAGE
17436         select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
17437 +       select HAVE_ARCH_PARENT_PMD_YOUNG       if X86_64
17438         select HAVE_ARCH_USERFAULTFD_WP         if X86_64 && USERFAULTFD
17439         select HAVE_ARCH_VMAP_STACK             if X86_64
17440         select HAVE_ARCH_WITHIN_STACK_FRAMES
17441 @@ -571,6 +572,7 @@ config X86_UV
17442         depends on X86_EXTENDED_PLATFORM
17443         depends on NUMA
17444         depends on EFI
17445 +       depends on KEXEC_CORE
17446         depends on X86_X2APIC
17447         depends on PCI
17448         help
17449 @@ -1406,7 +1408,7 @@ config HIGHMEM4G
17451  config HIGHMEM64G
17452         bool "64GB"
17453 -       depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
17454 +       depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
17455         select X86_PAE
17456         help
17457           Select this if you have a 32-bit processor and more than 4
17458 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
17459 index 814fe0d349b0..872b9cf598e3 100644
17460 --- a/arch/x86/Kconfig.cpu
17461 +++ b/arch/x86/Kconfig.cpu
17462 @@ -157,7 +157,7 @@ config MPENTIUM4
17465  config MK6
17466 -       bool "K6/K6-II/K6-III"
17467 +       bool "AMD K6/K6-II/K6-III"
17468         depends on X86_32
17469         help
17470           Select this for an AMD K6-family processor.  Enables use of
17471 @@ -165,7 +165,7 @@ config MK6
17472           flags to GCC.
17474  config MK7
17475 -       bool "Athlon/Duron/K7"
17476 +       bool "AMD Athlon/Duron/K7"
17477         depends on X86_32
17478         help
17479           Select this for an AMD Athlon K7-family processor.  Enables use of
17480 @@ -173,12 +173,98 @@ config MK7
17481           flags to GCC.
17483  config MK8
17484 -       bool "Opteron/Athlon64/Hammer/K8"
17485 +       bool "AMD Opteron/Athlon64/Hammer/K8"
17486         help
17487           Select this for an AMD Opteron or Athlon64 Hammer-family processor.
17488           Enables use of some extended instructions, and passes appropriate
17489           optimization flags to GCC.
17491 +config MK8SSE3
17492 +       bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
17493 +       help
17494 +         Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
17495 +         Enables use of some extended instructions, and passes appropriate
17496 +         optimization flags to GCC.
17498 +config MK10
17499 +       bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
17500 +       help
17501 +         Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
17502 +         Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
17503 +         Enables use of some extended instructions, and passes appropriate
17504 +         optimization flags to GCC.
17506 +config MBARCELONA
17507 +       bool "AMD Barcelona"
17508 +       help
17509 +         Select this for AMD Family 10h Barcelona processors.
17511 +         Enables -march=barcelona
17513 +config MBOBCAT
17514 +       bool "AMD Bobcat"
17515 +       help
17516 +         Select this for AMD Family 14h Bobcat processors.
17518 +         Enables -march=btver1
17520 +config MJAGUAR
17521 +       bool "AMD Jaguar"
17522 +       help
17523 +         Select this for AMD Family 16h Jaguar processors.
17525 +         Enables -march=btver2
17527 +config MBULLDOZER
17528 +       bool "AMD Bulldozer"
17529 +       help
17530 +         Select this for AMD Family 15h Bulldozer processors.
17532 +         Enables -march=bdver1
17534 +config MPILEDRIVER
17535 +       bool "AMD Piledriver"
17536 +       help
17537 +         Select this for AMD Family 15h Piledriver processors.
17539 +         Enables -march=bdver2
17541 +config MSTEAMROLLER
17542 +       bool "AMD Steamroller"
17543 +       help
17544 +         Select this for AMD Family 15h Steamroller processors.
17546 +         Enables -march=bdver3
17548 +config MEXCAVATOR
17549 +       bool "AMD Excavator"
17550 +       help
17551 +         Select this for AMD Family 15h Excavator processors.
17553 +         Enables -march=bdver4
17555 +config MZEN
17556 +       bool "AMD Zen"
17557 +       help
17558 +         Select this for AMD Family 17h Zen processors.
17560 +         Enables -march=znver1
17562 +config MZEN2
17563 +       bool "AMD Zen 2"
17564 +       help
17565 +         Select this for AMD Family 17h Zen 2 processors.
17567 +         Enables -march=znver2
17569 +config MZEN3
17570 +       bool "AMD Zen 3"
17571 +       depends on GCC_VERSION > 100300
17572 +       help
17573 +         Select this for AMD Family 19h Zen 3 processors.
17575 +         Enables -march=znver3
17577  config MCRUSOE
17578         bool "Crusoe"
17579         depends on X86_32
17580 @@ -270,7 +356,7 @@ config MPSC
17581           in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
17583  config MCORE2
17584 -       bool "Core 2/newer Xeon"
17585 +       bool "Intel Core 2"
17586         help
17588           Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
17589 @@ -278,6 +364,8 @@ config MCORE2
17590           family in /proc/cpuinfo. Newer ones have 6 and older ones 15
17591           (not a typo)
17593 +         Enables -march=core2
17595  config MATOM
17596         bool "Intel Atom"
17597         help
17598 @@ -287,6 +375,182 @@ config MATOM
17599           accordingly optimized code. Use a recent GCC with specific Atom
17600           support in order to fully benefit from selecting this option.
17602 +config MNEHALEM
17603 +       bool "Intel Nehalem"
17604 +       select X86_P6_NOP
17605 +       help
17607 +         Select this for 1st Gen Core processors in the Nehalem family.
17609 +         Enables -march=nehalem
17611 +config MWESTMERE
17612 +       bool "Intel Westmere"
17613 +       select X86_P6_NOP
17614 +       help
17616 +         Select this for the Intel Westmere formerly Nehalem-C family.
17618 +         Enables -march=westmere
17620 +config MSILVERMONT
17621 +       bool "Intel Silvermont"
17622 +       select X86_P6_NOP
17623 +       help
17625 +         Select this for the Intel Silvermont platform.
17627 +         Enables -march=silvermont
17629 +config MGOLDMONT
17630 +       bool "Intel Goldmont"
17631 +       select X86_P6_NOP
17632 +       help
17634 +         Select this for the Intel Goldmont platform including Apollo Lake and Denverton.
17636 +         Enables -march=goldmont
17638 +config MGOLDMONTPLUS
17639 +       bool "Intel Goldmont Plus"
17640 +       select X86_P6_NOP
17641 +       help
17643 +         Select this for the Intel Goldmont Plus platform including Gemini Lake.
17645 +         Enables -march=goldmont-plus
17647 +config MSANDYBRIDGE
17648 +       bool "Intel Sandy Bridge"
17649 +       select X86_P6_NOP
17650 +       help
17652 +         Select this for 2nd Gen Core processors in the Sandy Bridge family.
17654 +         Enables -march=sandybridge
17656 +config MIVYBRIDGE
17657 +       bool "Intel Ivy Bridge"
17658 +       select X86_P6_NOP
17659 +       help
17661 +         Select this for 3rd Gen Core processors in the Ivy Bridge family.
17663 +         Enables -march=ivybridge
17665 +config MHASWELL
17666 +       bool "Intel Haswell"
17667 +       select X86_P6_NOP
17668 +       help
17670 +         Select this for 4th Gen Core processors in the Haswell family.
17672 +         Enables -march=haswell
17674 +config MBROADWELL
17675 +       bool "Intel Broadwell"
17676 +       select X86_P6_NOP
17677 +       help
17679 +         Select this for 5th Gen Core processors in the Broadwell family.
17681 +         Enables -march=broadwell
17683 +config MSKYLAKE
17684 +       bool "Intel Skylake"
17685 +       select X86_P6_NOP
17686 +       help
17688 +         Select this for 6th Gen Core processors in the Skylake family.
17690 +         Enables -march=skylake
17692 +config MSKYLAKEX
17693 +       bool "Intel Skylake X"
17694 +       select X86_P6_NOP
17695 +       help
17697 +         Select this for 6th Gen Core processors in the Skylake X family.
17699 +         Enables -march=skylake-avx512
17701 +config MCANNONLAKE
17702 +       bool "Intel Cannon Lake"
17703 +       select X86_P6_NOP
17704 +       help
17706 +         Select this for 8th Gen Core processors
17708 +         Enables -march=cannonlake
17710 +config MICELAKE
17711 +       bool "Intel Ice Lake"
17712 +       select X86_P6_NOP
17713 +       help
17715 +         Select this for 10th Gen Core processors in the Ice Lake family.
17717 +         Enables -march=icelake-client
17719 +config MCASCADELAKE
17720 +       bool "Intel Cascade Lake"
17721 +       select X86_P6_NOP
17722 +       help
17724 +         Select this for Xeon processors in the Cascade Lake family.
17726 +         Enables -march=cascadelake
17728 +config MCOOPERLAKE
17729 +       bool "Intel Cooper Lake"
17730 +       depends on GCC_VERSION > 100100
17731 +       select X86_P6_NOP
17732 +       help
17734 +         Select this for Xeon processors in the Cooper Lake family.
17736 +         Enables -march=cooperlake
17738 +config MTIGERLAKE
17739 +       bool "Intel Tiger Lake"
17740 +       depends on GCC_VERSION > 100100
17741 +       select X86_P6_NOP
17742 +       help
17744 +         Select this for third-generation 10 nm process processors in the Tiger Lake family.
17746 +         Enables -march=tigerlake
17748 +config MSAPPHIRERAPIDS
17749 +       bool "Intel Sapphire Rapids"
17750 +       depends on GCC_VERSION > 110000
17751 +       select X86_P6_NOP
17752 +       help
17754 +         Select this for third-generation 10 nm process processors in the Sapphire Rapids family.
17756 +         Enables -march=sapphirerapids
17758 +config MROCKETLAKE
17759 +       bool "Intel Rocket Lake"
17760 +       depends on GCC_VERSION > 110000
17761 +       select X86_P6_NOP
17762 +       help
17764 +         Select this for eleventh-generation processors in the Rocket Lake family.
17766 +         Enables -march=rocketlake
17768 +config MALDERLAKE
17769 +       bool "Intel Alder Lake"
17770 +       depends on GCC_VERSION > 110000
17771 +       select X86_P6_NOP
17772 +       help
17774 +         Select this for twelfth-generation processors in the Alder Lake family.
17776 +         Enables -march=alderlake
17778  config GENERIC_CPU
17779         bool "Generic-x86-64"
17780         depends on X86_64
17781 @@ -294,6 +558,50 @@ config GENERIC_CPU
17782           Generic x86-64 CPU.
17783           Run equally well on all x86-64 CPUs.
17785 +config GENERIC_CPU2
17786 +       bool "Generic-x86-64-v2"
17787 +       depends on GCC_VERSION > 110000
17788 +       depends on X86_64
17789 +       help
17790 +         Generic x86-64 CPU.
17791 +         Run equally well on all x86-64 CPUs with min support of x86-64-v2.
17793 +config GENERIC_CPU3
17794 +       bool "Generic-x86-64-v3"
17795 +       depends on GCC_VERSION > 110000
17796 +       depends on X86_64
17797 +       help
17798 +         Generic x86-64-v3 CPU with v3 instructions.
17799 +         Run equally well on all x86-64 CPUs with min support of x86-64-v3.
17801 +config GENERIC_CPU4
17802 +       bool "Generic-x86-64-v4"
17803 +       depends on GCC_VERSION > 110000
17804 +       depends on X86_64
17805 +       help
17806 +         Generic x86-64 CPU with v4 instructions.
17807 +         Run equally well on all x86-64 CPUs with min support of x86-64-v4.
17809 +config MNATIVE_INTEL
17810 +       bool "Intel-Native optimizations autodetected by GCC"
17811 +       help
17813 +         GCC 4.2 and above support -march=native, which automatically detects
17814 +         the optimum settings to use based on your processor. Do NOT use this
17815 +         for AMD CPUs.  Intel Only!
17817 +         Enables -march=native
17819 +config MNATIVE_AMD
17820 +       bool "AMD-Native optimizations autodetected by GCC"
17821 +       help
17823 +         GCC 4.2 and above support -march=native, which automatically detects
17824 +         the optimum settings to use based on your processor. Do NOT use this
17825 +         for Intel CPUs.  AMD Only!
17827 +         Enables -march=native
17829  endchoice
17831  config X86_GENERIC
17832 @@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT
17833  config X86_L1_CACHE_SHIFT
17834         int
17835         default "7" if MPENTIUM4 || MPSC
17836 -       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
17837 +       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4
17838         default "4" if MELAN || M486SX || M486 || MGEODEGX1
17839         default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
17841 @@ -336,11 +644,11 @@ config X86_ALIGNMENT_16
17843  config X86_INTEL_USERCOPY
17844         def_bool y
17845 -       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
17846 +       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL
17848  config X86_USE_PPRO_CHECKSUM
17849         def_bool y
17850 -       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
17851 +       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
17853  config X86_USE_3DNOW
17854         def_bool y
17855 @@ -360,26 +668,26 @@ config X86_USE_3DNOW
17856  config X86_P6_NOP
17857         def_bool y
17858         depends on X86_64
17859 -       depends on (MCORE2 || MPENTIUM4 || MPSC)
17860 +       depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL)
17862  config X86_TSC
17863         def_bool y
17864 -       depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
17865 +       depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64
17867  config X86_CMPXCHG64
17868         def_bool y
17869 -       depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
17870 +       depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
17872  # this should be set for all -march=.. options where the compiler
17873  # generates cmov.
17874  config X86_CMOV
17875         def_bool y
17876 -       depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
17877 +       depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
17879  config X86_MINIMUM_CPU_FAMILY
17880         int
17881         default "64" if X86_64
17882 -       default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
17883 +       default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
17884         default "5" if X86_32 && X86_CMPXCHG64
17885         default "4"
17887 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
17888 index 9a85eae37b17..ee0cd507af8b 100644
17889 --- a/arch/x86/Makefile
17890 +++ b/arch/x86/Makefile
17891 @@ -33,6 +33,7 @@ REALMODE_CFLAGS += -ffreestanding
17892  REALMODE_CFLAGS += -fno-stack-protector
17893  REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
17894  REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
17895 +REALMODE_CFLAGS += $(CLANG_FLAGS)
17896  export REALMODE_CFLAGS
17898  # BITS is used as extension for files which are available in a 32 bit
17899 @@ -113,11 +114,48 @@ else
17900          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
17901          cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
17902          cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
17904 -        cflags-$(CONFIG_MCORE2) += \
17905 -                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
17906 -       cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
17907 -               $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
17908 +        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3)
17909 +        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
17910 +        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
17911 +        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
17912 +        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
17913 +        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
17914 +        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
17915 +        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-mno-tbm)
17916 +        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
17917 +        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-mno-tbm)
17918 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
17919 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
17920 +        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
17921 +        cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
17922 +        cflags-$(CONFIG_MZEN3) += $(call cc-option,-march=znver3)
17924 +        cflags-$(CONFIG_MNATIVE_INTEL) += $(call cc-option,-march=native)
17925 +        cflags-$(CONFIG_MNATIVE_AMD) += $(call cc-option,-march=native)
17926 +        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell)
17927 +        cflags-$(CONFIG_MCORE2) += $(call cc-option,-march=core2)
17928 +        cflags-$(CONFIG_MNEHALEM) += $(call cc-option,-march=nehalem)
17929 +        cflags-$(CONFIG_MWESTMERE) += $(call cc-option,-march=westmere)
17930 +        cflags-$(CONFIG_MSILVERMONT) += $(call cc-option,-march=silvermont)
17931 +        cflags-$(CONFIG_MGOLDMONT) += $(call cc-option,-march=goldmont)
17932 +        cflags-$(CONFIG_MGOLDMONTPLUS) += $(call cc-option,-march=goldmont-plus)
17933 +        cflags-$(CONFIG_MSANDYBRIDGE) += $(call cc-option,-march=sandybridge)
17934 +        cflags-$(CONFIG_MIVYBRIDGE) += $(call cc-option,-march=ivybridge)
17935 +        cflags-$(CONFIG_MHASWELL) += $(call cc-option,-march=haswell)
17936 +        cflags-$(CONFIG_MBROADWELL) += $(call cc-option,-march=broadwell)
17937 +        cflags-$(CONFIG_MSKYLAKE) += $(call cc-option,-march=skylake)
17938 +        cflags-$(CONFIG_MSKYLAKEX) += $(call cc-option,-march=skylake-avx512)
17939 +        cflags-$(CONFIG_MCANNONLAKE) += $(call cc-option,-march=cannonlake)
17940 +        cflags-$(CONFIG_MICELAKE) += $(call cc-option,-march=icelake-client)
17941 +        cflags-$(CONFIG_MCASCADELAKE) += $(call cc-option,-march=cascadelake)
17942 +        cflags-$(CONFIG_MCOOPERLAKE) += $(call cc-option,-march=cooperlake)
17943 +        cflags-$(CONFIG_MTIGERLAKE) += $(call cc-option,-march=tigerlake)
17944 +        cflags-$(CONFIG_MSAPPHIRERAPIDS) += $(call cc-option,-march=sapphirerapids)
17945 +        cflags-$(CONFIG_MROCKETLAKE) += $(call cc-option,-march=rocketlake)
17946 +        cflags-$(CONFIG_MALDERLAKE) += $(call cc-option,-march=alderlake)
17947 +        cflags-$(CONFIG_GENERIC_CPU2) += $(call cc-option,-march=x86-64-v2)
17948 +        cflags-$(CONFIG_GENERIC_CPU3) += $(call cc-option,-march=x86-64-v3)
17949 +        cflags-$(CONFIG_GENERIC_CPU4) += $(call cc-option,-march=x86-64-v4)
17950          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
17951          KBUILD_CFLAGS += $(cflags-y)
17953 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
17954 index e0bc3988c3fa..6e5522aebbbd 100644
17955 --- a/arch/x86/boot/compressed/Makefile
17956 +++ b/arch/x86/boot/compressed/Makefile
17957 @@ -46,6 +46,7 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
17958  # Disable relocation relaxation in case the link is not PIE.
17959  KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
17960  KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
17961 +KBUILD_CFLAGS += $(CLANG_FLAGS)
17963  # sev-es.c indirectly inludes inat-table.h which is generated during
17964  # compilation and stored in $(objtree). Add the directory to the includes so
17965 diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
17966 index aa561795efd1..a6dea4e8a082 100644
17967 --- a/arch/x86/boot/compressed/mem_encrypt.S
17968 +++ b/arch/x86/boot/compressed/mem_encrypt.S
17969 @@ -23,12 +23,6 @@ SYM_FUNC_START(get_sev_encryption_bit)
17970         push    %ecx
17971         push    %edx
17973 -       /* Check if running under a hypervisor */
17974 -       movl    $1, %eax
17975 -       cpuid
17976 -       bt      $31, %ecx               /* Check the hypervisor bit */
17977 -       jnc     .Lno_sev
17979         movl    $0x80000000, %eax       /* CPUID to check the highest leaf */
17980         cpuid
17981         cmpl    $0x8000001f, %eax       /* See if 0x8000001f is available */
17982 diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
17983 index 646da46e8d10..1dfb8af48a3c 100644
17984 --- a/arch/x86/crypto/poly1305_glue.c
17985 +++ b/arch/x86/crypto/poly1305_glue.c
17986 @@ -16,7 +16,7 @@
17987  #include <asm/simd.h>
17989  asmlinkage void poly1305_init_x86_64(void *ctx,
17990 -                                    const u8 key[POLY1305_KEY_SIZE]);
17991 +                                    const u8 key[POLY1305_BLOCK_SIZE]);
17992  asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
17993                                        const size_t len, const u32 padbit);
17994  asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
17995 @@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx)
17996         state->is_base2_26 = 0;
17999 -static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
18000 +static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
18002         poly1305_init_x86_64(ctx, key);
18004 @@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
18005                 poly1305_emit_avx(ctx, mac, nonce);
18008 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
18009 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
18011         poly1305_simd_init(&dctx->h, key);
18012         dctx->s[0] = get_unaligned_le32(&key[16]);
18013 diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
18014 index a1c9f496fca6..4d0111f44d79 100644
18015 --- a/arch/x86/entry/syscalls/syscall_32.tbl
18016 +++ b/arch/x86/entry/syscalls/syscall_32.tbl
18017 @@ -447,3 +447,7 @@
18018  440    i386    process_madvise         sys_process_madvise
18019  441    i386    epoll_pwait2            sys_epoll_pwait2                compat_sys_epoll_pwait2
18020  442    i386    mount_setattr           sys_mount_setattr
18021 +443    i386    futex_wait              sys_futex_wait
18022 +444    i386    futex_wake              sys_futex_wake
18023 +445    i386    futex_waitv             sys_futex_waitv                 compat_sys_futex_waitv
18024 +446    i386    futex_requeue           sys_futex_requeue               compat_sys_futex_requeue
18025 diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
18026 index 7bf01cbe582f..61c0b47365e3 100644
18027 --- a/arch/x86/entry/syscalls/syscall_64.tbl
18028 +++ b/arch/x86/entry/syscalls/syscall_64.tbl
18029 @@ -364,6 +364,10 @@
18030  440    common  process_madvise         sys_process_madvise
18031  441    common  epoll_pwait2            sys_epoll_pwait2
18032  442    common  mount_setattr           sys_mount_setattr
18033 +443    common  futex_wait              sys_futex_wait
18034 +444    common  futex_wake              sys_futex_wake
18035 +445    common  futex_waitv             sys_futex_waitv
18036 +446    common  futex_requeue           sys_futex_requeue
18039  # Due to a historical design error, certain syscalls are numbered differently
18040 diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
18041 index 1c7cfac7e64a..5264daa8859f 100644
18042 --- a/arch/x86/entry/vdso/vdso2c.h
18043 +++ b/arch/x86/entry/vdso/vdso2c.h
18044 @@ -35,7 +35,7 @@ static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
18045         if (offset + len > data_len)
18046                 fail("section to extract overruns input data");
18048 -       fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len);
18049 +       fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len);
18050         BITSFUNC(copy)(outfile, data + offset, len);
18051         fprintf(outfile, "\n};\n\n");
18053 diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
18054 index be50ef8572cc..6a98a7651621 100644
18055 --- a/arch/x86/events/amd/iommu.c
18056 +++ b/arch/x86/events/amd/iommu.c
18057 @@ -81,12 +81,12 @@ static struct attribute_group amd_iommu_events_group = {
18058  };
18060  struct amd_iommu_event_desc {
18061 -       struct kobj_attribute attr;
18062 +       struct device_attribute attr;
18063         const char *event;
18064  };
18066 -static ssize_t _iommu_event_show(struct kobject *kobj,
18067 -                               struct kobj_attribute *attr, char *buf)
18068 +static ssize_t _iommu_event_show(struct device *dev,
18069 +                               struct device_attribute *attr, char *buf)
18071         struct amd_iommu_event_desc *event =
18072                 container_of(attr, struct amd_iommu_event_desc, attr);
18073 diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
18074 index 7f014d450bc2..582c0ffb5e98 100644
18075 --- a/arch/x86/events/amd/uncore.c
18076 +++ b/arch/x86/events/amd/uncore.c
18077 @@ -275,14 +275,14 @@ static struct attribute_group amd_uncore_attr_group = {
18078  };
18080  #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                        \
18081 -static ssize_t __uncore_##_var##_show(struct kobject *kobj,            \
18082 -                               struct kobj_attribute *attr,            \
18083 +static ssize_t __uncore_##_var##_show(struct device *dev,              \
18084 +                               struct device_attribute *attr,          \
18085                                 char *page)                             \
18086  {                                                                      \
18087         BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
18088         return sprintf(page, _format "\n");                             \
18089  }                                                                      \
18090 -static struct kobj_attribute format_attr_##_var =                      \
18091 +static struct device_attribute format_attr_##_var =                    \
18092         __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
18094  DEFINE_UNCORE_FORMAT_ATTR(event12,     event,          "config:0-7,32-35");
18095 diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
18096 index 5eb3bdf36a41..06b0789d61b9 100644
18097 --- a/arch/x86/include/asm/idtentry.h
18098 +++ b/arch/x86/include/asm/idtentry.h
18099 @@ -588,6 +588,21 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC,  xenpv_exc_machine_check);
18100  #endif
18102  /* NMI */
18104 +#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
18106 + * Special NOIST entry point for VMX which invokes this on the kernel
18107 + * stack. asm_exc_nmi() requires an IST to work correctly vs. the NMI
18108 + * 'executing' marker.
18109 + *
18110 + * On 32bit this just uses the regular NMI entry point because 32-bit does
18111 + * not have ISTs.
18112 + */
18113 +DECLARE_IDTENTRY(X86_TRAP_NMI,         exc_nmi_noist);
18114 +#else
18115 +#define asm_exc_nmi_noist              asm_exc_nmi
18116 +#endif
18118  DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,     exc_nmi);
18119  #ifdef CONFIG_XEN_PV
18120  DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,     xenpv_exc_nmi);
18121 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
18122 index 3768819693e5..eec2dcca2f39 100644
18123 --- a/arch/x86/include/asm/kvm_host.h
18124 +++ b/arch/x86/include/asm/kvm_host.h
18125 @@ -1753,6 +1753,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
18126                     unsigned long icr, int op_64_bit);
18128  void kvm_define_user_return_msr(unsigned index, u32 msr);
18129 +int kvm_probe_user_return_msr(u32 msr);
18130  int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
18132  u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
18133 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18134 index a02c67291cfc..a6b5cfe1fc5a 100644
18135 --- a/arch/x86/include/asm/pgtable.h
18136 +++ b/arch/x86/include/asm/pgtable.h
18137 @@ -846,7 +846,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
18139  static inline int pmd_bad(pmd_t pmd)
18141 -       return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
18142 +       return ((pmd_flags(pmd) | _PAGE_ACCESSED) & ~_PAGE_USER) != _KERNPG_TABLE;
18145  static inline unsigned long pages_to_mb(unsigned long npg)
18146 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18147 index f1b9ed5efaa9..908bcaea1361 100644
18148 --- a/arch/x86/include/asm/processor.h
18149 +++ b/arch/x86/include/asm/processor.h
18150 @@ -804,8 +804,10 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
18152  #ifdef CONFIG_CPU_SUP_AMD
18153  extern u32 amd_get_nodes_per_socket(void);
18154 +extern u32 amd_get_highest_perf(void);
18155  #else
18156  static inline u32 amd_get_nodes_per_socket(void)       { return 0; }
18157 +static inline u32 amd_get_highest_perf(void)           { return 0; }
18158  #endif
18160  static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18161 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
18162 index 75884d2cdec3..4e6a08d4c7e5 100644
18163 --- a/arch/x86/include/asm/vermagic.h
18164 +++ b/arch/x86/include/asm/vermagic.h
18165 @@ -17,6 +17,48 @@
18166  #define MODULE_PROC_FAMILY "586MMX "
18167  #elif defined CONFIG_MCORE2
18168  #define MODULE_PROC_FAMILY "CORE2 "
18169 +#elif defined CONFIG_MNATIVE_INTEL
18170 +#define MODULE_PROC_FAMILY "NATIVE_INTEL "
18171 +#elif defined CONFIG_MNATIVE_AMD
18172 +#define MODULE_PROC_FAMILY "NATIVE_AMD "
18173 +#elif defined CONFIG_MNEHALEM
18174 +#define MODULE_PROC_FAMILY "NEHALEM "
18175 +#elif defined CONFIG_MWESTMERE
18176 +#define MODULE_PROC_FAMILY "WESTMERE "
18177 +#elif defined CONFIG_MSILVERMONT
18178 +#define MODULE_PROC_FAMILY "SILVERMONT "
18179 +#elif defined CONFIG_MGOLDMONT
18180 +#define MODULE_PROC_FAMILY "GOLDMONT "
18181 +#elif defined CONFIG_MGOLDMONTPLUS
18182 +#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
18183 +#elif defined CONFIG_MSANDYBRIDGE
18184 +#define MODULE_PROC_FAMILY "SANDYBRIDGE "
18185 +#elif defined CONFIG_MIVYBRIDGE
18186 +#define MODULE_PROC_FAMILY "IVYBRIDGE "
18187 +#elif defined CONFIG_MHASWELL
18188 +#define MODULE_PROC_FAMILY "HASWELL "
18189 +#elif defined CONFIG_MBROADWELL
18190 +#define MODULE_PROC_FAMILY "BROADWELL "
18191 +#elif defined CONFIG_MSKYLAKE
18192 +#define MODULE_PROC_FAMILY "SKYLAKE "
18193 +#elif defined CONFIG_MSKYLAKEX
18194 +#define MODULE_PROC_FAMILY "SKYLAKEX "
18195 +#elif defined CONFIG_MCANNONLAKE
18196 +#define MODULE_PROC_FAMILY "CANNONLAKE "
18197 +#elif defined CONFIG_MICELAKE
18198 +#define MODULE_PROC_FAMILY "ICELAKE "
18199 +#elif defined CONFIG_MCASCADELAKE
18200 +#define MODULE_PROC_FAMILY "CASCADELAKE "
18201 +#elif defined CONFIG_MCOOPERLAKE
18202 +#define MODULE_PROC_FAMILY "COOPERLAKE "
18203 +#elif defined CONFIG_MTIGERLAKE
18204 +#define MODULE_PROC_FAMILY "TIGERLAKE "
18205 +#elif defined CONFIG_MSAPPHIRERAPIDS
18206 +#define MODULE_PROC_FAMILY "SAPPHIRERAPIDS "
18207 +#elif defined CONFIG_MROCKETLAKE
18208 +#define MODULE_PROC_FAMILY "ROCKETLAKE "
18209 +#elif defined CONFIG_MALDERLAKE
18210 +#define MODULE_PROC_FAMILY "ALDERLAKE "
18211  #elif defined CONFIG_MATOM
18212  #define MODULE_PROC_FAMILY "ATOM "
18213  #elif defined CONFIG_M686
18214 @@ -35,6 +77,30 @@
18215  #define MODULE_PROC_FAMILY "K7 "
18216  #elif defined CONFIG_MK8
18217  #define MODULE_PROC_FAMILY "K8 "
18218 +#elif defined CONFIG_MK8SSE3
18219 +#define MODULE_PROC_FAMILY "K8SSE3 "
18220 +#elif defined CONFIG_MK10
18221 +#define MODULE_PROC_FAMILY "K10 "
18222 +#elif defined CONFIG_MBARCELONA
18223 +#define MODULE_PROC_FAMILY "BARCELONA "
18224 +#elif defined CONFIG_MBOBCAT
18225 +#define MODULE_PROC_FAMILY "BOBCAT "
18226 +#elif defined CONFIG_MBULLDOZER
18227 +#define MODULE_PROC_FAMILY "BULLDOZER "
18228 +#elif defined CONFIG_MPILEDRIVER
18229 +#define MODULE_PROC_FAMILY "PILEDRIVER "
18230 +#elif defined CONFIG_MSTEAMROLLER
18231 +#define MODULE_PROC_FAMILY "STEAMROLLER "
18232 +#elif defined CONFIG_MJAGUAR
18233 +#define MODULE_PROC_FAMILY "JAGUAR "
18234 +#elif defined CONFIG_MEXCAVATOR
18235 +#define MODULE_PROC_FAMILY "EXCAVATOR "
18236 +#elif defined CONFIG_MZEN
18237 +#define MODULE_PROC_FAMILY "ZEN "
18238 +#elif defined CONFIG_MZEN2
18239 +#define MODULE_PROC_FAMILY "ZEN2 "
18240 +#elif defined CONFIG_MZEN3
18241 +#define MODULE_PROC_FAMILY "ZEN3 "
18242  #elif defined CONFIG_MELAN
18243  #define MODULE_PROC_FAMILY "ELAN "
18244  #elif defined CONFIG_MCRUSOE
18245 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
18246 index 52bc217ca8c3..c9ddd233e32f 100644
18247 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
18248 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
18249 @@ -1671,6 +1671,9 @@ static __init int uv_system_init_hubless(void)
18250         if (rc < 0)
18251                 return rc;
18253 +       /* Set section block size for current node memory */
18254 +       set_block_size();
18256         /* Create user access node */
18257         if (rc >= 0)
18258                 uv_setup_proc_files(1);
18259 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
18260 index 347a956f71ca..eedb2b320946 100644
18261 --- a/arch/x86/kernel/cpu/amd.c
18262 +++ b/arch/x86/kernel/cpu/amd.c
18263 @@ -1170,3 +1170,19 @@ void set_dr_addr_mask(unsigned long mask, int dr)
18264                 break;
18265         }
18268 +u32 amd_get_highest_perf(void)
18270 +       struct cpuinfo_x86 *c = &boot_cpu_data;
18272 +       if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
18273 +                              (c->x86_model >= 0x70 && c->x86_model < 0x80)))
18274 +               return 166;
18276 +       if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
18277 +                              (c->x86_model >= 0x40 && c->x86_model < 0x70)))
18278 +               return 166;
18280 +       return 255;
18282 +EXPORT_SYMBOL_GPL(amd_get_highest_perf);
18283 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
18284 index ab640abe26b6..1e576cc831c1 100644
18285 --- a/arch/x86/kernel/cpu/common.c
18286 +++ b/arch/x86/kernel/cpu/common.c
18287 @@ -1850,7 +1850,7 @@ static inline void setup_getcpu(int cpu)
18288         unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
18289         struct desc_struct d = { };
18291 -       if (boot_cpu_has(X86_FEATURE_RDTSCP))
18292 +       if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
18293                 write_rdtscp_aux(cpudata);
18295         /* Store CPU and node number in limit. */
18296 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
18297 index b935e1b5f115..6a6318e9590c 100644
18298 --- a/arch/x86/kernel/cpu/microcode/core.c
18299 +++ b/arch/x86/kernel/cpu/microcode/core.c
18300 @@ -629,16 +629,16 @@ static ssize_t reload_store(struct device *dev,
18301         if (val != 1)
18302                 return size;
18304 -       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
18305 -       if (tmp_ret != UCODE_NEW)
18306 -               return size;
18308         get_online_cpus();
18310         ret = check_online_cpus();
18311         if (ret)
18312                 goto put;
18314 +       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
18315 +       if (tmp_ret != UCODE_NEW)
18316 +               goto put;
18318         mutex_lock(&microcode_mutex);
18319         ret = microcode_reload_late();
18320         mutex_unlock(&microcode_mutex);
18321 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
18322 index 22aad412f965..629c4994f165 100644
18323 --- a/arch/x86/kernel/e820.c
18324 +++ b/arch/x86/kernel/e820.c
18325 @@ -31,8 +31,8 @@
18326   *       - inform the user about the firmware's notion of memory layout
18327   *         via /sys/firmware/memmap
18328   *
18329 - *       - the hibernation code uses it to generate a kernel-independent MD5
18330 - *         fingerprint of the physical memory layout of a system.
18331 + *       - the hibernation code uses it to generate a kernel-independent CRC32
18332 + *         checksum of the physical memory layout of a system.
18333   *
18334   * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
18335   *   passed to us by the bootloader - the major difference between
18336 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
18337 index df776cdca327..0bb9fe021bbe 100644
18338 --- a/arch/x86/kernel/kprobes/core.c
18339 +++ b/arch/x86/kernel/kprobes/core.c
18340 @@ -139,6 +139,8 @@ NOKPROBE_SYMBOL(synthesize_relcall);
18341  int can_boost(struct insn *insn, void *addr)
18343         kprobe_opcode_t opcode;
18344 +       insn_byte_t prefix;
18345 +       int i;
18347         if (search_exception_tables((unsigned long)addr))
18348                 return 0;       /* Page fault may occur on this address. */
18349 @@ -151,9 +153,14 @@ int can_boost(struct insn *insn, void *addr)
18350         if (insn->opcode.nbytes != 1)
18351                 return 0;
18353 -       /* Can't boost Address-size override prefix */
18354 -       if (unlikely(inat_is_address_size_prefix(insn->attr)))
18355 -               return 0;
18356 +       for_each_insn_prefix(insn, i, prefix) {
18357 +               insn_attr_t attr;
18359 +               attr = inat_get_opcode_attribute(prefix);
18360 +               /* Can't boost Address-size override prefix and CS override prefix */
18361 +               if (prefix == 0x2e || inat_is_address_size_prefix(attr))
18362 +                       return 0;
18363 +       }
18365         opcode = insn->opcode.bytes[0];
18367 @@ -178,8 +185,8 @@ int can_boost(struct insn *insn, void *addr)
18368                 /* clear and set flags are boostable */
18369                 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
18370         default:
18371 -               /* CS override prefix and call are not boostable */
18372 -               return (opcode != 0x2e && opcode != 0x9a);
18373 +               /* call is not boostable */
18374 +               return opcode != 0x9a;
18375         }
18378 @@ -448,7 +455,11 @@ static void set_resume_flags(struct kprobe *p, struct insn *insn)
18379                 break;
18380  #endif
18381         case 0xff:
18382 -               opcode = insn->opcode.bytes[1];
18383 +               /*
18384 +                * Since the 0xff is an extended group opcode, the instruction
18385 +                * is determined by the MOD/RM byte.
18386 +                */
18387 +               opcode = insn->modrm.bytes[0];
18388                 if ((opcode & 0x30) == 0x10) {
18389                         /*
18390                          * call absolute, indirect
18391 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18392 index bf250a339655..2ef961cf4cfc 100644
18393 --- a/arch/x86/kernel/nmi.c
18394 +++ b/arch/x86/kernel/nmi.c
18395 @@ -524,6 +524,16 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
18396                 mds_user_clear_cpu_buffers();
18399 +#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
18400 +DEFINE_IDTENTRY_RAW(exc_nmi_noist)
18402 +       exc_nmi(regs);
18404 +#endif
18405 +#if IS_MODULE(CONFIG_KVM_INTEL)
18406 +EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
18407 +#endif
18409  void stop_nmi(void)
18411         ignore_nmis++;
18412 diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
18413 index cdc04d091242..387b71669818 100644
18414 --- a/arch/x86/kernel/sev-es-shared.c
18415 +++ b/arch/x86/kernel/sev-es-shared.c
18416 @@ -186,7 +186,6 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
18417          * make it accessible to the hypervisor.
18418          *
18419          * In particular, check for:
18420 -        *      - Hypervisor CPUID bit
18421          *      - Availability of CPUID leaf 0x8000001f
18422          *      - SEV CPUID bit.
18423          *
18424 @@ -194,10 +193,7 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
18425          * can't be checked here.
18426          */
18428 -       if ((fn == 1 && !(regs->cx & BIT(31))))
18429 -               /* Hypervisor bit */
18430 -               goto fail;
18431 -       else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
18432 +       if (fn == 0x80000000 && (regs->ax < 0x8000001f))
18433                 /* SEV leaf check */
18434                 goto fail;
18435         else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
18436 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
18437 index 16703c35a944..363b36bbd791 100644
18438 --- a/arch/x86/kernel/smpboot.c
18439 +++ b/arch/x86/kernel/smpboot.c
18440 @@ -458,29 +458,52 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18441         return false;
18444 +static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18446 +       if (c->phys_proc_id == o->phys_proc_id &&
18447 +           c->cpu_die_id == o->cpu_die_id)
18448 +               return true;
18449 +       return false;
18452  /*
18453 - * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
18454 + * Unlike the other levels, we do not enforce keeping a
18455 + * multicore group inside a NUMA node.  If this happens, we will
18456 + * discard the MC level of the topology later.
18457 + */
18458 +static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18460 +       if (c->phys_proc_id == o->phys_proc_id)
18461 +               return true;
18462 +       return false;
18466 + * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
18467   *
18468 - * These are Intel CPUs that enumerate an LLC that is shared by
18469 - * multiple NUMA nodes. The LLC on these systems is shared for
18470 - * off-package data access but private to the NUMA node (half
18471 - * of the package) for on-package access.
18472 + * Any Intel CPU that has multiple nodes per package and does not
18473 + * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
18474   *
18475 - * CPUID (the source of the information about the LLC) can only
18476 - * enumerate the cache as being shared *or* unshared, but not
18477 - * this particular configuration. The CPU in this case enumerates
18478 - * the cache to be shared across the entire package (spanning both
18479 - * NUMA nodes).
18480 + * When in SNC mode, these CPUs enumerate an LLC that is shared
18481 + * by multiple NUMA nodes. The LLC is shared for off-package data
18482 + * access but private to the NUMA node (half of the package) for
18483 + * on-package access. CPUID (the source of the information about
18484 + * the LLC) can only enumerate the cache as shared or unshared,
18485 + * but not this particular configuration.
18486   */
18488 -static const struct x86_cpu_id snc_cpu[] = {
18489 -       X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
18490 +static const struct x86_cpu_id intel_cod_cpu[] = {
18491 +       X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0),       /* COD */
18492 +       X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0),     /* COD */
18493 +       X86_MATCH_INTEL_FAM6_MODEL(ANY, 1),             /* SNC */
18494         {}
18495  };
18497  static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18499 +       const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
18500         int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
18501 +       bool intel_snc = id && id->driver_data;
18503         /* Do not match if we do not have a valid APICID for cpu: */
18504         if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
18505 @@ -495,32 +518,12 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18506          * means 'c' does not share the LLC of 'o'. This will be
18507          * reflected to userspace.
18508          */
18509 -       if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
18510 +       if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
18511                 return false;
18513         return topology_sane(c, o, "llc");
18517 - * Unlike the other levels, we do not enforce keeping a
18518 - * multicore group inside a NUMA node.  If this happens, we will
18519 - * discard the MC level of the topology later.
18520 - */
18521 -static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18523 -       if (c->phys_proc_id == o->phys_proc_id)
18524 -               return true;
18525 -       return false;
18528 -static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18530 -       if ((c->phys_proc_id == o->phys_proc_id) &&
18531 -               (c->cpu_die_id == o->cpu_die_id))
18532 -               return true;
18533 -       return false;
18537  #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
18538  static inline int x86_sched_itmt_flags(void)
18539 @@ -592,14 +595,23 @@ void set_cpu_sibling_map(int cpu)
18540         for_each_cpu(i, cpu_sibling_setup_mask) {
18541                 o = &cpu_data(i);
18543 +               if (match_pkg(c, o) && !topology_same_node(c, o))
18544 +                       x86_has_numa_in_package = true;
18546                 if ((i == cpu) || (has_smt && match_smt(c, o)))
18547                         link_mask(topology_sibling_cpumask, cpu, i);
18549                 if ((i == cpu) || (has_mp && match_llc(c, o)))
18550                         link_mask(cpu_llc_shared_mask, cpu, i);
18552 +               if ((i == cpu) || (has_mp && match_die(c, o)))
18553 +                       link_mask(topology_die_cpumask, cpu, i);
18554         }
18556 +       threads = cpumask_weight(topology_sibling_cpumask(cpu));
18557 +       if (threads > __max_smt_threads)
18558 +               __max_smt_threads = threads;
18560         /*
18561          * This needs a separate iteration over the cpus because we rely on all
18562          * topology_sibling_cpumask links to be set-up.
18563 @@ -613,8 +625,7 @@ void set_cpu_sibling_map(int cpu)
18564                         /*
18565                          *  Does this new cpu bringup a new core?
18566                          */
18567 -                       if (cpumask_weight(
18568 -                           topology_sibling_cpumask(cpu)) == 1) {
18569 +                       if (threads == 1) {
18570                                 /*
18571                                  * for each core in package, increment
18572                                  * the booted_cores for this new cpu
18573 @@ -631,16 +642,7 @@ void set_cpu_sibling_map(int cpu)
18574                         } else if (i != cpu && !c->booted_cores)
18575                                 c->booted_cores = cpu_data(i).booted_cores;
18576                 }
18577 -               if (match_pkg(c, o) && !topology_same_node(c, o))
18578 -                       x86_has_numa_in_package = true;
18580 -               if ((i == cpu) || (has_mp && match_die(c, o)))
18581 -                       link_mask(topology_die_cpumask, cpu, i);
18582         }
18584 -       threads = cpumask_weight(topology_sibling_cpumask(cpu));
18585 -       if (threads > __max_smt_threads)
18586 -               __max_smt_threads = threads;
18589  /* maps the cpu to the sched domain representing multi-core */
18590 @@ -2044,7 +2046,7 @@ static bool amd_set_max_freq_ratio(void)
18591                 return false;
18592         }
18594 -       highest_perf = perf_caps.highest_perf;
18595 +       highest_perf = amd_get_highest_perf();
18596         nominal_perf = perf_caps.nominal_perf;
18598         if (!highest_perf || !nominal_perf) {
18599 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
18600 index 6bd2f8b830e4..62f795352c02 100644
18601 --- a/arch/x86/kvm/cpuid.c
18602 +++ b/arch/x86/kvm/cpuid.c
18603 @@ -589,7 +589,8 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
18604         case 7:
18605                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
18606                 entry->eax = 0;
18607 -               entry->ecx = F(RDPID);
18608 +               if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
18609 +                       entry->ecx = F(RDPID);
18610                 ++array->nent;
18611         default:
18612                 break;
18613 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18614 index f7970ba6219f..8fc71e70857d 100644
18615 --- a/arch/x86/kvm/emulate.c
18616 +++ b/arch/x86/kvm/emulate.c
18617 @@ -4220,7 +4220,7 @@ static bool valid_cr(int nr)
18618         }
18621 -static int check_cr_read(struct x86_emulate_ctxt *ctxt)
18622 +static int check_cr_access(struct x86_emulate_ctxt *ctxt)
18624         if (!valid_cr(ctxt->modrm_reg))
18625                 return emulate_ud(ctxt);
18626 @@ -4228,80 +4228,6 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt)
18627         return X86EMUL_CONTINUE;
18630 -static int check_cr_write(struct x86_emulate_ctxt *ctxt)
18632 -       u64 new_val = ctxt->src.val64;
18633 -       int cr = ctxt->modrm_reg;
18634 -       u64 efer = 0;
18636 -       static u64 cr_reserved_bits[] = {
18637 -               0xffffffff00000000ULL,
18638 -               0, 0, 0, /* CR3 checked later */
18639 -               CR4_RESERVED_BITS,
18640 -               0, 0, 0,
18641 -               CR8_RESERVED_BITS,
18642 -       };
18644 -       if (!valid_cr(cr))
18645 -               return emulate_ud(ctxt);
18647 -       if (new_val & cr_reserved_bits[cr])
18648 -               return emulate_gp(ctxt, 0);
18650 -       switch (cr) {
18651 -       case 0: {
18652 -               u64 cr4;
18653 -               if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
18654 -                   ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
18655 -                       return emulate_gp(ctxt, 0);
18657 -               cr4 = ctxt->ops->get_cr(ctxt, 4);
18658 -               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
18660 -               if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
18661 -                   !(cr4 & X86_CR4_PAE))
18662 -                       return emulate_gp(ctxt, 0);
18664 -               break;
18665 -               }
18666 -       case 3: {
18667 -               u64 rsvd = 0;
18669 -               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
18670 -               if (efer & EFER_LMA) {
18671 -                       u64 maxphyaddr;
18672 -                       u32 eax, ebx, ecx, edx;
18674 -                       eax = 0x80000008;
18675 -                       ecx = 0;
18676 -                       if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
18677 -                                                &edx, true))
18678 -                               maxphyaddr = eax & 0xff;
18679 -                       else
18680 -                               maxphyaddr = 36;
18681 -                       rsvd = rsvd_bits(maxphyaddr, 63);
18682 -                       if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
18683 -                               rsvd &= ~X86_CR3_PCID_NOFLUSH;
18684 -               }
18686 -               if (new_val & rsvd)
18687 -                       return emulate_gp(ctxt, 0);
18689 -               break;
18690 -               }
18691 -       case 4: {
18692 -               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
18694 -               if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
18695 -                       return emulate_gp(ctxt, 0);
18697 -               break;
18698 -               }
18699 -       }
18701 -       return X86EMUL_CONTINUE;
18704  static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
18706         unsigned long dr7;
18707 @@ -4576,7 +4502,7 @@ static const struct opcode group8[] = {
18708   * from the register case of group9.
18709   */
18710  static const struct gprefix pfx_0f_c7_7 = {
18711 -       N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
18712 +       N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
18713  };
18716 @@ -4841,10 +4767,10 @@ static const struct opcode twobyte_table[256] = {
18717         D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
18718         D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
18719         /* 0x20 - 0x2F */
18720 -       DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
18721 +       DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
18722         DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
18723         IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
18724 -                                               check_cr_write),
18725 +                                               check_cr_access),
18726         IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
18727                                                 check_dr_write),
18728         N, N, N, N,
18729 diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
18730 index 0d359115429a..f016838faedd 100644
18731 --- a/arch/x86/kvm/kvm_emulate.h
18732 +++ b/arch/x86/kvm/kvm_emulate.h
18733 @@ -468,6 +468,7 @@ enum x86_intercept {
18734         x86_intercept_clgi,
18735         x86_intercept_skinit,
18736         x86_intercept_rdtscp,
18737 +       x86_intercept_rdpid,
18738         x86_intercept_icebp,
18739         x86_intercept_wbinvd,
18740         x86_intercept_monitor,
18741 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18742 index cc369b9ad8f1..fa023f3feb25 100644
18743 --- a/arch/x86/kvm/lapic.c
18744 +++ b/arch/x86/kvm/lapic.c
18745 @@ -296,6 +296,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
18747                 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
18748         }
18750 +       /* Check if there are APF page ready requests pending */
18751 +       if (enabled)
18752 +               kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
18755  static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
18756 @@ -1909,8 +1913,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
18757         if (!apic->lapic_timer.hv_timer_in_use)
18758                 goto out;
18759         WARN_ON(rcuwait_active(&vcpu->wait));
18760 -       cancel_hv_timer(apic);
18761         apic_timer_expired(apic, false);
18762 +       cancel_hv_timer(apic);
18764         if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
18765                 advance_periodic_target_expiration(apic);
18766 @@ -2261,6 +2265,8 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
18767                 if (value & MSR_IA32_APICBASE_ENABLE) {
18768                         kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
18769                         static_branch_slow_dec_deferred(&apic_hw_disabled);
18770 +                       /* Check if there are APF page ready requests pending */
18771 +                       kvm_make_request(KVM_REQ_APF_READY, vcpu);
18772                 } else {
18773                         static_branch_inc(&apic_hw_disabled.key);
18774                         atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
18775 diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
18776 index 951dae4e7175..cd0faa187674 100644
18777 --- a/arch/x86/kvm/mmu/mmu.c
18778 +++ b/arch/x86/kvm/mmu/mmu.c
18779 @@ -3193,14 +3193,14 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
18780                 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
18781                     (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
18782                         mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
18783 -               } else {
18784 +               } else if (mmu->pae_root) {
18785                         for (i = 0; i < 4; ++i)
18786                                 if (mmu->pae_root[i] != 0)
18787                                         mmu_free_root_page(kvm,
18788                                                            &mmu->pae_root[i],
18789                                                            &invalid_list);
18790 -                       mmu->root_hpa = INVALID_PAGE;
18791                 }
18792 +               mmu->root_hpa = INVALID_PAGE;
18793                 mmu->root_pgd = 0;
18794         }
18796 @@ -3312,9 +3312,23 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
18797          * the shadow page table may be a PAE or a long mode page table.
18798          */
18799         pm_mask = PT_PRESENT_MASK;
18800 -       if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
18801 +       if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
18802                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
18804 +               /*
18805 +                * Allocate the page for the PDPTEs when shadowing 32-bit NPT
18806 +                * with 64-bit only when needed.  Unlike 32-bit NPT, it doesn't
18807 +                * need to be in low mem.  See also lm_root below.
18808 +                */
18809 +               if (!vcpu->arch.mmu->pae_root) {
18810 +                       WARN_ON_ONCE(!tdp_enabled);
18812 +                       vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
18813 +                       if (!vcpu->arch.mmu->pae_root)
18814 +                               return -ENOMEM;
18815 +               }
18816 +       }
18818         for (i = 0; i < 4; ++i) {
18819                 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
18820                 if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
18821 @@ -3337,21 +3351,19 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
18822         vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
18824         /*
18825 -        * If we shadow a 32 bit page table with a long mode page
18826 -        * table we enter this path.
18827 +        * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
18828 +        * tables are allocated and initialized at MMU creation as there is no
18829 +        * equivalent level in the guest's NPT to shadow.  Allocate the tables
18830 +        * on demand, as running a 32-bit L1 VMM is very rare.  The PDP is
18831 +        * handled above (to share logic with PAE), deal with the PML4 here.
18832          */
18833         if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
18834                 if (vcpu->arch.mmu->lm_root == NULL) {
18835 -                       /*
18836 -                        * The additional page necessary for this is only
18837 -                        * allocated on demand.
18838 -                        */
18840                         u64 *lm_root;
18842                         lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
18843 -                       if (lm_root == NULL)
18844 -                               return 1;
18845 +                       if (!lm_root)
18846 +                               return -ENOMEM;
18848                         lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
18850 @@ -3653,6 +3665,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
18851         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
18852         bool async;
18854 +       /*
18855 +        * Retry the page fault if the gfn hit a memslot that is being deleted
18856 +        * or moved.  This ensures any existing SPTEs for the old memslot will
18857 +        * be zapped before KVM inserts a new MMIO SPTE for the gfn.
18858 +        */
18859 +       if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
18860 +               return true;
18862         /* Don't expose private memslots to L2. */
18863         if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
18864                 *pfn = KVM_PFN_NOSLOT;
18865 @@ -4615,12 +4635,17 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
18866         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
18867         union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
18869 -       context->shadow_root_level = new_role.base.level;
18871         __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
18873 -       if (new_role.as_u64 != context->mmu_role.as_u64)
18874 +       if (new_role.as_u64 != context->mmu_role.as_u64) {
18875                 shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
18877 +               /*
18878 +                * Override the level set by the common init helper, nested TDP
18879 +                * always uses the host's TDP configuration.
18880 +                */
18881 +               context->shadow_root_level = new_role.base.level;
18882 +       }
18884  EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
18886 @@ -5240,9 +5265,11 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
18887          * while the PDP table is a per-vCPU construct that's allocated at MMU
18888          * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
18889          * x86_64.  Therefore we need to allocate the PDP table in the first
18890 -        * 4GB of memory, which happens to fit the DMA32 zone.  Except for
18891 -        * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
18892 -        * skip allocating the PDP table.
18893 +        * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
18894 +        * generally doesn't use PAE paging and can skip allocating the PDP
18895 +        * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
18896 +        * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
18897 +        * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
18898          */
18899         if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
18900                 return 0;
18901 diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
18902 index 874ea309279f..dbc6214d69de 100644
18903 --- a/arch/x86/kvm/svm/sev.c
18904 +++ b/arch/x86/kvm/svm/sev.c
18905 @@ -87,7 +87,7 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
18906         return true;
18909 -static int sev_asid_new(struct kvm_sev_info *sev)
18910 +static int sev_asid_new(bool es_active)
18912         int pos, min_asid, max_asid;
18913         bool retry = true;
18914 @@ -98,8 +98,8 @@ static int sev_asid_new(struct kvm_sev_info *sev)
18915          * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
18916          * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
18917          */
18918 -       min_asid = sev->es_active ? 0 : min_sev_asid - 1;
18919 -       max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
18920 +       min_asid = es_active ? 0 : min_sev_asid - 1;
18921 +       max_asid = es_active ? min_sev_asid - 1 : max_sev_asid;
18922  again:
18923         pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
18924         if (pos >= max_asid) {
18925 @@ -179,13 +179,17 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
18926  static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
18928         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
18929 +       bool es_active = argp->id == KVM_SEV_ES_INIT;
18930         int asid, ret;
18932 +       if (kvm->created_vcpus)
18933 +               return -EINVAL;
18935         ret = -EBUSY;
18936         if (unlikely(sev->active))
18937                 return ret;
18939 -       asid = sev_asid_new(sev);
18940 +       asid = sev_asid_new(es_active);
18941         if (asid < 0)
18942                 return ret;
18944 @@ -194,6 +198,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
18945                 goto e_free;
18947         sev->active = true;
18948 +       sev->es_active = es_active;
18949         sev->asid = asid;
18950         INIT_LIST_HEAD(&sev->regions_list);
18952 @@ -204,16 +209,6 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
18953         return ret;
18956 -static int sev_es_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
18958 -       if (!sev_es)
18959 -               return -ENOTTY;
18961 -       to_kvm_svm(kvm)->sev_info.es_active = true;
18963 -       return sev_guest_init(kvm, argp);
18966  static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
18968         struct sev_data_activate *data;
18969 @@ -564,6 +559,7 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
18971         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
18972         struct sev_data_launch_update_vmsa *vmsa;
18973 +       struct kvm_vcpu *vcpu;
18974         int i, ret;
18976         if (!sev_es_guest(kvm))
18977 @@ -573,8 +569,8 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
18978         if (!vmsa)
18979                 return -ENOMEM;
18981 -       for (i = 0; i < kvm->created_vcpus; i++) {
18982 -               struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
18983 +       kvm_for_each_vcpu(i, vcpu, kvm) {
18984 +               struct vcpu_svm *svm = to_svm(vcpu);
18986                 /* Perform some pre-encryption checks against the VMSA */
18987                 ret = sev_es_sync_vmsa(svm);
18988 @@ -1127,12 +1123,15 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
18989         mutex_lock(&kvm->lock);
18991         switch (sev_cmd.id) {
18992 +       case KVM_SEV_ES_INIT:
18993 +               if (!sev_es) {
18994 +                       r = -ENOTTY;
18995 +                       goto out;
18996 +               }
18997 +               fallthrough;
18998         case KVM_SEV_INIT:
18999                 r = sev_guest_init(kvm, &sev_cmd);
19000                 break;
19001 -       case KVM_SEV_ES_INIT:
19002 -               r = sev_es_guest_init(kvm, &sev_cmd);
19003 -               break;
19004         case KVM_SEV_LAUNCH_START:
19005                 r = sev_launch_start(kvm, &sev_cmd);
19006                 break;
19007 @@ -1349,8 +1348,11 @@ void __init sev_hardware_setup(void)
19008                 goto out;
19010         sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
19011 -       if (!sev_reclaim_asid_bitmap)
19012 +       if (!sev_reclaim_asid_bitmap) {
19013 +               bitmap_free(sev_asid_bitmap);
19014 +               sev_asid_bitmap = NULL;
19015                 goto out;
19016 +       }
19018         pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1);
19019         sev_supported = true;
19020 @@ -1666,7 +1668,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
19021         return -EINVAL;
19024 -static void pre_sev_es_run(struct vcpu_svm *svm)
19025 +void sev_es_unmap_ghcb(struct vcpu_svm *svm)
19027         if (!svm->ghcb)
19028                 return;
19029 @@ -1702,9 +1704,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
19030         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
19031         int asid = sev_get_asid(svm->vcpu.kvm);
19033 -       /* Perform any SEV-ES pre-run actions */
19034 -       pre_sev_es_run(svm);
19036         /* Assign the asid allocated with this SEV guest */
19037         svm->asid = asid;
19039 @@ -2104,5 +2103,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
19040          * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
19041          * non-zero value.
19042          */
19043 +       if (!svm->ghcb)
19044 +               return;
19046         ghcb_set_sw_exit_info_2(svm->ghcb, 1);
19048 diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
19049 index 58a45bb139f8..48ee3deab64b 100644
19050 --- a/arch/x86/kvm/svm/svm.c
19051 +++ b/arch/x86/kvm/svm/svm.c
19052 @@ -564,9 +564,8 @@ static int svm_cpu_init(int cpu)
19053         clear_page(page_address(sd->save_area));
19055         if (svm_sev_enabled()) {
19056 -               sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
19057 -                                             sizeof(void *),
19058 -                                             GFP_KERNEL);
19059 +               sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *),
19060 +                                       GFP_KERNEL);
19061                 if (!sd->sev_vmcbs)
19062                         goto free_save_area;
19063         }
19064 @@ -969,21 +968,6 @@ static __init int svm_hardware_setup(void)
19065                 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
19066         }
19068 -       if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev) {
19069 -               sev_hardware_setup();
19070 -       } else {
19071 -               sev = false;
19072 -               sev_es = false;
19073 -       }
19075 -       svm_adjust_mmio_mask();
19077 -       for_each_possible_cpu(cpu) {
19078 -               r = svm_cpu_init(cpu);
19079 -               if (r)
19080 -                       goto err;
19081 -       }
19083         /*
19084          * KVM's MMU doesn't support using 2-level paging for itself, and thus
19085          * NPT isn't supported if the host is using 2-level paging since host
19086 @@ -998,6 +982,21 @@ static __init int svm_hardware_setup(void)
19087         kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
19088         pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
19090 +       if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev && npt_enabled) {
19091 +               sev_hardware_setup();
19092 +       } else {
19093 +               sev = false;
19094 +               sev_es = false;
19095 +       }
19097 +       svm_adjust_mmio_mask();
19099 +       for_each_possible_cpu(cpu) {
19100 +               r = svm_cpu_init(cpu);
19101 +               if (r)
19102 +                       goto err;
19103 +       }
19105         if (nrips) {
19106                 if (!boot_cpu_has(X86_FEATURE_NRIPS))
19107                         nrips = false;
19108 @@ -1417,6 +1416,9 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
19109         struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
19110         unsigned int i;
19112 +       if (sev_es_guest(vcpu->kvm))
19113 +               sev_es_unmap_ghcb(svm);
19115         if (svm->guest_state_loaded)
19116                 return;
19118 @@ -1898,7 +1900,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
19120  static int pf_interception(struct vcpu_svm *svm)
19122 -       u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
19123 +       u64 fault_address = svm->vmcb->control.exit_info_2;
19124         u64 error_code = svm->vmcb->control.exit_info_1;
19126         return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
19127 @@ -2738,6 +2740,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
19128         case MSR_TSC_AUX:
19129                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
19130                         return 1;
19131 +               if (!msr_info->host_initiated &&
19132 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
19133 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
19134 +                       return 1;
19135                 msr_info->data = svm->tsc_aux;
19136                 break;
19137         /*
19138 @@ -2809,7 +2815,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
19139  static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
19141         struct vcpu_svm *svm = to_svm(vcpu);
19142 -       if (!sev_es_guest(svm->vcpu.kvm) || !err)
19143 +       if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
19144                 return kvm_complete_insn_gp(&svm->vcpu, err);
19146         ghcb_set_sw_exit_info_1(svm->ghcb, 1);
19147 @@ -2946,6 +2952,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
19148                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
19149                         return 1;
19151 +               if (!msr->host_initiated &&
19152 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
19153 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
19154 +                       return 1;
19156                 /*
19157                  * This is rare, so we update the MSR here instead of using
19158                  * direct_access_msrs.  Doing that would require a rdmsr in
19159 diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
19160 index 39e071fdab0c..98da0b91f273 100644
19161 --- a/arch/x86/kvm/svm/svm.h
19162 +++ b/arch/x86/kvm/svm/svm.h
19163 @@ -571,6 +571,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
19164  void sev_es_create_vcpu(struct vcpu_svm *svm);
19165  void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
19166  void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
19167 +void sev_es_unmap_ghcb(struct vcpu_svm *svm);
19169  /* vmenter.S */
19171 diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
19172 index bcca0b80e0d0..4ba2a43e188b 100644
19173 --- a/arch/x86/kvm/vmx/nested.c
19174 +++ b/arch/x86/kvm/vmx/nested.c
19175 @@ -619,6 +619,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
19176         }
19178         /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
19179 +#ifdef CONFIG_X86_64
19180         nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
19181                                              MSR_FS_BASE, MSR_TYPE_RW);
19183 @@ -627,6 +628,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
19185         nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
19186                                              MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
19187 +#endif
19189         /*
19190          * Checking the L0->L1 bitmap is trying to verify two things:
19191 @@ -3098,15 +3100,8 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
19192                         nested_vmx_handle_enlightened_vmptrld(vcpu, false);
19194                 if (evmptrld_status == EVMPTRLD_VMFAIL ||
19195 -                   evmptrld_status == EVMPTRLD_ERROR) {
19196 -                       pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
19197 -                                            __func__);
19198 -                       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
19199 -                       vcpu->run->internal.suberror =
19200 -                               KVM_INTERNAL_ERROR_EMULATION;
19201 -                       vcpu->run->internal.ndata = 0;
19202 +                   evmptrld_status == EVMPTRLD_ERROR)
19203                         return false;
19204 -               }
19205         }
19207         return true;
19208 @@ -3194,8 +3189,16 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
19210  static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
19212 -       if (!nested_get_evmcs_page(vcpu))
19213 +       if (!nested_get_evmcs_page(vcpu)) {
19214 +               pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
19215 +                                    __func__);
19216 +               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
19217 +               vcpu->run->internal.suberror =
19218 +                       KVM_INTERNAL_ERROR_EMULATION;
19219 +               vcpu->run->internal.ndata = 0;
19221                 return false;
19222 +       }
19224         if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
19225                 return false;
19226 @@ -4422,7 +4425,15 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
19227         /* trying to cancel vmlaunch/vmresume is a bug */
19228         WARN_ON_ONCE(vmx->nested.nested_run_pending);
19230 -       kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
19231 +       if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
19232 +               /*
19233 +                * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
19234 +                * Enlightened VMCS after migration and we still need to
19235 +                * do that when something is forcing L2->L1 exit prior to
19236 +                * the first L2 run.
19237 +                */
19238 +               (void)nested_get_evmcs_page(vcpu);
19239 +       }
19241         /* Service the TLB flush request for L2 before switching to L1. */
19242         if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
19243 @@ -4601,9 +4612,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
19244         else if (addr_size == 0)
19245                 off = (gva_t)sign_extend64(off, 15);
19246         if (base_is_valid)
19247 -               off += kvm_register_read(vcpu, base_reg);
19248 +               off += kvm_register_readl(vcpu, base_reg);
19249         if (index_is_valid)
19250 -               off += kvm_register_read(vcpu, index_reg) << scaling;
19251 +               off += kvm_register_readl(vcpu, index_reg) << scaling;
19252         vmx_get_segment(vcpu, &s, seg_reg);
19254         /*
19255 @@ -5479,16 +5490,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
19256                 if (!nested_vmx_check_eptp(vcpu, new_eptp))
19257                         return 1;
19259 -               kvm_mmu_unload(vcpu);
19260                 mmu->ept_ad = accessed_dirty;
19261                 mmu->mmu_role.base.ad_disabled = !accessed_dirty;
19262                 vmcs12->ept_pointer = new_eptp;
19263 -               /*
19264 -                * TODO: Check what's the correct approach in case
19265 -                * mmu reload fails. Currently, we just let the next
19266 -                * reload potentially fail
19267 -                */
19268 -               kvm_mmu_reload(vcpu);
19270 +               kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
19271         }
19273         return 0;
19274 @@ -5717,7 +5723,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
19276         /* Decode instruction info and find the field to access */
19277         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
19278 -       field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
19279 +       field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
19281         /* Out-of-range fields always cause a VM exit from L2 to L1 */
19282         if (field >> 15)
19283 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
19284 index 29b40e092d13..f68ed9a1abcc 100644
19285 --- a/arch/x86/kvm/vmx/vmx.c
19286 +++ b/arch/x86/kvm/vmx/vmx.c
19287 @@ -36,6 +36,7 @@
19288  #include <asm/debugreg.h>
19289  #include <asm/desc.h>
19290  #include <asm/fpu/internal.h>
19291 +#include <asm/idtentry.h>
19292  #include <asm/io.h>
19293  #include <asm/irq_remapping.h>
19294  #include <asm/kexec.h>
19295 @@ -156,9 +157,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
19296         MSR_IA32_SPEC_CTRL,
19297         MSR_IA32_PRED_CMD,
19298         MSR_IA32_TSC,
19299 +#ifdef CONFIG_X86_64
19300         MSR_FS_BASE,
19301         MSR_GS_BASE,
19302         MSR_KERNEL_GS_BASE,
19303 +#endif
19304         MSR_IA32_SYSENTER_CS,
19305         MSR_IA32_SYSENTER_ESP,
19306         MSR_IA32_SYSENTER_EIP,
19307 @@ -1731,7 +1734,8 @@ static void setup_msrs(struct vcpu_vmx *vmx)
19308         if (update_transition_efer(vmx))
19309                 vmx_setup_uret_msr(vmx, MSR_EFER);
19311 -       if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
19312 +       if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)  ||
19313 +           guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID))
19314                 vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
19316         vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
19317 @@ -1930,7 +1934,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
19318                 break;
19319         case MSR_TSC_AUX:
19320                 if (!msr_info->host_initiated &&
19321 -                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
19322 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
19323 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
19324                         return 1;
19325                 goto find_uret_msr;
19326         case MSR_IA32_DEBUGCTLMSR:
19327 @@ -2227,7 +2232,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
19328                 break;
19329         case MSR_TSC_AUX:
19330                 if (!msr_info->host_initiated &&
19331 -                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
19332 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
19333 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
19334                         return 1;
19335                 /* Check reserved bit, higher 32 bits should be zero */
19336                 if ((data >> 32) != 0)
19337 @@ -4299,7 +4305,23 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
19338                                                   xsaves_enabled, false);
19339         }
19341 -       vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP);
19342 +       /*
19343 +        * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
19344 +        * feature is exposed to the guest.  This creates a virtualization hole
19345 +        * if both are supported in hardware but only one is exposed to the
19346 +        * guest, but letting the guest execute RDTSCP or RDPID when either one
19347 +        * is advertised is preferable to emulating the advertised instruction
19348 +        * in KVM on #UD, and obviously better than incorrectly injecting #UD.
19349 +        */
19350 +       if (cpu_has_vmx_rdtscp()) {
19351 +               bool rdpid_or_rdtscp_enabled =
19352 +                       guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
19353 +                       guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
19355 +               vmx_adjust_secondary_exec_control(vmx, &exec_control,
19356 +                                                 SECONDARY_EXEC_ENABLE_RDTSCP,
19357 +                                                 rdpid_or_rdtscp_enabled, false);
19358 +       }
19359         vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
19361         vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
19362 @@ -5062,12 +5084,12 @@ static int handle_cr(struct kvm_vcpu *vcpu)
19363                 case 3:
19364                         WARN_ON_ONCE(enable_unrestricted_guest);
19365                         val = kvm_read_cr3(vcpu);
19366 -                       kvm_register_write(vcpu, reg, val);
19367 +                       kvm_register_writel(vcpu, reg, val);
19368                         trace_kvm_cr_read(cr, val);
19369                         return kvm_skip_emulated_instruction(vcpu);
19370                 case 8:
19371                         val = kvm_get_cr8(vcpu);
19372 -                       kvm_register_write(vcpu, reg, val);
19373 +                       kvm_register_writel(vcpu, reg, val);
19374                         trace_kvm_cr_read(cr, val);
19375                         return kvm_skip_emulated_instruction(vcpu);
19376                 }
19377 @@ -5140,7 +5162,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
19378                 unsigned long val;
19380                 kvm_get_dr(vcpu, dr, &val);
19381 -               kvm_register_write(vcpu, reg, val);
19382 +               kvm_register_writel(vcpu, reg, val);
19383                 err = 0;
19384         } else {
19385                 err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg));
19386 @@ -5792,7 +5814,6 @@ void dump_vmcs(void)
19387         u32 vmentry_ctl, vmexit_ctl;
19388         u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
19389         unsigned long cr4;
19390 -       u64 efer;
19392         if (!dump_invalid_vmcs) {
19393                 pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
19394 @@ -5804,7 +5825,6 @@ void dump_vmcs(void)
19395         cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
19396         pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
19397         cr4 = vmcs_readl(GUEST_CR4);
19398 -       efer = vmcs_read64(GUEST_IA32_EFER);
19399         secondary_exec_control = 0;
19400         if (cpu_has_secondary_exec_ctrls())
19401                 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
19402 @@ -5816,9 +5836,7 @@ void dump_vmcs(void)
19403         pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
19404                cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
19405         pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
19406 -       if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
19407 -           (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
19408 -       {
19409 +       if (cpu_has_vmx_ept()) {
19410                 pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
19411                        vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
19412                 pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
19413 @@ -5844,7 +5862,8 @@ void dump_vmcs(void)
19414         if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
19415             (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
19416                 pr_err("EFER =     0x%016llx  PAT = 0x%016llx\n",
19417 -                      efer, vmcs_read64(GUEST_IA32_PAT));
19418 +                      vmcs_read64(GUEST_IA32_EFER),
19419 +                      vmcs_read64(GUEST_IA32_PAT));
19420         pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
19421                vmcs_read64(GUEST_IA32_DEBUGCTL),
19422                vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
19423 @@ -6395,18 +6414,17 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
19425  void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
19427 -static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
19428 +static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
19429 +                                       unsigned long entry)
19431 -       unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
19432 -       gate_desc *desc = (gate_desc *)host_idt_base + vector;
19434         kvm_before_interrupt(vcpu);
19435 -       vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
19436 +       vmx_do_interrupt_nmi_irqoff(entry);
19437         kvm_after_interrupt(vcpu);
19440  static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
19442 +       const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
19443         u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
19445         /* if exit due to PF check for async PF */
19446 @@ -6417,18 +6435,20 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
19447                 kvm_machine_check();
19448         /* We need to handle NMIs before interrupts are enabled */
19449         else if (is_nmi(intr_info))
19450 -               handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info);
19451 +               handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
19454  static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
19456         u32 intr_info = vmx_get_intr_info(vcpu);
19457 +       unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
19458 +       gate_desc *desc = (gate_desc *)host_idt_base + vector;
19460         if (WARN_ONCE(!is_external_intr(intr_info),
19461             "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
19462                 return;
19464 -       handle_interrupt_nmi_irqoff(vcpu, intr_info);
19465 +       handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
19468  static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
19469 @@ -6894,12 +6914,9 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
19471         for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
19472                 u32 index = vmx_uret_msrs_list[i];
19473 -               u32 data_low, data_high;
19474                 int j = vmx->nr_uret_msrs;
19476 -               if (rdmsr_safe(index, &data_low, &data_high) < 0)
19477 -                       continue;
19478 -               if (wrmsr_safe(index, data_low, data_high) < 0)
19479 +               if (kvm_probe_user_return_msr(index))
19480                         continue;
19482                 vmx->guest_uret_msrs[j].slot = i;
19483 @@ -6938,9 +6955,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
19484         bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
19486         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
19487 +#ifdef CONFIG_X86_64
19488         vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
19489         vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
19490         vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
19491 +#endif
19492         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
19493         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
19494         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
19495 @@ -7330,9 +7349,11 @@ static __init void vmx_set_cpu_caps(void)
19496         if (!cpu_has_vmx_xsaves())
19497                 kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
19499 -       /* CPUID 0x80000001 */
19500 -       if (!cpu_has_vmx_rdtscp())
19501 +       /* CPUID 0x80000001 and 0x7 (RDPID) */
19502 +       if (!cpu_has_vmx_rdtscp()) {
19503                 kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
19504 +               kvm_cpu_cap_clear(X86_FEATURE_RDPID);
19505 +       }
19507         if (cpu_has_vmx_waitpkg())
19508                 kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
19509 @@ -7388,8 +7409,9 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
19510         /*
19511          * RDPID causes #UD if disabled through secondary execution controls.
19512          * Because it is marked as EmulateOnUD, we need to intercept it here.
19513 +        * Note, RDPID is hidden behind ENABLE_RDTSCP.
19514          */
19515 -       case x86_intercept_rdtscp:
19516 +       case x86_intercept_rdpid:
19517                 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
19518                         exception->vector = UD_VECTOR;
19519                         exception->error_code_valid = false;
19520 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
19521 index ee0dc58ac3a5..87311d39f914 100644
19522 --- a/arch/x86/kvm/x86.c
19523 +++ b/arch/x86/kvm/x86.c
19524 @@ -335,6 +335,22 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
19525         }
19528 +int kvm_probe_user_return_msr(u32 msr)
19530 +       u64 val;
19531 +       int ret;
19533 +       preempt_disable();
19534 +       ret = rdmsrl_safe(msr, &val);
19535 +       if (ret)
19536 +               goto out;
19537 +       ret = wrmsrl_safe(msr, val);
19538 +out:
19539 +       preempt_enable();
19540 +       return ret;
19542 +EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
19544  void kvm_define_user_return_msr(unsigned slot, u32 msr)
19546         BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
19547 @@ -1072,10 +1088,15 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
19548                 return 0;
19549         }
19551 -       if (is_long_mode(vcpu) && kvm_vcpu_is_illegal_gpa(vcpu, cr3))
19552 +       /*
19553 +        * Do not condition the GPA check on long mode, this helper is used to
19554 +        * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
19555 +        * the current vCPU mode is accurate.
19556 +        */
19557 +       if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
19558                 return 1;
19559 -       else if (is_pae_paging(vcpu) &&
19560 -                !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
19562 +       if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
19563                 return 1;
19565         kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
19566 @@ -5859,7 +5880,8 @@ static void kvm_init_msr_list(void)
19567                                 continue;
19568                         break;
19569                 case MSR_TSC_AUX:
19570 -                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
19571 +                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
19572 +                           !kvm_cpu_cap_has(X86_FEATURE_RDPID))
19573                                 continue;
19574                         break;
19575                 case MSR_IA32_UMWAIT_CONTROL:
19576 @@ -7959,6 +7981,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
19578  static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
19581 + * Indirection to move queue_work() out of the tk_core.seq write held
19582 + * region to prevent possible deadlocks against time accessors which
19583 + * are invoked with work related locks held.
19584 + */
19585 +static void pvclock_irq_work_fn(struct irq_work *w)
19587 +       queue_work(system_long_wq, &pvclock_gtod_work);
19590 +static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
19592  /*
19593   * Notification about pvclock gtod data update.
19594   */
19595 @@ -7970,13 +8004,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
19597         update_pvclock_gtod(tk);
19599 -       /* disable master clock if host does not trust, or does not
19600 -        * use, TSC based clocksource.
19601 +       /*
19602 +        * Disable master clock if host does not trust, or does not use,
19603 +        * TSC based clocksource. Delegate queue_work() to irq_work as
19604 +        * this is invoked with tk_core.seq write held.
19605          */
19606         if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
19607             atomic_read(&kvm_guest_has_master_clock) != 0)
19608 -               queue_work(system_long_wq, &pvclock_gtod_work);
19610 +               irq_work_queue(&pvclock_irq_work);
19611         return 0;
19614 @@ -8091,6 +8126,8 @@ void kvm_arch_exit(void)
19615         cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
19616  #ifdef CONFIG_X86_64
19617         pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
19618 +       irq_work_sync(&pvclock_irq_work);
19619 +       cancel_work_sync(&pvclock_gtod_work);
19620  #endif
19621         kvm_x86_ops.hardware_enable = NULL;
19622         kvm_mmu_module_exit();
19623 @@ -11020,6 +11057,9 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
19625  bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
19627 +       if (vcpu->arch.guest_state_protected)
19628 +               return true;
19630         return vcpu->arch.preempted_in_kernel;
19633 @@ -11290,7 +11330,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
19634         if (!kvm_pv_async_pf_enabled(vcpu))
19635                 return true;
19636         else
19637 -               return apf_pageready_slot_free(vcpu);
19638 +               return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
19641  void kvm_arch_start_assignment(struct kvm *kvm)
19642 @@ -11539,7 +11579,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
19644                 fallthrough;
19645         case INVPCID_TYPE_ALL_INCL_GLOBAL:
19646 -               kvm_mmu_unload(vcpu);
19647 +               kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
19648                 return kvm_skip_emulated_instruction(vcpu);
19650         default:
19651 diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
19652 index ae17250e1efe..7f27bb65a572 100644
19653 --- a/arch/x86/kvm/xen.c
19654 +++ b/arch/x86/kvm/xen.c
19655 @@ -673,7 +673,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
19656         bool longmode;
19657         u64 input, params[6];
19659 -       input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
19660 +       input = (u64)kvm_register_readl(vcpu, VCPU_REGS_RAX);
19662         /* Hyper-V hypercalls get bit 31 set in EAX */
19663         if ((input & 0x80000000) &&
19664 diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
19665 index 6c5eb6f3f14f..a19374d26101 100644
19666 --- a/arch/x86/mm/mem_encrypt_identity.c
19667 +++ b/arch/x86/mm/mem_encrypt_identity.c
19668 @@ -503,14 +503,10 @@ void __init sme_enable(struct boot_params *bp)
19670  #define AMD_SME_BIT    BIT(0)
19671  #define AMD_SEV_BIT    BIT(1)
19672 -       /*
19673 -        * Set the feature mask (SME or SEV) based on whether we are
19674 -        * running under a hypervisor.
19675 -        */
19676 -       eax = 1;
19677 -       ecx = 0;
19678 -       native_cpuid(&eax, &ebx, &ecx, &edx);
19679 -       feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
19681 +       /* Check the SEV MSR whether SEV or SME is enabled */
19682 +       sev_status   = __rdmsr(MSR_AMD64_SEV);
19683 +       feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
19685         /*
19686          * Check for the SME/SEV feature:
19687 @@ -530,19 +526,26 @@ void __init sme_enable(struct boot_params *bp)
19689         /* Check if memory encryption is enabled */
19690         if (feature_mask == AMD_SME_BIT) {
19691 +               /*
19692 +                * No SME if Hypervisor bit is set. This check is here to
19693 +                * prevent a guest from trying to enable SME. For running as a
19694 +                * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
19695 +                * might be other hypervisors which emulate that MSR as non-zero
19696 +                * or even pass it through to the guest.
19697 +                * A malicious hypervisor can still trick a guest into this
19698 +                * path, but there is no way to protect against that.
19699 +                */
19700 +               eax = 1;
19701 +               ecx = 0;
19702 +               native_cpuid(&eax, &ebx, &ecx, &edx);
19703 +               if (ecx & BIT(31))
19704 +                       return;
19706                 /* For SME, check the SYSCFG MSR */
19707                 msr = __rdmsr(MSR_K8_SYSCFG);
19708                 if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
19709                         return;
19710         } else {
19711 -               /* For SEV, check the SEV MSR */
19712 -               msr = __rdmsr(MSR_AMD64_SEV);
19713 -               if (!(msr & MSR_AMD64_SEV_ENABLED))
19714 -                       return;
19716 -               /* Save SEV_STATUS to avoid reading MSR again */
19717 -               sev_status = msr;
19719                 /* SEV state cannot be controlled by a command line option */
19720                 sme_me_mask = me_mask;
19721                 sev_enabled = true;
19722 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
19723 index f6a9e2e36642..1c27e6f43f80 100644
19724 --- a/arch/x86/mm/pgtable.c
19725 +++ b/arch/x86/mm/pgtable.c
19726 @@ -550,7 +550,7 @@ int ptep_test_and_clear_young(struct vm_area_struct *vma,
19727         return ret;
19730 -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
19731 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
19732  int pmdp_test_and_clear_young(struct vm_area_struct *vma,
19733                               unsigned long addr, pmd_t *pmdp)
19735 @@ -562,6 +562,9 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
19737         return ret;
19739 +#endif
19741 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
19742  int pudp_test_and_clear_young(struct vm_area_struct *vma,
19743                               unsigned long addr, pud_t *pudp)
19745 diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
19746 index cd3914fc9f3d..e94e0050a583 100644
19747 --- a/arch/x86/power/hibernate.c
19748 +++ b/arch/x86/power/hibernate.c
19749 @@ -13,8 +13,8 @@
19750  #include <linux/kdebug.h>
19751  #include <linux/cpu.h>
19752  #include <linux/pgtable.h>
19754 -#include <crypto/hash.h>
19755 +#include <linux/types.h>
19756 +#include <linux/crc32.h>
19758  #include <asm/e820/api.h>
19759  #include <asm/init.h>
19760 @@ -54,95 +54,33 @@ int pfn_is_nosave(unsigned long pfn)
19761         return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
19765 -#define MD5_DIGEST_SIZE 16
19767  struct restore_data_record {
19768         unsigned long jump_address;
19769         unsigned long jump_address_phys;
19770         unsigned long cr3;
19771         unsigned long magic;
19772 -       u8 e820_digest[MD5_DIGEST_SIZE];
19773 +       unsigned long e820_checksum;
19774  };
19776 -#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
19777  /**
19778 - * get_e820_md5 - calculate md5 according to given e820 table
19779 + * compute_e820_crc32 - calculate crc32 of a given e820 table
19780   *
19781   * @table: the e820 table to be calculated
19782 - * @buf: the md5 result to be stored to
19783 + *
19784 + * Return: the resulting checksum
19785   */
19786 -static int get_e820_md5(struct e820_table *table, void *buf)
19787 +static inline u32 compute_e820_crc32(struct e820_table *table)
19789 -       struct crypto_shash *tfm;
19790 -       struct shash_desc *desc;
19791 -       int size;
19792 -       int ret = 0;
19794 -       tfm = crypto_alloc_shash("md5", 0, 0);
19795 -       if (IS_ERR(tfm))
19796 -               return -ENOMEM;
19798 -       desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
19799 -                      GFP_KERNEL);
19800 -       if (!desc) {
19801 -               ret = -ENOMEM;
19802 -               goto free_tfm;
19803 -       }
19805 -       desc->tfm = tfm;
19807 -       size = offsetof(struct e820_table, entries) +
19808 +       int size = offsetof(struct e820_table, entries) +
19809                 sizeof(struct e820_entry) * table->nr_entries;
19811 -       if (crypto_shash_digest(desc, (u8 *)table, size, buf))
19812 -               ret = -EINVAL;
19814 -       kfree_sensitive(desc);
19816 -free_tfm:
19817 -       crypto_free_shash(tfm);
19818 -       return ret;
19821 -static int hibernation_e820_save(void *buf)
19823 -       return get_e820_md5(e820_table_firmware, buf);
19826 -static bool hibernation_e820_mismatch(void *buf)
19828 -       int ret;
19829 -       u8 result[MD5_DIGEST_SIZE];
19831 -       memset(result, 0, MD5_DIGEST_SIZE);
19832 -       /* If there is no digest in suspend kernel, let it go. */
19833 -       if (!memcmp(result, buf, MD5_DIGEST_SIZE))
19834 -               return false;
19836 -       ret = get_e820_md5(e820_table_firmware, result);
19837 -       if (ret)
19838 -               return true;
19840 -       return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
19842 -#else
19843 -static int hibernation_e820_save(void *buf)
19845 -       return 0;
19848 -static bool hibernation_e820_mismatch(void *buf)
19850 -       /* If md5 is not builtin for restore kernel, let it go. */
19851 -       return false;
19852 +       return ~crc32_le(~0, (unsigned char const *)table, size);
19854 -#endif
19856  #ifdef CONFIG_X86_64
19857 -#define RESTORE_MAGIC  0x23456789ABCDEF01UL
19858 +#define RESTORE_MAGIC  0x23456789ABCDEF02UL
19859  #else
19860 -#define RESTORE_MAGIC  0x12345678UL
19861 +#define RESTORE_MAGIC  0x12345679UL
19862  #endif
19864  /**
19865 @@ -179,7 +117,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
19866          */
19867         rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
19869 -       return hibernation_e820_save(rdr->e820_digest);
19870 +       rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
19871 +       return 0;
19874  /**
19875 @@ -200,7 +139,7 @@ int arch_hibernation_header_restore(void *addr)
19876         jump_address_phys = rdr->jump_address_phys;
19877         restore_cr3 = rdr->cr3;
19879 -       if (hibernation_e820_mismatch(rdr->e820_digest)) {
19880 +       if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
19881                 pr_crit("Hibernate inconsistent memory map detected!\n");
19882                 return -ENODEV;
19883         }
19884 diff --git a/block/Kconfig b/block/Kconfig
19885 index a2297edfdde8..f688ea5f0dbd 100644
19886 --- a/block/Kconfig
19887 +++ b/block/Kconfig
19888 @@ -83,7 +83,7 @@ config BLK_DEV_INTEGRITY_T10
19890  config BLK_DEV_ZONED
19891         bool "Zoned block device support"
19892 -       select MQ_IOSCHED_DEADLINE
19893 +       select IOSCHED_BFQ
19894         help
19895         Block layer zoned block device support. This option enables
19896         support for ZAC/ZBC/ZNS host-managed and host-aware zoned block
19897 diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
19898 index 2f2158e05a91..e58b2953ac16 100644
19899 --- a/block/Kconfig.iosched
19900 +++ b/block/Kconfig.iosched
19901 @@ -5,13 +5,11 @@ menu "IO Schedulers"
19903  config MQ_IOSCHED_DEADLINE
19904         tristate "MQ deadline I/O scheduler"
19905 -       default y
19906         help
19907           MQ version of the deadline IO scheduler.
19909  config MQ_IOSCHED_KYBER
19910         tristate "Kyber I/O scheduler"
19911 -       default y
19912         help
19913           The Kyber I/O scheduler is a low-overhead scheduler suitable for
19914           multiqueue and other fast devices. Given target latencies for reads and
19915 @@ -20,6 +18,7 @@ config MQ_IOSCHED_KYBER
19917  config IOSCHED_BFQ
19918         tristate "BFQ I/O scheduler"
19919 +       default y
19920         help
19921         BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
19922         of the device among all processes according to their weights,
19923 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
19924 index 95586137194e..bc319931d2b3 100644
19925 --- a/block/bfq-iosched.c
19926 +++ b/block/bfq-iosched.c
19927 @@ -1012,7 +1012,7 @@ static void
19928  bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
19929                       struct bfq_io_cq *bic, bool bfq_already_existing)
19931 -       unsigned int old_wr_coeff = bfqq->wr_coeff;
19932 +       unsigned int old_wr_coeff = 1;
19933         bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
19935         if (bic->saved_has_short_ttime)
19936 @@ -1033,7 +1033,13 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
19937         bfqq->ttime = bic->saved_ttime;
19938         bfqq->io_start_time = bic->saved_io_start_time;
19939         bfqq->tot_idle_time = bic->saved_tot_idle_time;
19940 -       bfqq->wr_coeff = bic->saved_wr_coeff;
19941 +       /*
19942 +        * Restore weight coefficient only if low_latency is on
19943 +        */
19944 +       if (bfqd->low_latency) {
19945 +               old_wr_coeff = bfqq->wr_coeff;
19946 +               bfqq->wr_coeff = bic->saved_wr_coeff;
19947 +       }
19948         bfqq->service_from_wr = bic->saved_service_from_wr;
19949         bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
19950         bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
19951 @@ -2257,10 +2263,9 @@ static void bfq_remove_request(struct request_queue *q,
19955 -static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
19956 +static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
19957                 unsigned int nr_segs)
19959 -       struct request_queue *q = hctx->queue;
19960         struct bfq_data *bfqd = q->elevator->elevator_data;
19961         struct request *free = NULL;
19962         /*
19963 diff --git a/block/blk-iocost.c b/block/blk-iocost.c
19964 index 98d656bdb42b..4fbc875f7cb2 100644
19965 --- a/block/blk-iocost.c
19966 +++ b/block/blk-iocost.c
19967 @@ -1073,7 +1073,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
19969         lockdep_assert_held(&ioc->lock);
19971 -       inuse = clamp_t(u32, inuse, 1, active);
19972 +       /*
19973 +        * For an active leaf node, its inuse shouldn't be zero or exceed
19974 +        * @active. An active internal node's inuse is solely determined by the
19975 +        * inuse to active ratio of its children regardless of @inuse.
19976 +        */
19977 +       if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
19978 +               inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
19979 +                                          iocg->child_active_sum);
19980 +       } else {
19981 +               inuse = clamp_t(u32, inuse, 1, active);
19982 +       }
19984         iocg->last_inuse = iocg->inuse;
19985         if (save)
19986 @@ -1090,7 +1100,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
19987                 /* update the level sums */
19988                 parent->child_active_sum += (s32)(active - child->active);
19989                 parent->child_inuse_sum += (s32)(inuse - child->inuse);
19990 -               /* apply the udpates */
19991 +               /* apply the updates */
19992                 child->active = active;
19993                 child->inuse = inuse;
19995 diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
19996 index e1e997af89a0..fdeb9773b55c 100644
19997 --- a/block/blk-mq-sched.c
19998 +++ b/block/blk-mq-sched.c
19999 @@ -348,14 +348,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
20000                 unsigned int nr_segs)
20002         struct elevator_queue *e = q->elevator;
20003 -       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
20004 -       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
20005 +       struct blk_mq_ctx *ctx;
20006 +       struct blk_mq_hw_ctx *hctx;
20007         bool ret = false;
20008         enum hctx_type type;
20010         if (e && e->type->ops.bio_merge)
20011 -               return e->type->ops.bio_merge(hctx, bio, nr_segs);
20012 +               return e->type->ops.bio_merge(q, bio, nr_segs);
20014 +       ctx = blk_mq_get_ctx(q);
20015 +       hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
20016         type = hctx->type;
20017         if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
20018             list_empty_careful(&ctx->rq_lists[type]))
20019 diff --git a/block/blk-mq.c b/block/blk-mq.c
20020 index d4d7c1caa439..0e120547ccb7 100644
20021 --- a/block/blk-mq.c
20022 +++ b/block/blk-mq.c
20023 @@ -2216,8 +2216,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
20024                 /* Bypass scheduler for flush requests */
20025                 blk_insert_flush(rq);
20026                 blk_mq_run_hw_queue(data.hctx, true);
20027 -       } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
20028 -                               !blk_queue_nonrot(q))) {
20029 +       } else if (plug && (q->nr_hw_queues == 1 ||
20030 +                  blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
20031 +                  q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
20032                 /*
20033                  * Use plugging if we have a ->commit_rqs() hook as well, as
20034                  * we know the driver uses bd->last in a smart fashion.
20035 @@ -3269,10 +3270,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
20036  /* tags can _not_ be used after returning from blk_mq_exit_queue */
20037  void blk_mq_exit_queue(struct request_queue *q)
20039 -       struct blk_mq_tag_set   *set = q->tag_set;
20040 +       struct blk_mq_tag_set *set = q->tag_set;
20042 -       blk_mq_del_queue_tag_set(q);
20043 +       /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
20044         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
20045 +       /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
20046 +       blk_mq_del_queue_tag_set(q);
20049  static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
20050 diff --git a/block/elevator.c b/block/elevator.c
20051 index 293c5c81397a..71111fa80628 100644
20052 --- a/block/elevator.c
20053 +++ b/block/elevator.c
20054 @@ -616,15 +616,15 @@ static inline bool elv_support_iosched(struct request_queue *q)
20057  /*
20058 - * For single queue devices, default to using mq-deadline. If we have multiple
20059 - * queues or mq-deadline is not available, default to "none".
20060 + * For single queue devices, default to using bfq. If we have multiple
20061 + * queues or bfq is not available, default to "none".
20062   */
20063  static struct elevator_type *elevator_get_default(struct request_queue *q)
20065         if (q->nr_hw_queues != 1)
20066                 return NULL;
20068 -       return elevator_get(q, "mq-deadline", false);
20069 +       return elevator_get(q, "bfq", false);
20072  /*
20073 diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
20074 index 33d34d69cade..79b69d7046d6 100644
20075 --- a/block/kyber-iosched.c
20076 +++ b/block/kyber-iosched.c
20077 @@ -560,11 +560,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
20078         }
20081 -static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
20082 +static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
20083                 unsigned int nr_segs)
20085 +       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
20086 +       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
20087         struct kyber_hctx_data *khd = hctx->sched_data;
20088 -       struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
20089         struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
20090         unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
20091         struct list_head *rq_list = &kcq->rq_list[sched_domain];
20092 diff --git a/block/mq-deadline.c b/block/mq-deadline.c
20093 index f3631a287466..3aabcd2a7893 100644
20094 --- a/block/mq-deadline.c
20095 +++ b/block/mq-deadline.c
20096 @@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
20097         return ELEVATOR_NO_MERGE;
20100 -static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
20101 +static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
20102                 unsigned int nr_segs)
20104 -       struct request_queue *q = hctx->queue;
20105         struct deadline_data *dd = q->elevator->elevator_data;
20106         struct request *free = NULL;
20107         bool ret;
20108 diff --git a/crypto/api.c b/crypto/api.c
20109 index ed08cbd5b9d3..c4eda56cff89 100644
20110 --- a/crypto/api.c
20111 +++ b/crypto/api.c
20112 @@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
20114         struct crypto_alg *alg;
20116 -       if (unlikely(!mem))
20117 +       if (IS_ERR_OR_NULL(mem))
20118                 return;
20120         alg = tfm->__crt_alg;
20121 diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
20122 index a057ecb1288d..6cd7f7025df4 100644
20123 --- a/crypto/async_tx/async_xor.c
20124 +++ b/crypto/async_tx/async_xor.c
20125 @@ -233,6 +233,7 @@ async_xor_offs(struct page *dest, unsigned int offset,
20126                 if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
20127                         src_cnt--;
20128                         src_list++;
20129 +                       src_offs++;
20130                 }
20132                 /* wait for any prerequisite operations */
20133 diff --git a/crypto/rng.c b/crypto/rng.c
20134 index a888d84b524a..fea082b25fe4 100644
20135 --- a/crypto/rng.c
20136 +++ b/crypto/rng.c
20137 @@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
20138         u8 *buf = NULL;
20139         int err;
20141 -       crypto_stats_get(alg);
20142         if (!seed && slen) {
20143                 buf = kmalloc(slen, GFP_KERNEL);
20144 -               if (!buf) {
20145 -                       crypto_alg_put(alg);
20146 +               if (!buf)
20147                         return -ENOMEM;
20148 -               }
20150                 err = get_random_bytes_wait(buf, slen);
20151 -               if (err) {
20152 -                       crypto_alg_put(alg);
20153 +               if (err)
20154                         goto out;
20155 -               }
20156                 seed = buf;
20157         }
20159 +       crypto_stats_get(alg);
20160         err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
20161         crypto_stats_rng_seed(alg, err);
20162  out:
20163 diff --git a/crypto/zstd.c b/crypto/zstd.c
20164 index 1a3309f066f7..154a969c83a8 100644
20165 --- a/crypto/zstd.c
20166 +++ b/crypto/zstd.c
20167 @@ -18,22 +18,22 @@
20168  #define ZSTD_DEF_LEVEL 3
20170  struct zstd_ctx {
20171 -       ZSTD_CCtx *cctx;
20172 -       ZSTD_DCtx *dctx;
20173 +       zstd_cctx *cctx;
20174 +       zstd_dctx *dctx;
20175         void *cwksp;
20176         void *dwksp;
20177  };
20179 -static ZSTD_parameters zstd_params(void)
20180 +static zstd_parameters zstd_params(void)
20182 -       return ZSTD_getParams(ZSTD_DEF_LEVEL, 0, 0);
20183 +       return zstd_get_params(ZSTD_DEF_LEVEL, 0);
20186  static int zstd_comp_init(struct zstd_ctx *ctx)
20188         int ret = 0;
20189 -       const ZSTD_parameters params = zstd_params();
20190 -       const size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams);
20191 +       const zstd_parameters params = zstd_params();
20192 +       const size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
20194         ctx->cwksp = vzalloc(wksp_size);
20195         if (!ctx->cwksp) {
20196 @@ -41,7 +41,7 @@ static int zstd_comp_init(struct zstd_ctx *ctx)
20197                 goto out;
20198         }
20200 -       ctx->cctx = ZSTD_initCCtx(ctx->cwksp, wksp_size);
20201 +       ctx->cctx = zstd_init_cctx(ctx->cwksp, wksp_size);
20202         if (!ctx->cctx) {
20203                 ret = -EINVAL;
20204                 goto out_free;
20205 @@ -56,7 +56,7 @@ static int zstd_comp_init(struct zstd_ctx *ctx)
20206  static int zstd_decomp_init(struct zstd_ctx *ctx)
20208         int ret = 0;
20209 -       const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
20210 +       const size_t wksp_size = zstd_dctx_workspace_bound();
20212         ctx->dwksp = vzalloc(wksp_size);
20213         if (!ctx->dwksp) {
20214 @@ -64,7 +64,7 @@ static int zstd_decomp_init(struct zstd_ctx *ctx)
20215                 goto out;
20216         }
20218 -       ctx->dctx = ZSTD_initDCtx(ctx->dwksp, wksp_size);
20219 +       ctx->dctx = zstd_init_dctx(ctx->dwksp, wksp_size);
20220         if (!ctx->dctx) {
20221                 ret = -EINVAL;
20222                 goto out_free;
20223 @@ -152,10 +152,10 @@ static int __zstd_compress(const u8 *src, unsigned int slen,
20225         size_t out_len;
20226         struct zstd_ctx *zctx = ctx;
20227 -       const ZSTD_parameters params = zstd_params();
20228 +       const zstd_parameters params = zstd_params();
20230 -       out_len = ZSTD_compressCCtx(zctx->cctx, dst, *dlen, src, slen, params);
20231 -       if (ZSTD_isError(out_len))
20232 +       out_len = zstd_compress_cctx(zctx->cctx, dst, *dlen, src, slen, &params);
20233 +       if (zstd_is_error(out_len))
20234                 return -EINVAL;
20235         *dlen = out_len;
20236         return 0;
20237 @@ -182,8 +182,8 @@ static int __zstd_decompress(const u8 *src, unsigned int slen,
20238         size_t out_len;
20239         struct zstd_ctx *zctx = ctx;
20241 -       out_len = ZSTD_decompressDCtx(zctx->dctx, dst, *dlen, src, slen);
20242 -       if (ZSTD_isError(out_len))
20243 +       out_len = zstd_decompress_dctx(zctx->dctx, dst, *dlen, src, slen);
20244 +       if (zstd_is_error(out_len))
20245                 return -EINVAL;
20246         *dlen = out_len;
20247         return 0;
20248 diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c
20249 index c1ec087dca13..b2d0d4266f62 100644
20250 --- a/drivers/accessibility/speakup/speakup_acntpc.c
20251 +++ b/drivers/accessibility/speakup/speakup_acntpc.c
20252 @@ -198,7 +198,7 @@ static void do_catch_up(struct spk_synth *synth)
20253                 full_time_val = full_time->u.n.value;
20254                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20255                 if (synth_full()) {
20256 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
20257 +                       schedule_msec_hrtimeout((full_time_val));
20258                         continue;
20259                 }
20260                 set_current_state(TASK_RUNNING);
20261 @@ -226,7 +226,7 @@ static void do_catch_up(struct spk_synth *synth)
20262                         jiffy_delta_val = jiffy_delta->u.n.value;
20263                         delay_time_val = delay_time->u.n.value;
20264                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20265 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20266 +                       schedule_msec_hrtimeout(delay_time_val);
20267                         jiff_max = jiffies + jiffy_delta_val;
20268                 }
20269         }
20270 diff --git a/drivers/accessibility/speakup/speakup_apollo.c b/drivers/accessibility/speakup/speakup_apollo.c
20271 index cd63581b2e99..d636157a2844 100644
20272 --- a/drivers/accessibility/speakup/speakup_apollo.c
20273 +++ b/drivers/accessibility/speakup/speakup_apollo.c
20274 @@ -165,7 +165,7 @@ static void do_catch_up(struct spk_synth *synth)
20275                 if (!synth->io_ops->synth_out(synth, ch)) {
20276                         synth->io_ops->tiocmset(synth, 0, UART_MCR_RTS);
20277                         synth->io_ops->tiocmset(synth, UART_MCR_RTS, 0);
20278 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
20279 +                       schedule_msec_hrtimeout(full_time_val);
20280                         continue;
20281                 }
20282                 if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
20283 diff --git a/drivers/accessibility/speakup/speakup_decext.c b/drivers/accessibility/speakup/speakup_decext.c
20284 index 092cfd08a9e1..e7fc85f8ce5c 100644
20285 --- a/drivers/accessibility/speakup/speakup_decext.c
20286 +++ b/drivers/accessibility/speakup/speakup_decext.c
20287 @@ -180,7 +180,7 @@ static void do_catch_up(struct spk_synth *synth)
20288                 if (ch == '\n')
20289                         ch = 0x0D;
20290                 if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
20291 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20292 +                       schedule_msec_hrtimeout(delay_time_val);
20293                         continue;
20294                 }
20295                 set_current_state(TASK_RUNNING);
20296 diff --git a/drivers/accessibility/speakup/speakup_decpc.c b/drivers/accessibility/speakup/speakup_decpc.c
20297 index dec314dee214..2a5deb5256b2 100644
20298 --- a/drivers/accessibility/speakup/speakup_decpc.c
20299 +++ b/drivers/accessibility/speakup/speakup_decpc.c
20300 @@ -398,7 +398,7 @@ static void do_catch_up(struct spk_synth *synth)
20301                 if (ch == '\n')
20302                         ch = 0x0D;
20303                 if (dt_sendchar(ch)) {
20304 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20305 +                       schedule_msec_hrtimeout((delay_time_val));
20306                         continue;
20307                 }
20308                 set_current_state(TASK_RUNNING);
20309 diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
20310 index 580ec796816b..67c156b90ddb 100644
20311 --- a/drivers/accessibility/speakup/speakup_dectlk.c
20312 +++ b/drivers/accessibility/speakup/speakup_dectlk.c
20313 @@ -256,7 +256,7 @@ static void do_catch_up(struct spk_synth *synth)
20314                 if (ch == '\n')
20315                         ch = 0x0D;
20316                 if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
20317 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20318 +                       schedule_msec_hrtimeout(delay_time_val);
20319                         continue;
20320                 }
20321                 set_current_state(TASK_RUNNING);
20322 diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c
20323 index 92838d3ae9eb..b687cb4d3268 100644
20324 --- a/drivers/accessibility/speakup/speakup_dtlk.c
20325 +++ b/drivers/accessibility/speakup/speakup_dtlk.c
20326 @@ -211,7 +211,7 @@ static void do_catch_up(struct spk_synth *synth)
20327                 delay_time_val = delay_time->u.n.value;
20328                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20329                 if (synth_full()) {
20330 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20331 +                       schedule_msec_hrtimeout((delay_time_val));
20332                         continue;
20333                 }
20334                 set_current_state(TASK_RUNNING);
20335 @@ -227,7 +227,7 @@ static void do_catch_up(struct spk_synth *synth)
20336                         delay_time_val = delay_time->u.n.value;
20337                         jiffy_delta_val = jiffy_delta->u.n.value;
20338                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20339 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20340 +                       schedule_msec_hrtimeout((delay_time_val));
20341                         jiff_max = jiffies + jiffy_delta_val;
20342                 }
20343         }
20344 diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c
20345 index 311f4aa0be22..99c523fdcc98 100644
20346 --- a/drivers/accessibility/speakup/speakup_keypc.c
20347 +++ b/drivers/accessibility/speakup/speakup_keypc.c
20348 @@ -199,7 +199,7 @@ static void do_catch_up(struct spk_synth *synth)
20349                 full_time_val = full_time->u.n.value;
20350                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20351                 if (synth_full()) {
20352 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
20353 +                       schedule_msec_hrtimeout((full_time_val));
20354                         continue;
20355                 }
20356                 set_current_state(TASK_RUNNING);
20357 @@ -232,7 +232,7 @@ static void do_catch_up(struct spk_synth *synth)
20358                         jiffy_delta_val = jiffy_delta->u.n.value;
20359                         delay_time_val = delay_time->u.n.value;
20360                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20361 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20362 +                       schedule_msec_hrtimeout(delay_time_val);
20363                         jiff_max = jiffies + jiffy_delta_val;
20364                 }
20365         }
20366 diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
20367 index 2b8699673bac..bf0cbdaf564f 100644
20368 --- a/drivers/accessibility/speakup/synth.c
20369 +++ b/drivers/accessibility/speakup/synth.c
20370 @@ -93,12 +93,8 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
20371                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20372                 if (ch == '\n')
20373                         ch = synth->procspeech;
20374 -               if (unicode)
20375 -                       ret = synth->io_ops->synth_out_unicode(synth, ch);
20376 -               else
20377 -                       ret = synth->io_ops->synth_out(synth, ch);
20378 -               if (!ret) {
20379 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
20380 +               if (!synth->io_ops->synth_out(synth, ch)) {
20381 +                       schedule_msec_hrtimeout(full_time_val);
20382                         continue;
20383                 }
20384                 if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
20385 @@ -108,11 +104,9 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
20386                         full_time_val = full_time->u.n.value;
20387                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20388                         if (synth->io_ops->synth_out(synth, synth->procspeech))
20389 -                               schedule_timeout(
20390 -                                       msecs_to_jiffies(delay_time_val));
20391 +                               schedule_msec_hrtimeout(delay_time_val);
20392                         else
20393 -                               schedule_timeout(
20394 -                                       msecs_to_jiffies(full_time_val));
20395 +                               schedule_msec_hrtimeout(full_time_val);
20396                         jiff_max = jiffies + jiffy_delta_val;
20397                 }
20398                 set_current_state(TASK_RUNNING);
20399 diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
20400 index f2d0e5915dab..0a0a982f9c28 100644
20401 --- a/drivers/acpi/arm64/gtdt.c
20402 +++ b/drivers/acpi/arm64/gtdt.c
20403 @@ -329,7 +329,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
20404                                         int index)
20406         struct platform_device *pdev;
20407 -       int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
20408 +       int irq;
20410         /*
20411          * According to SBSA specification the size of refresh and control
20412 @@ -338,7 +338,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
20413         struct resource res[] = {
20414                 DEFINE_RES_MEM(wd->control_frame_address, SZ_4K),
20415                 DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K),
20416 -               DEFINE_RES_IRQ(irq),
20417 +               {},
20418         };
20419         int nr_res = ARRAY_SIZE(res);
20421 @@ -348,10 +348,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
20423         if (!(wd->refresh_frame_address && wd->control_frame_address)) {
20424                 pr_err(FW_BUG "failed to get the Watchdog base address.\n");
20425 -               acpi_unregister_gsi(wd->timer_interrupt);
20426                 return -EINVAL;
20427         }
20429 +       irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
20430 +       res[2] = (struct resource)DEFINE_RES_IRQ(irq);
20431         if (irq <= 0) {
20432                 pr_warn("failed to map the Watchdog interrupt.\n");
20433                 nr_res--;
20434 @@ -364,7 +365,8 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
20435          */
20436         pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res);
20437         if (IS_ERR(pdev)) {
20438 -               acpi_unregister_gsi(wd->timer_interrupt);
20439 +               if (irq > 0)
20440 +                       acpi_unregister_gsi(wd->timer_interrupt);
20441                 return PTR_ERR(pdev);
20442         }
20444 diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
20445 index 69057fcd2c04..a5e6fd0bafa1 100644
20446 --- a/drivers/acpi/cppc_acpi.c
20447 +++ b/drivers/acpi/cppc_acpi.c
20448 @@ -119,23 +119,15 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
20449   */
20450  #define NUM_RETRIES 500ULL
20452 -struct cppc_attr {
20453 -       struct attribute attr;
20454 -       ssize_t (*show)(struct kobject *kobj,
20455 -                       struct attribute *attr, char *buf);
20456 -       ssize_t (*store)(struct kobject *kobj,
20457 -                       struct attribute *attr, const char *c, ssize_t count);
20460  #define define_one_cppc_ro(_name)              \
20461 -static struct cppc_attr _name =                        \
20462 +static struct kobj_attribute _name =           \
20463  __ATTR(_name, 0444, show_##_name, NULL)
20465  #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
20467  #define show_cppc_data(access_fn, struct_name, member_name)            \
20468         static ssize_t show_##member_name(struct kobject *kobj,         \
20469 -                                       struct attribute *attr, char *buf) \
20470 +                               struct kobj_attribute *attr, char *buf) \
20471         {                                                               \
20472                 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);           \
20473                 struct struct_name st_name = {0};                       \
20474 @@ -161,7 +153,7 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
20475  show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
20477  static ssize_t show_feedback_ctrs(struct kobject *kobj,
20478 -               struct attribute *attr, char *buf)
20479 +               struct kobj_attribute *attr, char *buf)
20481         struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
20482         struct cppc_perf_fb_ctrs fb_ctrs = {0};
20483 diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
20484 index 7b54dc95d36b..4058e0241091 100644
20485 --- a/drivers/acpi/custom_method.c
20486 +++ b/drivers/acpi/custom_method.c
20487 @@ -42,6 +42,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
20488                                    sizeof(struct acpi_table_header)))
20489                         return -EFAULT;
20490                 uncopied_bytes = max_size = table.length;
20491 +               /* make sure the buf is not allocated */
20492 +               kfree(buf);
20493                 buf = kzalloc(max_size, GFP_KERNEL);
20494                 if (!buf)
20495                         return -ENOMEM;
20496 @@ -55,6 +57,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
20497             (*ppos + count < count) ||
20498             (count > uncopied_bytes)) {
20499                 kfree(buf);
20500 +               buf = NULL;
20501                 return -EINVAL;
20502         }
20504 @@ -76,7 +79,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
20505                 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
20506         }
20508 -       kfree(buf);
20509         return count;
20512 diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
20513 index 096153761ebc..58876248b192 100644
20514 --- a/drivers/acpi/device_pm.c
20515 +++ b/drivers/acpi/device_pm.c
20516 @@ -1310,6 +1310,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
20517                 {"PNP0C0B", }, /* Generic ACPI fan */
20518                 {"INT3404", }, /* Fan */
20519                 {"INTC1044", }, /* Fan for Tiger Lake generation */
20520 +               {"INTC1048", }, /* Fan for Alder Lake generation */
20521                 {}
20522         };
20523         struct acpi_device *adev = ACPI_COMPANION(dev);
20524 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
20525 index 6efe7edd7b1e..345777bf7af9 100644
20526 --- a/drivers/acpi/scan.c
20527 +++ b/drivers/acpi/scan.c
20528 @@ -701,6 +701,7 @@ int acpi_device_add(struct acpi_device *device,
20530                 result = acpi_device_set_name(device, acpi_device_bus_id);
20531                 if (result) {
20532 +                       kfree_const(acpi_device_bus_id->bus_id);
20533                         kfree(acpi_device_bus_id);
20534                         goto err_unlock;
20535                 }
20536 diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
20537 index 53b22e26266c..2d821ed78453 100644
20538 --- a/drivers/android/Kconfig
20539 +++ b/drivers/android/Kconfig
20540 @@ -9,7 +9,7 @@ config ANDROID
20541  if ANDROID
20543  config ANDROID_BINDER_IPC
20544 -       bool "Android Binder IPC Driver"
20545 +       tristate "Android Binder IPC Driver"
20546         depends on MMU
20547         default n
20548         help
20549 @@ -21,8 +21,8 @@ config ANDROID_BINDER_IPC
20550           between said processes.
20552  config ANDROID_BINDERFS
20553 -       bool "Android Binderfs filesystem"
20554 -       depends on ANDROID_BINDER_IPC
20555 +       tristate "Android Binderfs filesystem"
20556 +       depends on (ANDROID_BINDER_IPC=y) || (ANDROID_BINDER_IPC=m && m)
20557         default n
20558         help
20559           Binderfs is a pseudo-filesystem for the Android Binder IPC driver
20560 diff --git a/drivers/android/Makefile b/drivers/android/Makefile
20561 index c9d3d0c99c25..b9d5ce8deca2 100644
20562 --- a/drivers/android/Makefile
20563 +++ b/drivers/android/Makefile
20564 @@ -1,6 +1,10 @@
20565  # SPDX-License-Identifier: GPL-2.0-only
20566  ccflags-y += -I$(src)                  # needed for trace events
20568 -obj-$(CONFIG_ANDROID_BINDERFS)         += binderfs.o
20569 -obj-$(CONFIG_ANDROID_BINDER_IPC)       += binder.o binder_alloc.o
20570 -obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
20571 +binder_linux-y := binder.o binder_alloc.o
20572 +obj-$(CONFIG_ANDROID_BINDER_IPC) += binder_linux.o
20573 +binder_linux-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
20574 +binder_linux-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
20576 +# binder-$(CONFIG_ANDROID_BINDER_IPC) := binder.o binder_alloc.o
20577 +# binder-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
20578 diff --git a/drivers/android/binder.c b/drivers/android/binder.c
20579 index c119736ca56a..569850551e88 100644
20580 --- a/drivers/android/binder.c
20581 +++ b/drivers/android/binder.c
20582 @@ -5788,9 +5788,20 @@ static int __init binder_init(void)
20583         return ret;
20586 -device_initcall(binder_init);
20587 +module_init(binder_init);
20589 + * binder will have no exit function since binderfs instances can be mounted
20590 + * multiple times and also in user namespaces finding and destroying them all
20591 + * is not feasible without introducing insane locking. Just ignoring existing
20592 + * instances on module unload also wouldn't work since we would lose track of
20593 + * what major number was dynamically allocated and also what minor numbers are
20594 + * already given out. So this would get us into all kinds of issues with device
20595 + * number reuse. So simply don't allow unloading unless we are forced to do so.
20596 + */
20598 +MODULE_AUTHOR("Google, Inc.");
20599 +MODULE_DESCRIPTION("Driver for Android binder device");
20600 +MODULE_LICENSE("GPL v2");
20602  #define CREATE_TRACE_POINTS
20603  #include "binder_trace.h"
20605 -MODULE_LICENSE("GPL v2");
20606 diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
20607 index 7caf74ad2405..07c11e8d6dad 100644
20608 --- a/drivers/android/binder_alloc.c
20609 +++ b/drivers/android/binder_alloc.c
20610 @@ -38,8 +38,7 @@ enum {
20611  };
20612  static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
20614 -module_param_named(debug_mask, binder_alloc_debug_mask,
20615 -                  uint, 0644);
20616 +module_param_named(alloc_debug_mask, binder_alloc_debug_mask, uint, 0644);
20618  #define binder_alloc_debug(mask, x...) \
20619         do { \
20620 diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
20621 index 6e8e001381af..e4e0678e2781 100644
20622 --- a/drivers/android/binder_alloc.h
20623 +++ b/drivers/android/binder_alloc.h
20624 @@ -6,6 +6,7 @@
20625  #ifndef _LINUX_BINDER_ALLOC_H
20626  #define _LINUX_BINDER_ALLOC_H
20628 +#include <linux/kconfig.h>
20629  #include <linux/rbtree.h>
20630  #include <linux/list.h>
20631  #include <linux/mm.h>
20632 @@ -109,7 +110,7 @@ struct binder_alloc {
20633         size_t pages_high;
20634  };
20636 -#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
20637 +#if IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_SELFTEST)
20638  void binder_selftest_alloc(struct binder_alloc *alloc);
20639  #else
20640  static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
20641 diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
20642 index 6cd79011e35d..da5bcb3203dc 100644
20643 --- a/drivers/android/binder_internal.h
20644 +++ b/drivers/android/binder_internal.h
20645 @@ -5,6 +5,7 @@
20647  #include <linux/export.h>
20648  #include <linux/fs.h>
20649 +#include <linux/kconfig.h>
20650  #include <linux/list.h>
20651  #include <linux/miscdevice.h>
20652  #include <linux/mutex.h>
20653 @@ -77,7 +78,7 @@ extern const struct file_operations binder_fops;
20655  extern char *binder_devices_param;
20657 -#ifdef CONFIG_ANDROID_BINDERFS
20658 +#if IS_ENABLED(CONFIG_ANDROID_BINDERFS)
20659  extern bool is_binderfs_device(const struct inode *inode);
20660  extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
20661                                            const struct file_operations *fops,
20662 @@ -98,7 +99,7 @@ static inline struct dentry *binderfs_create_file(struct dentry *dir,
20663  static inline void binderfs_remove_file(struct dentry *dentry) {}
20664  #endif
20666 -#ifdef CONFIG_ANDROID_BINDERFS
20667 +#if IS_ENABLED(CONFIG_ANDROID_BINDERFS)
20668  extern int __init init_binderfs(void);
20669  #else
20670  static inline int __init init_binderfs(void)
20671 diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
20672 index e80ba93c62a9..1a18e9dbb2a6 100644
20673 --- a/drivers/android/binderfs.c
20674 +++ b/drivers/android/binderfs.c
20675 @@ -113,7 +113,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
20676         struct super_block *sb = ref_inode->i_sb;
20677         struct binderfs_info *info = sb->s_fs_info;
20678  #if defined(CONFIG_IPC_NS)
20679 -       bool use_reserve = (info->ipc_ns == &init_ipc_ns);
20680 +       bool use_reserve = (info->ipc_ns == show_init_ipc_ns());
20681  #else
20682         bool use_reserve = true;
20683  #endif
20684 @@ -402,7 +402,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
20685         struct dentry *root = sb->s_root;
20686         struct binderfs_info *info = sb->s_fs_info;
20687  #if defined(CONFIG_IPC_NS)
20688 -       bool use_reserve = (info->ipc_ns == &init_ipc_ns);
20689 +       bool use_reserve = (info->ipc_ns == show_init_ipc_ns());
20690  #else
20691         bool use_reserve = true;
20692  #endif
20693 @@ -682,7 +682,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
20694                 return -ENOMEM;
20695         info = sb->s_fs_info;
20697 -       info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
20698 +       info->ipc_ns = get_ipc_ns_exported(current->nsproxy->ipc_ns);
20700         info->root_gid = make_kgid(sb->s_user_ns, 0);
20701         if (!gid_valid(info->root_gid))
20702 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
20703 index 00ba8e5a1ccc..33192a8f687d 100644
20704 --- a/drivers/ata/ahci.c
20705 +++ b/drivers/ata/ahci.c
20706 @@ -1772,6 +1772,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
20707                 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
20709  #ifdef CONFIG_ARM64
20710 +       if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
20711 +           pdev->device == 0xa235 &&
20712 +           pdev->revision < 0x30)
20713 +               hpriv->flags |= AHCI_HFLAG_NO_SXS;
20715         if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
20716                 hpriv->irq_handler = ahci_thunderx_irq_handler;
20717  #endif
20718 diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
20719 index 98b8baa47dc5..d1f284f0c83d 100644
20720 --- a/drivers/ata/ahci.h
20721 +++ b/drivers/ata/ahci.h
20722 @@ -242,6 +242,7 @@ enum {
20723                                                         suspend/resume */
20724         AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP
20725                                                         from phy_power_on() */
20726 +       AHCI_HFLAG_NO_SXS               = (1 << 28), /* SXS not supported */
20728         /* ap->flags bits */
20730 diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
20731 index 5b32df5d33ad..6e9c5ade4c2e 100644
20732 --- a/drivers/ata/ahci_brcm.c
20733 +++ b/drivers/ata/ahci_brcm.c
20734 @@ -86,7 +86,8 @@ struct brcm_ahci_priv {
20735         u32 port_mask;
20736         u32 quirks;
20737         enum brcm_ahci_version version;
20738 -       struct reset_control *rcdev;
20739 +       struct reset_control *rcdev_rescal;
20740 +       struct reset_control *rcdev_ahci;
20741  };
20743  static inline u32 brcm_sata_readreg(void __iomem *addr)
20744 @@ -352,8 +353,8 @@ static int brcm_ahci_suspend(struct device *dev)
20745         else
20746                 ret = 0;
20748 -       if (priv->version != BRCM_SATA_BCM7216)
20749 -               reset_control_assert(priv->rcdev);
20750 +       reset_control_assert(priv->rcdev_ahci);
20751 +       reset_control_rearm(priv->rcdev_rescal);
20753         return ret;
20755 @@ -365,10 +366,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
20756         struct brcm_ahci_priv *priv = hpriv->plat_data;
20757         int ret = 0;
20759 -       if (priv->version == BRCM_SATA_BCM7216)
20760 -               ret = reset_control_reset(priv->rcdev);
20761 -       else
20762 -               ret = reset_control_deassert(priv->rcdev);
20763 +       ret = reset_control_deassert(priv->rcdev_ahci);
20764 +       if (ret)
20765 +               return ret;
20766 +       ret = reset_control_reset(priv->rcdev_rescal);
20767         if (ret)
20768                 return ret;
20770 @@ -434,7 +435,6 @@ static int brcm_ahci_probe(struct platform_device *pdev)
20772         const struct of_device_id *of_id;
20773         struct device *dev = &pdev->dev;
20774 -       const char *reset_name = NULL;
20775         struct brcm_ahci_priv *priv;
20776         struct ahci_host_priv *hpriv;
20777         struct resource *res;
20778 @@ -456,15 +456,15 @@ static int brcm_ahci_probe(struct platform_device *pdev)
20779         if (IS_ERR(priv->top_ctrl))
20780                 return PTR_ERR(priv->top_ctrl);
20782 -       /* Reset is optional depending on platform and named differently */
20783 -       if (priv->version == BRCM_SATA_BCM7216)
20784 -               reset_name = "rescal";
20785 -       else
20786 -               reset_name = "ahci";
20788 -       priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
20789 -       if (IS_ERR(priv->rcdev))
20790 -               return PTR_ERR(priv->rcdev);
20791 +       if (priv->version == BRCM_SATA_BCM7216) {
20792 +               priv->rcdev_rescal = devm_reset_control_get_optional_shared(
20793 +                       &pdev->dev, "rescal");
20794 +               if (IS_ERR(priv->rcdev_rescal))
20795 +                       return PTR_ERR(priv->rcdev_rescal);
20796 +       }
20797 +       priv->rcdev_ahci = devm_reset_control_get_optional(&pdev->dev, "ahci");
20798 +       if (IS_ERR(priv->rcdev_ahci))
20799 +               return PTR_ERR(priv->rcdev_ahci);
20801         hpriv = ahci_platform_get_resources(pdev, 0);
20802         if (IS_ERR(hpriv))
20803 @@ -485,10 +485,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
20804                 break;
20805         }
20807 -       if (priv->version == BRCM_SATA_BCM7216)
20808 -               ret = reset_control_reset(priv->rcdev);
20809 -       else
20810 -               ret = reset_control_deassert(priv->rcdev);
20811 +       ret = reset_control_reset(priv->rcdev_rescal);
20812 +       if (ret)
20813 +               return ret;
20814 +       ret = reset_control_deassert(priv->rcdev_ahci);
20815         if (ret)
20816                 return ret;
20818 @@ -539,8 +539,8 @@ static int brcm_ahci_probe(struct platform_device *pdev)
20819  out_disable_clks:
20820         ahci_platform_disable_clks(hpriv);
20821  out_reset:
20822 -       if (priv->version != BRCM_SATA_BCM7216)
20823 -               reset_control_assert(priv->rcdev);
20824 +       reset_control_assert(priv->rcdev_ahci);
20825 +       reset_control_rearm(priv->rcdev_rescal);
20826         return ret;
20829 diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
20830 index ea5bf5f4cbed..fec2e9754aed 100644
20831 --- a/drivers/ata/libahci.c
20832 +++ b/drivers/ata/libahci.c
20833 @@ -493,6 +493,11 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
20834                 cap |= HOST_CAP_ALPM;
20835         }
20837 +       if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
20838 +               dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
20839 +               cap &= ~HOST_CAP_SXS;
20840 +       }
20842         if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
20843                 dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
20844                          port_map, hpriv->force_port_map);
20845 diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
20846 index de638dafce21..b2f552088291 100644
20847 --- a/drivers/ata/libahci_platform.c
20848 +++ b/drivers/ata/libahci_platform.c
20849 @@ -582,11 +582,13 @@ int ahci_platform_init_host(struct platform_device *pdev,
20850         int i, irq, n_ports, rc;
20852         irq = platform_get_irq(pdev, 0);
20853 -       if (irq <= 0) {
20854 +       if (irq < 0) {
20855                 if (irq != -EPROBE_DEFER)
20856                         dev_err(dev, "no irq\n");
20857                 return irq;
20858         }
20859 +       if (!irq)
20860 +               return -EINVAL;
20862         hpriv->irq = irq;
20864 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
20865 index e9cf31f38450..63f39440a9b4 100644
20866 --- a/drivers/ata/pata_arasan_cf.c
20867 +++ b/drivers/ata/pata_arasan_cf.c
20868 @@ -818,12 +818,19 @@ static int arasan_cf_probe(struct platform_device *pdev)
20869         else
20870                 quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
20872 -       /* if irq is 0, support only PIO */
20873 -       acdev->irq = platform_get_irq(pdev, 0);
20874 -       if (acdev->irq)
20875 +       /*
20876 +        * If there's an error getting IRQ (or we do get IRQ0),
20877 +        * support only PIO
20878 +        */
20879 +       ret = platform_get_irq(pdev, 0);
20880 +       if (ret > 0) {
20881 +               acdev->irq = ret;
20882                 irq_handler = arasan_cf_interrupt;
20883 -       else
20884 +       } else  if (ret == -EPROBE_DEFER) {
20885 +               return ret;
20886 +       } else  {
20887                 quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
20888 +       }
20890         acdev->pbase = res->start;
20891         acdev->vbase = devm_ioremap(&pdev->dev, res->start,
20892 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
20893 index d1644a8ef9fa..abc0e87ca1a8 100644
20894 --- a/drivers/ata/pata_ixp4xx_cf.c
20895 +++ b/drivers/ata/pata_ixp4xx_cf.c
20896 @@ -165,8 +165,12 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
20897                 return -ENOMEM;
20899         irq = platform_get_irq(pdev, 0);
20900 -       if (irq)
20901 +       if (irq > 0)
20902                 irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
20903 +       else if (irq < 0)
20904 +               return irq;
20905 +       else
20906 +               return -EINVAL;
20908         /* Setup expansion bus chip selects */
20909         *data->cs0_cfg = data->cs0_bits;
20910 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
20911 index 664ef658a955..b62446ea5f40 100644
20912 --- a/drivers/ata/sata_mv.c
20913 +++ b/drivers/ata/sata_mv.c
20914 @@ -4097,6 +4097,10 @@ static int mv_platform_probe(struct platform_device *pdev)
20915                 n_ports = mv_platform_data->n_ports;
20916                 irq = platform_get_irq(pdev, 0);
20917         }
20918 +       if (irq < 0)
20919 +               return irq;
20920 +       if (!irq)
20921 +               return -EINVAL;
20923         host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
20924         hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
20925 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
20926 index 653c8c6ac7a7..aedeb2dc1a18 100644
20927 --- a/drivers/base/devtmpfs.c
20928 +++ b/drivers/base/devtmpfs.c
20929 @@ -419,7 +419,6 @@ static int __init devtmpfs_setup(void *p)
20930         init_chroot(".");
20931  out:
20932         *(int *)p = err;
20933 -       complete(&setup_done);
20934         return err;
20937 @@ -432,6 +431,7 @@ static int __ref devtmpfsd(void *p)
20939         int err = devtmpfs_setup(p);
20941 +       complete(&setup_done);
20942         if (err)
20943                 return err;
20944         devtmpfs_work_loop();
20945 diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
20946 index 78355095e00d..7e2c79e2a88b 100644
20947 --- a/drivers/base/firmware_loader/main.c
20948 +++ b/drivers/base/firmware_loader/main.c
20949 @@ -465,6 +465,8 @@ static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
20950  static char fw_path_para[256];
20951  static const char * const fw_path[] = {
20952         fw_path_para,
20953 +       "/etc/firmware/" UTS_RELEASE,
20954 +       "/etc/firmware",
20955         "/lib/firmware/updates/" UTS_RELEASE,
20956         "/lib/firmware/updates",
20957         "/lib/firmware/" UTS_RELEASE,
20958 diff --git a/drivers/base/node.c b/drivers/base/node.c
20959 index f449dbb2c746..2c36f61d30bc 100644
20960 --- a/drivers/base/node.c
20961 +++ b/drivers/base/node.c
20962 @@ -268,21 +268,20 @@ static void node_init_cache_dev(struct node *node)
20963         if (!dev)
20964                 return;
20966 +       device_initialize(dev);
20967         dev->parent = &node->dev;
20968         dev->release = node_cache_release;
20969         if (dev_set_name(dev, "memory_side_cache"))
20970 -               goto free_dev;
20971 +               goto put_device;
20973 -       if (device_register(dev))
20974 -               goto free_name;
20975 +       if (device_add(dev))
20976 +               goto put_device;
20978         pm_runtime_no_callbacks(dev);
20979         node->cache_dev = dev;
20980         return;
20981 -free_name:
20982 -       kfree_const(dev->kobj.name);
20983 -free_dev:
20984 -       kfree(dev);
20985 +put_device:
20986 +       put_device(dev);
20989  /**
20990 @@ -319,25 +318,24 @@ void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
20991                 return;
20993         dev = &info->dev;
20994 +       device_initialize(dev);
20995         dev->parent = node->cache_dev;
20996         dev->release = node_cacheinfo_release;
20997         dev->groups = cache_groups;
20998         if (dev_set_name(dev, "index%d", cache_attrs->level))
20999 -               goto free_cache;
21000 +               goto put_device;
21002         info->cache_attrs = *cache_attrs;
21003 -       if (device_register(dev)) {
21004 +       if (device_add(dev)) {
21005                 dev_warn(&node->dev, "failed to add cache level:%d\n",
21006                          cache_attrs->level);
21007 -               goto free_name;
21008 +               goto put_device;
21009         }
21010         pm_runtime_no_callbacks(dev);
21011         list_add_tail(&info->node, &node->cache_attrs);
21012         return;
21013 -free_name:
21014 -       kfree_const(dev->kobj.name);
21015 -free_cache:
21016 -       kfree(info);
21017 +put_device:
21018 +       put_device(dev);
21021  static void node_remove_caches(struct node *node)
21022 diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
21023 index fe1dad68aee4..ae011f2bc537 100644
21024 --- a/drivers/base/power/runtime.c
21025 +++ b/drivers/base/power/runtime.c
21026 @@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev)
21027         dev->power.request_pending = false;
21028         dev->power.request = RPM_REQ_NONE;
21029         dev->power.deferred_resume = false;
21030 +       dev->power.needs_force_resume = 0;
21031         INIT_WORK(&dev->power.work, pm_runtime_work);
21033         dev->power.timer_expires = 0;
21034 @@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev)
21035          * its parent, but set its status to RPM_SUSPENDED anyway in case this
21036          * function will be called again for it in the meantime.
21037          */
21038 -       if (pm_runtime_need_not_resume(dev))
21039 +       if (pm_runtime_need_not_resume(dev)) {
21040                 pm_runtime_set_suspended(dev);
21041 -       else
21042 +       } else {
21043                 __update_runtime_status(dev, RPM_SUSPENDED);
21044 +               dev->power.needs_force_resume = 1;
21045 +       }
21047         return 0;
21049 @@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev)
21050         int (*callback)(struct device *);
21051         int ret = 0;
21053 -       if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
21054 +       if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
21055                 goto out;
21057         /*
21058 @@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev)
21060         pm_runtime_mark_last_busy(dev);
21061  out:
21062 +       dev->power.needs_force_resume = 0;
21063         pm_runtime_enable(dev);
21064         return ret;
21066 diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
21067 index ff2ee87987c7..211a335a608d 100644
21068 --- a/drivers/base/regmap/regmap-debugfs.c
21069 +++ b/drivers/base/regmap/regmap-debugfs.c
21070 @@ -660,6 +660,7 @@ void regmap_debugfs_exit(struct regmap *map)
21071                 regmap_debugfs_free_dump_cache(map);
21072                 mutex_unlock(&map->cache_lock);
21073                 kfree(map->debugfs_name);
21074 +               map->debugfs_name = NULL;
21075         } else {
21076                 struct regmap_debugfs_node *node, *tmp;
21078 diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
21079 index fa3719ef80e4..88310ac9ce90 100644
21080 --- a/drivers/base/swnode.c
21081 +++ b/drivers/base/swnode.c
21082 @@ -1032,6 +1032,7 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
21083         }
21085         set_secondary_fwnode(dev, &swnode->fwnode);
21086 +       software_node_notify(dev, KOBJ_ADD);
21088         return 0;
21090 @@ -1105,8 +1106,8 @@ int software_node_notify(struct device *dev, unsigned long action)
21092         switch (action) {
21093         case KOBJ_ADD:
21094 -               ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
21095 -                                       "software_node");
21096 +               ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
21097 +                                              "software_node");
21098                 if (ret)
21099                         break;
21101 diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
21102 index 104b713f4055..d601e49f80e0 100644
21103 --- a/drivers/block/ataflop.c
21104 +++ b/drivers/block/ataflop.c
21105 @@ -729,8 +729,12 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
21106         unsigned long   flags;
21107         int ret;
21109 -       if (type)
21110 +       if (type) {
21111                 type--;
21112 +               if (type >= NUM_DISK_MINORS ||
21113 +                   minor2disktype[type].drive_types > DriveType)
21114 +                       return -EINVAL;
21115 +       }
21117         q = unit[drive].disk[type]->queue;
21118         blk_mq_freeze_queue(q);
21119 @@ -742,11 +746,6 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
21120         local_irq_restore(flags);
21122         if (type) {
21123 -               if (type >= NUM_DISK_MINORS ||
21124 -                   minor2disktype[type].drive_types > DriveType) {
21125 -                       ret = -EINVAL;
21126 -                       goto out;
21127 -               }
21128                 type = minor2disktype[type].index;
21129                 UDT = &atari_disk_type[type];
21130         }
21131 @@ -2002,7 +2001,10 @@ static void ataflop_probe(dev_t dev)
21132         int drive = MINOR(dev) & 3;
21133         int type  = MINOR(dev) >> 2;
21135 -       if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
21136 +       if (type)
21137 +               type--;
21139 +       if (drive >= FD_MAX_UNITS || type >= NUM_DISK_MINORS)
21140                 return;
21141         mutex_lock(&ataflop_probe_lock);
21142         if (!unit[drive].disk[type]) {
21143 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
21144 index 4ff71b579cfc..974da561b8e5 100644
21145 --- a/drivers/block/nbd.c
21146 +++ b/drivers/block/nbd.c
21147 @@ -1980,7 +1980,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
21148          * config ref and try to destroy the workqueue from inside the work
21149          * queue.
21150          */
21151 -       flush_workqueue(nbd->recv_workq);
21152 +       if (nbd->recv_workq)
21153 +               flush_workqueue(nbd->recv_workq);
21154         if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
21155                                &nbd->config->runtime_flags))
21156                 nbd_config_put(nbd);
21157 diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
21158 index bfcab1c782b5..dae54dd1aeac 100644
21159 --- a/drivers/block/null_blk/zoned.c
21160 +++ b/drivers/block/null_blk/zoned.c
21161 @@ -180,6 +180,7 @@ int null_register_zoned_dev(struct nullb *nullb)
21162  void null_free_zoned_dev(struct nullb_device *dev)
21164         kvfree(dev->zones);
21165 +       dev->zones = NULL;
21168  int null_report_zones(struct gendisk *disk, sector_t sector,
21169 diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
21170 index d4aa6bfc9555..49ad400a5225 100644
21171 --- a/drivers/block/rnbd/rnbd-clt-sysfs.c
21172 +++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
21173 @@ -432,10 +432,14 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
21174          * i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because
21175          * of sysfs link already was removed already.
21176          */
21177 -       if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) {
21178 -               sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
21179 +       if (dev->blk_symlink_name) {
21180 +               if (try_module_get(THIS_MODULE)) {
21181 +                       sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
21182 +                       module_put(THIS_MODULE);
21183 +               }
21184 +               /* It should be freed always. */
21185                 kfree(dev->blk_symlink_name);
21186 -               module_put(THIS_MODULE);
21187 +               dev->blk_symlink_name = NULL;
21188         }
21191 @@ -479,11 +483,7 @@ static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
21192         while ((s = strchr(pathname, '/')))
21193                 s[0] = '!';
21195 -       ret = snprintf(buf, len, "%s", pathname);
21196 -       if (ret >= len)
21197 -               return -ENAMETOOLONG;
21199 -       ret = snprintf(buf, len, "%s@%s", buf, dev->sess->sessname);
21200 +       ret = snprintf(buf, len, "%s@%s", pathname, dev->sess->sessname);
21201         if (ret >= len)
21202                 return -ENAMETOOLONG;
21204 diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
21205 index 45a470076652..5ab7319ff2ea 100644
21206 --- a/drivers/block/rnbd/rnbd-clt.c
21207 +++ b/drivers/block/rnbd/rnbd-clt.c
21208 @@ -693,7 +693,11 @@ static void remap_devs(struct rnbd_clt_session *sess)
21209                 return;
21210         }
21212 -       rtrs_clt_query(sess->rtrs, &attrs);
21213 +       err = rtrs_clt_query(sess->rtrs, &attrs);
21214 +       if (err) {
21215 +               pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
21216 +               return;
21217 +       }
21218         mutex_lock(&sess->lock);
21219         sess->max_io_size = attrs.max_io_size;
21221 @@ -1234,7 +1238,11 @@ find_and_get_or_create_sess(const char *sessname,
21222                 err = PTR_ERR(sess->rtrs);
21223                 goto wake_up_and_put;
21224         }
21225 -       rtrs_clt_query(sess->rtrs, &attrs);
21227 +       err = rtrs_clt_query(sess->rtrs, &attrs);
21228 +       if (err)
21229 +               goto close_rtrs;
21231         sess->max_io_size = attrs.max_io_size;
21232         sess->queue_depth = attrs.queue_depth;
21234 diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
21235 index 537d499dad3b..73d980840531 100644
21236 --- a/drivers/block/rnbd/rnbd-clt.h
21237 +++ b/drivers/block/rnbd/rnbd-clt.h
21238 @@ -87,7 +87,7 @@ struct rnbd_clt_session {
21239         DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
21240         int     __percpu        *cpu_rr; /* per-cpu var for CPU round-robin */
21241         atomic_t                busy;
21242 -       int                     queue_depth;
21243 +       size_t                  queue_depth;
21244         u32                     max_io_size;
21245         struct blk_mq_tag_set   tag_set;
21246         struct mutex            lock; /* protects state and devs_list */
21247 diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
21248 index a6a68d44f517..677770f32843 100644
21249 --- a/drivers/block/rnbd/rnbd-srv.c
21250 +++ b/drivers/block/rnbd/rnbd-srv.c
21251 @@ -341,7 +341,9 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
21252         struct rnbd_srv_session *sess = sess_dev->sess;
21254         sess_dev->keep_id = true;
21255 -       mutex_lock(&sess->lock);
21256 +       /* It is already started to close by client's close message. */
21257 +       if (!mutex_trylock(&sess->lock))
21258 +               return;
21259         rnbd_srv_destroy_dev_session_sysfs(sess_dev);
21260         mutex_unlock(&sess->lock);
21262 diff --git a/drivers/block/swim.c b/drivers/block/swim.c
21263 index cc6a0bc6c005..ac5c170d76e8 100644
21264 --- a/drivers/block/swim.c
21265 +++ b/drivers/block/swim.c
21266 @@ -328,7 +328,7 @@ static inline void swim_motor(struct swim __iomem *base,
21267                         if (swim_readbit(base, MOTOR_ON))
21268                                 break;
21269                         set_current_state(TASK_INTERRUPTIBLE);
21270 -                       schedule_timeout(1);
21271 +                       schedule_min_hrtimeout();
21272                 }
21273         } else if (action == OFF) {
21274                 swim_action(base, MOTOR_OFF);
21275 @@ -347,7 +347,7 @@ static inline void swim_eject(struct swim __iomem *base)
21276                 if (!swim_readbit(base, DISK_IN))
21277                         break;
21278                 set_current_state(TASK_INTERRUPTIBLE);
21279 -               schedule_timeout(1);
21280 +               schedule_min_hrtimeout();
21281         }
21282         swim_select(base, RELAX);
21284 @@ -372,6 +372,7 @@ static inline int swim_step(struct swim __iomem *base)
21286                 set_current_state(TASK_INTERRUPTIBLE);
21287                 schedule_timeout(1);
21288 +               schedule_min_hrtimeout();
21290                 swim_select(base, RELAX);
21291                 if (!swim_readbit(base, STEP))
21292 diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
21293 index b0c71d3a81a0..bda5c815e441 100644
21294 --- a/drivers/block/xen-blkback/common.h
21295 +++ b/drivers/block/xen-blkback/common.h
21296 @@ -313,6 +313,7 @@ struct xen_blkif {
21298         struct work_struct      free_work;
21299         unsigned int            nr_ring_pages;
21300 +       bool                    multi_ref;
21301         /* All rings for this device. */
21302         struct xen_blkif_ring   *rings;
21303         unsigned int            nr_rings;
21304 diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
21305 index c2aaf690352c..125b22205d38 100644
21306 --- a/drivers/block/xen-blkback/xenbus.c
21307 +++ b/drivers/block/xen-blkback/xenbus.c
21308 @@ -998,14 +998,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
21309         for (i = 0; i < nr_grefs; i++) {
21310                 char ring_ref_name[RINGREF_NAME_LEN];
21312 -               snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
21313 +               if (blkif->multi_ref)
21314 +                       snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
21315 +               else {
21316 +                       WARN_ON(i != 0);
21317 +                       snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
21318 +               }
21320                 err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
21321                                    "%u", &ring_ref[i]);
21323                 if (err != 1) {
21324 -                       if (nr_grefs == 1)
21325 -                               break;
21327                         err = -EINVAL;
21328                         xenbus_dev_fatal(dev, err, "reading %s/%s",
21329                                          dir, ring_ref_name);
21330 @@ -1013,18 +1016,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
21331                 }
21332         }
21334 -       if (err != 1) {
21335 -               WARN_ON(nr_grefs != 1);
21337 -               err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
21338 -                                  &ring_ref[0]);
21339 -               if (err != 1) {
21340 -                       err = -EINVAL;
21341 -                       xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
21342 -                       return err;
21343 -               }
21344 -       }
21346         err = -ENOMEM;
21347         for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
21348                 req = kzalloc(sizeof(*req), GFP_KERNEL);
21349 @@ -1129,10 +1120,15 @@ static int connect_ring(struct backend_info *be)
21350                  blkif->nr_rings, blkif->blk_protocol, protocol,
21351                  blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
21353 -       ring_page_order = xenbus_read_unsigned(dev->otherend,
21354 -                                              "ring-page-order", 0);
21356 -       if (ring_page_order > xen_blkif_max_ring_order) {
21357 +       err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
21358 +                          &ring_page_order);
21359 +       if (err != 1) {
21360 +               blkif->nr_ring_pages = 1;
21361 +               blkif->multi_ref = false;
21362 +       } else if (ring_page_order <= xen_blkif_max_ring_order) {
21363 +               blkif->nr_ring_pages = 1 << ring_page_order;
21364 +               blkif->multi_ref = true;
21365 +       } else {
21366                 err = -EINVAL;
21367                 xenbus_dev_fatal(dev, err,
21368                                  "requested ring page order %d exceed max:%d",
21369 @@ -1141,8 +1137,6 @@ static int connect_ring(struct backend_info *be)
21370                 return err;
21371         }
21373 -       blkif->nr_ring_pages = 1 << ring_page_order;
21375         if (blkif->nr_rings == 1)
21376                 return read_per_ring_refs(&blkif->rings[0], dev->otherend);
21377         else {
21378 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
21379 index 5cbfbd948f67..4a901508e48e 100644
21380 --- a/drivers/bluetooth/btusb.c
21381 +++ b/drivers/bluetooth/btusb.c
21382 @@ -399,7 +399,9 @@ static const struct usb_device_id blacklist_table[] = {
21384         /* MediaTek Bluetooth devices */
21385         { USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
21386 -         .driver_info = BTUSB_MEDIATEK },
21387 +         .driver_info = BTUSB_MEDIATEK |
21388 +                        BTUSB_WIDEBAND_SPEECH |
21389 +                        BTUSB_VALID_LE_STATES },
21391         /* Additional MediaTek MT7615E Bluetooth devices */
21392         { USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
21393 diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
21394 index be4eebb0971b..08b7f4a06bfc 100644
21395 --- a/drivers/bus/mhi/core/init.c
21396 +++ b/drivers/bus/mhi/core/init.c
21397 @@ -508,8 +508,6 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
21399         /* Setup wake db */
21400         mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
21401 -       mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
21402 -       mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
21403         mhi_cntrl->wake_set = false;
21405         /* Setup channel db address for each channel in tre_ring */
21406 @@ -552,6 +550,7 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
21407         struct mhi_ring *buf_ring;
21408         struct mhi_ring *tre_ring;
21409         struct mhi_chan_ctxt *chan_ctxt;
21410 +       u32 tmp;
21412         buf_ring = &mhi_chan->buf_ring;
21413         tre_ring = &mhi_chan->tre_ring;
21414 @@ -565,7 +564,19 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
21415         vfree(buf_ring->base);
21417         buf_ring->base = tre_ring->base = NULL;
21418 +       tre_ring->ctxt_wp = NULL;
21419         chan_ctxt->rbase = 0;
21420 +       chan_ctxt->rlen = 0;
21421 +       chan_ctxt->rp = 0;
21422 +       chan_ctxt->wp = 0;
21424 +       tmp = chan_ctxt->chcfg;
21425 +       tmp &= ~CHAN_CTX_CHSTATE_MASK;
21426 +       tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
21427 +       chan_ctxt->chcfg = tmp;
21429 +       /* Update to all cores */
21430 +       smp_wmb();
21433  int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
21434 @@ -863,12 +874,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
21435         u32 soc_info;
21436         int ret, i;
21438 -       if (!mhi_cntrl)
21439 -               return -EINVAL;
21441 -       if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
21442 +       if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
21443 +           !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
21444             !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
21445 -           !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
21446 +           !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq)
21447                 return -EINVAL;
21449         ret = parse_config(mhi_cntrl, config);
21450 @@ -890,8 +899,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
21451         INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
21452         init_waitqueue_head(&mhi_cntrl->state_event);
21454 -       mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
21455 -                               ("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
21456 +       mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
21457         if (!mhi_cntrl->hiprio_wq) {
21458                 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
21459                 ret = -ENOMEM;
21460 @@ -1296,7 +1304,8 @@ static int mhi_driver_remove(struct device *dev)
21462                 mutex_lock(&mhi_chan->mutex);
21464 -               if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
21465 +               if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
21466 +                    ch_state[dir] == MHI_CH_STATE_STOP) &&
21467                     !mhi_chan->offload_ch)
21468                         mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
21470 diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
21471 index 4e0131b94056..61c37b23dd71 100644
21472 --- a/drivers/bus/mhi/core/main.c
21473 +++ b/drivers/bus/mhi/core/main.c
21474 @@ -242,10 +242,17 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
21475         smp_wmb();
21478 +static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
21480 +       return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
21483  int mhi_destroy_device(struct device *dev, void *data)
21485 +       struct mhi_chan *ul_chan, *dl_chan;
21486         struct mhi_device *mhi_dev;
21487         struct mhi_controller *mhi_cntrl;
21488 +       enum mhi_ee_type ee = MHI_EE_MAX;
21490         if (dev->bus != &mhi_bus_type)
21491                 return 0;
21492 @@ -257,6 +264,17 @@ int mhi_destroy_device(struct device *dev, void *data)
21493         if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
21494                 return 0;
21496 +       ul_chan = mhi_dev->ul_chan;
21497 +       dl_chan = mhi_dev->dl_chan;
21499 +       /*
21500 +        * If execution environment is specified, remove only those devices that
21501 +        * started in them based on ee_mask for the channels as we move on to a
21502 +        * different execution environment
21503 +        */
21504 +       if (data)
21505 +               ee = *(enum mhi_ee_type *)data;
21507         /*
21508          * For the suspend and resume case, this function will get called
21509          * without mhi_unregister_controller(). Hence, we need to drop the
21510 @@ -264,11 +282,19 @@ int mhi_destroy_device(struct device *dev, void *data)
21511          * be sure that there will be no instances of mhi_dev left after
21512          * this.
21513          */
21514 -       if (mhi_dev->ul_chan)
21515 -               put_device(&mhi_dev->ul_chan->mhi_dev->dev);
21516 +       if (ul_chan) {
21517 +               if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
21518 +                       return 0;
21520 -       if (mhi_dev->dl_chan)
21521 -               put_device(&mhi_dev->dl_chan->mhi_dev->dev);
21522 +               put_device(&ul_chan->mhi_dev->dev);
21523 +       }
21525 +       if (dl_chan) {
21526 +               if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
21527 +                       return 0;
21529 +               put_device(&dl_chan->mhi_dev->dev);
21530 +       }
21532         dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
21533                  mhi_dev->name);
21534 @@ -383,7 +409,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
21535         struct mhi_event_ctxt *er_ctxt =
21536                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
21537         struct mhi_ring *ev_ring = &mhi_event->ring;
21538 -       void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
21539 +       dma_addr_t ptr = er_ctxt->rp;
21540 +       void *dev_rp;
21542 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
21543 +               dev_err(&mhi_cntrl->mhi_dev->dev,
21544 +                       "Event ring rp points outside of the event ring\n");
21545 +               return IRQ_HANDLED;
21546 +       }
21548 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
21550         /* Only proceed if event ring has pending events */
21551         if (ev_ring->rp == dev_rp)
21552 @@ -409,7 +444,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
21553         struct device *dev = &mhi_cntrl->mhi_dev->dev;
21554         enum mhi_state state = MHI_STATE_MAX;
21555         enum mhi_pm_state pm_state = 0;
21556 -       enum mhi_ee_type ee = 0;
21557 +       enum mhi_ee_type ee = MHI_EE_MAX;
21559         write_lock_irq(&mhi_cntrl->pm_lock);
21560         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
21561 @@ -418,8 +453,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
21562         }
21564         state = mhi_get_mhi_state(mhi_cntrl);
21565 -       ee = mhi_cntrl->ee;
21566 -       mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
21567 +       ee = mhi_get_exec_env(mhi_cntrl);
21568         dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
21569                 TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
21570                 TO_MHI_STATE_STR(state));
21571 @@ -431,27 +465,30 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
21572         }
21573         write_unlock_irq(&mhi_cntrl->pm_lock);
21575 -        /* If device supports RDDM don't bother processing SYS error */
21576 -       if (mhi_cntrl->rddm_image) {
21577 -               /* host may be performing a device power down already */
21578 -               if (!mhi_is_active(mhi_cntrl))
21579 -                       goto exit_intvec;
21580 +       if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
21581 +               goto exit_intvec;
21583 -               if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
21584 +       switch (ee) {
21585 +       case MHI_EE_RDDM:
21586 +               /* proceed if power down is not already in progress */
21587 +               if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
21588                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
21589 +                       mhi_cntrl->ee = ee;
21590                         wake_up_all(&mhi_cntrl->state_event);
21591                 }
21592 -               goto exit_intvec;
21593 -       }
21595 -       if (pm_state == MHI_PM_SYS_ERR_DETECT) {
21596 +               break;
21597 +       case MHI_EE_PBL:
21598 +       case MHI_EE_EDL:
21599 +       case MHI_EE_PTHRU:
21600 +               mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
21601 +               mhi_cntrl->ee = ee;
21602                 wake_up_all(&mhi_cntrl->state_event);
21604 -               /* For fatal errors, we let controller decide next step */
21605 -               if (MHI_IN_PBL(ee))
21606 -                       mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
21607 -               else
21608 -                       mhi_pm_sys_err_handler(mhi_cntrl);
21609 +               mhi_pm_sys_err_handler(mhi_cntrl);
21610 +               break;
21611 +       default:
21612 +               wake_up_all(&mhi_cntrl->state_event);
21613 +               mhi_pm_sys_err_handler(mhi_cntrl);
21614 +               break;
21615         }
21617  exit_intvec:
21618 @@ -536,6 +573,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
21619                 struct mhi_buf_info *buf_info;
21620                 u16 xfer_len;
21622 +               if (!is_valid_ring_ptr(tre_ring, ptr)) {
21623 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
21624 +                               "Event element points outside of the tre ring\n");
21625 +                       break;
21626 +               }
21627                 /* Get the TRB this event points to */
21628                 ev_tre = mhi_to_virtual(tre_ring, ptr);
21630 @@ -570,8 +612,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
21631                         /* notify client */
21632                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
21634 -                       if (mhi_chan->dir == DMA_TO_DEVICE)
21635 +                       if (mhi_chan->dir == DMA_TO_DEVICE) {
21636                                 atomic_dec(&mhi_cntrl->pending_pkts);
21637 +                               /* Release the reference got from mhi_queue() */
21638 +                               mhi_cntrl->runtime_put(mhi_cntrl);
21639 +                       }
21641                         /*
21642                          * Recycle the buffer if buffer is pre-allocated,
21643 @@ -695,6 +740,12 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
21644         struct mhi_chan *mhi_chan;
21645         u32 chan;
21647 +       if (!is_valid_ring_ptr(mhi_ring, ptr)) {
21648 +               dev_err(&mhi_cntrl->mhi_dev->dev,
21649 +                       "Event element points outside of the cmd ring\n");
21650 +               return;
21651 +       }
21653         cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
21655         chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
21656 @@ -719,6 +770,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
21657         struct device *dev = &mhi_cntrl->mhi_dev->dev;
21658         u32 chan;
21659         int count = 0;
21660 +       dma_addr_t ptr = er_ctxt->rp;
21662         /*
21663          * This is a quick check to avoid unnecessary event processing
21664 @@ -728,7 +780,13 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
21665         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
21666                 return -EIO;
21668 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
21669 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
21670 +               dev_err(&mhi_cntrl->mhi_dev->dev,
21671 +                       "Event ring rp points outside of the event ring\n");
21672 +               return -EIO;
21673 +       }
21675 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
21676         local_rp = ev_ring->rp;
21678         while (dev_rp != local_rp) {
21679 @@ -834,6 +892,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
21680                          */
21681                         if (chan < mhi_cntrl->max_chan) {
21682                                 mhi_chan = &mhi_cntrl->mhi_chan[chan];
21683 +                               if (!mhi_chan->configured)
21684 +                                       break;
21685                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
21686                                 event_quota--;
21687                         }
21688 @@ -845,7 +905,15 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
21690                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
21691                 local_rp = ev_ring->rp;
21692 -               dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
21694 +               ptr = er_ctxt->rp;
21695 +               if (!is_valid_ring_ptr(ev_ring, ptr)) {
21696 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
21697 +                               "Event ring rp points outside of the event ring\n");
21698 +                       return -EIO;
21699 +               }
21701 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
21702                 count++;
21703         }
21705 @@ -868,11 +936,18 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
21706         int count = 0;
21707         u32 chan;
21708         struct mhi_chan *mhi_chan;
21709 +       dma_addr_t ptr = er_ctxt->rp;
21711         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
21712                 return -EIO;
21714 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
21715 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
21716 +               dev_err(&mhi_cntrl->mhi_dev->dev,
21717 +                       "Event ring rp points outside of the event ring\n");
21718 +               return -EIO;
21719 +       }
21721 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
21722         local_rp = ev_ring->rp;
21724         while (dev_rp != local_rp && event_quota > 0) {
21725 @@ -886,7 +961,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
21726                  * Only process the event ring elements whose channel
21727                  * ID is within the maximum supported range.
21728                  */
21729 -               if (chan < mhi_cntrl->max_chan) {
21730 +               if (chan < mhi_cntrl->max_chan &&
21731 +                   mhi_cntrl->mhi_chan[chan].configured) {
21732                         mhi_chan = &mhi_cntrl->mhi_chan[chan];
21734                         if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
21735 @@ -900,7 +976,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
21737                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
21738                 local_rp = ev_ring->rp;
21739 -               dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
21741 +               ptr = er_ctxt->rp;
21742 +               if (!is_valid_ring_ptr(ev_ring, ptr)) {
21743 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
21744 +                               "Event ring rp points outside of the event ring\n");
21745 +                       return -EIO;
21746 +               }
21748 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
21749                 count++;
21750         }
21751         read_lock_bh(&mhi_cntrl->pm_lock);
21752 @@ -1004,9 +1088,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
21753         if (unlikely(ret))
21754                 goto exit_unlock;
21756 -       /* trigger M3 exit if necessary */
21757 -       if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
21758 -               mhi_trigger_resume(mhi_cntrl);
21759 +       /* Packet is queued, take a usage ref to exit M3 if necessary
21760 +        * for host->device buffer, balanced put is done on buffer completion
21761 +        * for device->host buffer, balanced put is after ringing the DB
21762 +        */
21763 +       mhi_cntrl->runtime_get(mhi_cntrl);
21765         /* Assert dev_wake (to exit/prevent M1/M2)*/
21766         mhi_cntrl->wake_toggle(mhi_cntrl);
21767 @@ -1014,12 +1100,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
21768         if (mhi_chan->dir == DMA_TO_DEVICE)
21769                 atomic_inc(&mhi_cntrl->pending_pkts);
21771 -       if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
21772 -               ret = -EIO;
21773 -               goto exit_unlock;
21774 -       }
21775 +       if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
21776 +               mhi_ring_chan_db(mhi_cntrl, mhi_chan);
21778 -       mhi_ring_chan_db(mhi_cntrl, mhi_chan);
21779 +       if (dir == DMA_FROM_DEVICE)
21780 +               mhi_cntrl->runtime_put(mhi_cntrl);
21782  exit_unlock:
21783         read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
21784 @@ -1365,6 +1450,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
21785         struct mhi_ring *ev_ring;
21786         struct device *dev = &mhi_cntrl->mhi_dev->dev;
21787         unsigned long flags;
21788 +       dma_addr_t ptr;
21790         dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
21792 @@ -1372,7 +1458,15 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
21794         /* mark all stale events related to channel as STALE event */
21795         spin_lock_irqsave(&mhi_event->lock, flags);
21796 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
21798 +       ptr = er_ctxt->rp;
21799 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
21800 +               dev_err(&mhi_cntrl->mhi_dev->dev,
21801 +                       "Event ring rp points outside of the event ring\n");
21802 +               dev_rp = ev_ring->rp;
21803 +       } else {
21804 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
21805 +       }
21807         local_rp = ev_ring->rp;
21808         while (dev_rp != local_rp) {
21809 @@ -1403,8 +1497,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
21810         while (tre_ring->rp != tre_ring->wp) {
21811                 struct mhi_buf_info *buf_info = buf_ring->rp;
21813 -               if (mhi_chan->dir == DMA_TO_DEVICE)
21814 +               if (mhi_chan->dir == DMA_TO_DEVICE) {
21815                         atomic_dec(&mhi_cntrl->pending_pkts);
21816 +                       /* Release the reference got from mhi_queue() */
21817 +                       mhi_cntrl->runtime_put(mhi_cntrl);
21818 +               }
21820                 if (!buf_info->pre_mapped)
21821                         mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
21822 diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
21823 index 681960c72d2a..277704af7eb6 100644
21824 --- a/drivers/bus/mhi/core/pm.c
21825 +++ b/drivers/bus/mhi/core/pm.c
21826 @@ -377,24 +377,28 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
21828         struct mhi_event *mhi_event;
21829         struct device *dev = &mhi_cntrl->mhi_dev->dev;
21830 +       enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
21831         int i, ret;
21833         dev_dbg(dev, "Processing Mission Mode transition\n");
21835         write_lock_irq(&mhi_cntrl->pm_lock);
21836         if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
21837 -               mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
21838 +               ee = mhi_get_exec_env(mhi_cntrl);
21840 -       if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
21841 +       if (!MHI_IN_MISSION_MODE(ee)) {
21842                 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
21843                 write_unlock_irq(&mhi_cntrl->pm_lock);
21844                 wake_up_all(&mhi_cntrl->state_event);
21845                 return -EIO;
21846         }
21847 +       mhi_cntrl->ee = ee;
21848         write_unlock_irq(&mhi_cntrl->pm_lock);
21850         wake_up_all(&mhi_cntrl->state_event);
21852 +       device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
21853 +                             mhi_destroy_device);
21854         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
21856         /* Force MHI to be in M0 state before continuing */
21857 @@ -1092,7 +1096,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
21858                                                            &val) ||
21859                                         !val,
21860                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
21861 -               if (ret) {
21862 +               if (!ret) {
21863                         ret = -EIO;
21864                         dev_info(dev, "Failed to reset MHI due to syserr state\n");
21865                         goto error_bhi_offset;
21866 diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
21867 index 20673a4b4a3c..ef549c695b55 100644
21868 --- a/drivers/bus/mhi/pci_generic.c
21869 +++ b/drivers/bus/mhi/pci_generic.c
21870 @@ -230,6 +230,21 @@ static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
21871         }
21874 +static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
21876 +       /* no-op */
21879 +static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
21881 +       /* no-op */
21884 +static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
21886 +       /* no-op */
21889  static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
21891         struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
21892 @@ -433,6 +448,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
21893         mhi_cntrl->status_cb = mhi_pci_status_cb;
21894         mhi_cntrl->runtime_get = mhi_pci_runtime_get;
21895         mhi_cntrl->runtime_put = mhi_pci_runtime_put;
21896 +       mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
21897 +       mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
21898 +       mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
21900         err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
21901         if (err)
21902 @@ -498,6 +516,12 @@ static void mhi_pci_remove(struct pci_dev *pdev)
21903         mhi_unregister_controller(mhi_cntrl);
21906 +static void mhi_pci_shutdown(struct pci_dev *pdev)
21908 +       mhi_pci_remove(pdev);
21909 +       pci_set_power_state(pdev, PCI_D3hot);
21912  static void mhi_pci_reset_prepare(struct pci_dev *pdev)
21914         struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
21915 @@ -668,6 +692,7 @@ static struct pci_driver mhi_pci_driver = {
21916         .id_table       = mhi_pci_id_table,
21917         .probe          = mhi_pci_probe,
21918         .remove         = mhi_pci_remove,
21919 +       .shutdown       = mhi_pci_shutdown,
21920         .err_handler    = &mhi_pci_err_handler,
21921         .driver.pm      = &mhi_pci_pm_ops
21922  };
21923 diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
21924 index 03ddcf426887..0b8f53a688b8 100644
21925 --- a/drivers/bus/qcom-ebi2.c
21926 +++ b/drivers/bus/qcom-ebi2.c
21927 @@ -353,8 +353,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev)
21929                 /* Figure out the chipselect */
21930                 ret = of_property_read_u32(child, "reg", &csindex);
21931 -               if (ret)
21932 +               if (ret) {
21933 +                       of_node_put(child);
21934                         return ret;
21935 +               }
21937                 if (csindex > 5) {
21938                         dev_err(dev,
21939 diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
21940 index 3d74f237f005..68145e326eb9 100644
21941 --- a/drivers/bus/ti-sysc.c
21942 +++ b/drivers/bus/ti-sysc.c
21943 @@ -635,6 +635,51 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
21944         return 0;
21947 +/* Interconnect instances to probe before l4_per instances */
21948 +static struct resource early_bus_ranges[] = {
21949 +       /* am3/4 l4_wkup */
21950 +       { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
21951 +       /* omap4/5 and dra7 l4_cfg */
21952 +       { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
21953 +       /* omap4 l4_wkup */
21954 +       { .start = 0x4a300000, .end = 0x4a300000 + 0x30000,  },
21955 +       /* omap5 and dra7 l4_wkup without dra7 dcan segment */
21956 +       { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000,  },
21959 +static atomic_t sysc_defer = ATOMIC_INIT(10);
21961 +/**
21962 + * sysc_defer_non_critical - defer non_critical interconnect probing
21963 + * @ddata: device driver data
21964 + *
21965 + * We want to probe l4_cfg and l4_wkup interconnect instances before any
21966 + * l4_per instances as l4_per instances depend on resources on l4_cfg and
21967 + * l4_wkup interconnects.
21968 + */
21969 +static int sysc_defer_non_critical(struct sysc *ddata)
21971 +       struct resource *res;
21972 +       int i;
21974 +       if (!atomic_read(&sysc_defer))
21975 +               return 0;
21977 +       for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
21978 +               res = &early_bus_ranges[i];
21979 +               if (ddata->module_pa >= res->start &&
21980 +                   ddata->module_pa <= res->end) {
21981 +                       atomic_set(&sysc_defer, 0);
21983 +                       return 0;
21984 +               }
21985 +       }
21987 +       atomic_dec_if_positive(&sysc_defer);
21989 +       return -EPROBE_DEFER;
21992  static struct device_node *stdout_path;
21994  static void sysc_init_stdout_path(struct sysc *ddata)
21995 @@ -856,15 +901,19 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
21996         struct device_node *np = ddata->dev->of_node;
21997         int error;
21999 -       if (!of_get_property(np, "reg", NULL))
22000 -               return 0;
22002         error = sysc_parse_and_check_child_range(ddata);
22003         if (error)
22004                 return error;
22006 +       error = sysc_defer_non_critical(ddata);
22007 +       if (error)
22008 +               return error;
22010         sysc_check_children(ddata);
22012 +       if (!of_get_property(np, "reg", NULL))
22013 +               return 0;
22015         error = sysc_parse_registers(ddata);
22016         if (error)
22017                 return error;
22018 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
22019 index c44ad18464f1..ca87178200e0 100644
22020 --- a/drivers/char/ipmi/ipmi_msghandler.c
22021 +++ b/drivers/char/ipmi/ipmi_msghandler.c
22022 @@ -3563,7 +3563,7 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
22023         /* Current message first, to preserve order */
22024         while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
22025                 /* Wait for the message to clear out. */
22026 -               schedule_timeout(1);
22027 +               schedule_min_hrtimeout();
22028         }
22030         /* No need for locks, the interface is down. */
22031 diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
22032 index 0416b9c9d410..9ce5fae0f1cf 100644
22033 --- a/drivers/char/ipmi/ipmi_ssif.c
22034 +++ b/drivers/char/ipmi/ipmi_ssif.c
22035 @@ -1288,7 +1288,7 @@ static void shutdown_ssif(void *send_info)
22037         /* make sure the driver is not looking for flags any more. */
22038         while (ssif_info->ssif_state != SSIF_NORMAL)
22039 -               schedule_timeout(1);
22040 +               schedule_min_hrtimeout();
22042         ssif_info->stopping = true;
22043         del_timer_sync(&ssif_info->watch_timer);
22044 diff --git a/drivers/char/random.c b/drivers/char/random.c
22045 index 0fe9e200e4c8..5d6acfecd919 100644
22046 --- a/drivers/char/random.c
22047 +++ b/drivers/char/random.c
22048 @@ -819,7 +819,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
22050  static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
22052 -       memcpy(&crng->state[0], "expand 32-byte k", 16);
22053 +       chacha_init_consts(crng->state);
22054         _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
22055         crng_init_try_arch(crng);
22056         crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
22057 @@ -827,7 +827,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
22059  static void __init crng_initialize_primary(struct crng_state *crng)
22061 -       memcpy(&crng->state[0], "expand 32-byte k", 16);
22062 +       chacha_init_consts(crng->state);
22063         _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
22064         if (crng_init_try_arch_early(crng) && trust_cpu) {
22065                 invalidate_batched_entropy();
22066 diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
22067 index 3633ed70f48f..1b18ce5ebab1 100644
22068 --- a/drivers/char/tpm/eventlog/acpi.c
22069 +++ b/drivers/char/tpm/eventlog/acpi.c
22070 @@ -41,6 +41,27 @@ struct acpi_tcpa {
22071         };
22072  };
22074 +/* Check that the given log is indeed a TPM2 log. */
22075 +static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
22077 +       struct tcg_efi_specid_event_head *efispecid;
22078 +       struct tcg_pcr_event *event_header;
22079 +       int n;
22081 +       if (len < sizeof(*event_header))
22082 +               return false;
22083 +       len -= sizeof(*event_header);
22084 +       event_header = bios_event_log;
22086 +       if (len < sizeof(*efispecid))
22087 +               return false;
22088 +       efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
22090 +       n = memcmp(efispecid->signature, TCG_SPECID_SIG,
22091 +                  sizeof(TCG_SPECID_SIG));
22092 +       return n == 0;
22095  /* read binary bios log */
22096  int tpm_read_log_acpi(struct tpm_chip *chip)
22098 @@ -52,6 +73,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
22099         struct acpi_table_tpm2 *tbl;
22100         struct acpi_tpm2_phy *tpm2_phy;
22101         int format;
22102 +       int ret;
22104         log = &chip->log;
22106 @@ -112,6 +134,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
22108         log->bios_event_log_end = log->bios_event_log + len;
22110 +       ret = -EIO;
22111         virt = acpi_os_map_iomem(start, len);
22112         if (!virt)
22113                 goto err;
22114 @@ -119,11 +142,19 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
22115         memcpy_fromio(log->bios_event_log, virt, len);
22117         acpi_os_unmap_iomem(virt, len);
22119 +       if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
22120 +           !tpm_is_tpm2_log(log->bios_event_log, len)) {
22121 +               /* try EFI log next */
22122 +               ret = -ENODEV;
22123 +               goto err;
22124 +       }
22126         return format;
22128  err:
22129         kfree(log->bios_event_log);
22130         log->bios_event_log = NULL;
22131 -       return -EIO;
22132 +       return ret;
22135 diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
22136 index 7460f230bae4..8512ec76d526 100644
22137 --- a/drivers/char/tpm/eventlog/common.c
22138 +++ b/drivers/char/tpm/eventlog/common.c
22139 @@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
22140         int log_version;
22141         int rc = 0;
22143 +       if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
22144 +               return;
22146         rc = tpm_read_log(chip);
22147         if (rc < 0)
22148                 return;
22149 diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
22150 index 35229e5143ca..e6cb9d525e30 100644
22151 --- a/drivers/char/tpm/eventlog/efi.c
22152 +++ b/drivers/char/tpm/eventlog/efi.c
22153 @@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
22156         struct efi_tcg2_final_events_table *final_tbl = NULL;
22157 +       int final_events_log_size = efi_tpm_final_log_size;
22158         struct linux_efi_tpm_eventlog *log_tbl;
22159         struct tpm_bios_log *log;
22160         u32 log_size;
22161 @@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip)
22162         ret = tpm_log_version;
22164         if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
22165 -           efi_tpm_final_log_size == 0 ||
22166 +           final_events_log_size == 0 ||
22167             tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
22168                 goto out;
22170         final_tbl = memremap(efi.tpm_final_log,
22171 -                            sizeof(*final_tbl) + efi_tpm_final_log_size,
22172 +                            sizeof(*final_tbl) + final_events_log_size,
22173                              MEMREMAP_WB);
22174         if (!final_tbl) {
22175                 pr_err("Could not map UEFI TPM final log\n");
22176 @@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip)
22177                 goto out;
22178         }
22180 -       efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
22181 +       /*
22182 +        * The 'final events log' size excludes the 'final events preboot log'
22183 +        * at its beginning.
22184 +        */
22185 +       final_events_log_size -= log_tbl->final_events_preboot_size;
22187 +       /*
22188 +        * Allocate memory for the 'combined log' where we will append the
22189 +        * 'final events log' to.
22190 +        */
22191         tmp = krealloc(log->bios_event_log,
22192 -                      log_size + efi_tpm_final_log_size,
22193 +                      log_size + final_events_log_size,
22194                        GFP_KERNEL);
22195         if (!tmp) {
22196                 kfree(log->bios_event_log);
22197 @@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip)
22198         log->bios_event_log = tmp;
22200         /*
22201 -        * Copy any of the final events log that didn't also end up in the
22202 -        * main log. Events can be logged in both if events are generated
22203 +        * Append any of the 'final events log' that didn't also end up in the
22204 +        * 'main log'. Events can be logged in both if events are generated
22205          * between GetEventLog() and ExitBootServices().
22206          */
22207         memcpy((void *)log->bios_event_log + log_size,
22208                final_tbl->events + log_tbl->final_events_preboot_size,
22209 -              efi_tpm_final_log_size);
22210 +              final_events_log_size);
22211 +       /*
22212 +        * The size of the 'combined log' is the size of the 'main log' plus
22213 +        * the size of the 'final events log'.
22214 +        */
22215         log->bios_event_log_end = log->bios_event_log +
22216 -               log_size + efi_tpm_final_log_size;
22217 +               log_size + final_events_log_size;
22219  out:
22220         memunmap(final_tbl);
22221 diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
22222 index eff1f12d981a..c84d23951219 100644
22223 --- a/drivers/char/tpm/tpm2-cmd.c
22224 +++ b/drivers/char/tpm/tpm2-cmd.c
22225 @@ -656,6 +656,7 @@ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
22227         if (nr_commands !=
22228             be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
22229 +               rc = -EFAULT;
22230                 tpm_buf_destroy(&buf);
22231                 goto out;
22232         }
22233 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
22234 index a2e0395cbe61..55b9d3965ae1 100644
22235 --- a/drivers/char/tpm/tpm_tis_core.c
22236 +++ b/drivers/char/tpm/tpm_tis_core.c
22237 @@ -709,16 +709,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
22238         cap_t cap;
22239         int ret;
22241 -       /* TPM 2.0 */
22242 -       if (chip->flags & TPM_CHIP_FLAG_TPM2)
22243 -               return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
22245 -       /* TPM 1.2 */
22246         ret = request_locality(chip, 0);
22247         if (ret < 0)
22248                 return ret;
22250 -       ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
22251 +       if (chip->flags & TPM_CHIP_FLAG_TPM2)
22252 +               ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
22253 +       else
22254 +               ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
22256         release_locality(chip, 0);
22258 @@ -1127,12 +1125,20 @@ int tpm_tis_resume(struct device *dev)
22259         if (ret)
22260                 return ret;
22262 -       /* TPM 1.2 requires self-test on resume. This function actually returns
22263 +       /*
22264 +        * TPM 1.2 requires self-test on resume. This function actually returns
22265          * an error code but for unknown reason it isn't handled.
22266          */
22267 -       if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
22268 +       if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
22269 +               ret = request_locality(chip, 0);
22270 +               if (ret < 0)
22271 +                       return ret;
22273                 tpm1_do_selftest(chip);
22275 +               release_locality(chip, 0);
22276 +       }
22278         return 0;
22280  EXPORT_SYMBOL_GPL(tpm_tis_resume);
22281 diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
22282 index ec9a65e7887d..f19c227d20f4 100644
22283 --- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
22284 +++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
22285 @@ -483,6 +483,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
22286         expected = be32_to_cpup((__be32 *)(buf + 2));
22287         if (expected > buf_len) {
22288                 dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
22289 +               rc = -E2BIG;
22290                 goto out_err;
22291         }
22293 diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
22294 index 6a0059e508e3..93f5d11c830b 100644
22295 --- a/drivers/char/ttyprintk.c
22296 +++ b/drivers/char/ttyprintk.c
22297 @@ -158,12 +158,23 @@ static int tpk_ioctl(struct tty_struct *tty,
22298         return 0;
22302 + * TTY operations hangup function.
22303 + */
22304 +static void tpk_hangup(struct tty_struct *tty)
22306 +       struct ttyprintk_port *tpkp = tty->driver_data;
22308 +       tty_port_hangup(&tpkp->port);
22311  static const struct tty_operations ttyprintk_ops = {
22312         .open = tpk_open,
22313         .close = tpk_close,
22314         .write = tpk_write,
22315         .write_room = tpk_write_room,
22316         .ioctl = tpk_ioctl,
22317 +       .hangup = tpk_hangup,
22318  };
22320  static const struct tty_port_operations null_ops = { };
22321 diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
22322 index a55b37fc2c8b..bc3be5f3eae1 100644
22323 --- a/drivers/clk/clk-ast2600.c
22324 +++ b/drivers/clk/clk-ast2600.c
22325 @@ -61,10 +61,10 @@ static void __iomem *scu_g6_base;
22326  static const struct aspeed_gate_data aspeed_g6_gates[] = {
22327         /*                                  clk rst  name               parent   flags */
22328         [ASPEED_CLK_GATE_MCLK]          = {  0, -1, "mclk-gate",        "mpll",  CLK_IS_CRITICAL }, /* SDRAM */
22329 -       [ASPEED_CLK_GATE_ECLK]          = {  1, -1, "eclk-gate",        "eclk",  0 },   /* Video Engine */
22330 +       [ASPEED_CLK_GATE_ECLK]          = {  1,  6, "eclk-gate",        "eclk",  0 },   /* Video Engine */
22331         [ASPEED_CLK_GATE_GCLK]          = {  2,  7, "gclk-gate",        NULL,    0 },   /* 2D engine */
22332         /* vclk parent - dclk/d1clk/hclk/mclk */
22333 -       [ASPEED_CLK_GATE_VCLK]          = {  3,  6, "vclk-gate",        NULL,    0 },   /* Video Capture */
22334 +       [ASPEED_CLK_GATE_VCLK]          = {  3, -1, "vclk-gate",        NULL,    0 },   /* Video Capture */
22335         [ASPEED_CLK_GATE_BCLK]          = {  4,  8, "bclk-gate",        "bclk",  0 }, /* PCIe/PCI */
22336         /* From dpll */
22337         [ASPEED_CLK_GATE_DCLK]          = {  5, -1, "dclk-gate",        NULL,    CLK_IS_CRITICAL }, /* DAC */
22338 diff --git a/drivers/clk/imx/clk-imx25.c b/drivers/clk/imx/clk-imx25.c
22339 index a66cabfbf94f..66192fe0a898 100644
22340 --- a/drivers/clk/imx/clk-imx25.c
22341 +++ b/drivers/clk/imx/clk-imx25.c
22342 @@ -73,16 +73,6 @@ enum mx25_clks {
22344  static struct clk *clk[clk_max];
22346 -static struct clk ** const uart_clks[] __initconst = {
22347 -       &clk[uart_ipg_per],
22348 -       &clk[uart1_ipg],
22349 -       &clk[uart2_ipg],
22350 -       &clk[uart3_ipg],
22351 -       &clk[uart4_ipg],
22352 -       &clk[uart5_ipg],
22353 -       NULL
22356  static int __init __mx25_clocks_init(void __iomem *ccm_base)
22358         BUG_ON(!ccm_base);
22359 @@ -228,7 +218,7 @@ static int __init __mx25_clocks_init(void __iomem *ccm_base)
22360          */
22361         clk_set_parent(clk[cko_sel], clk[ipg]);
22363 -       imx_register_uart_clocks(uart_clks);
22364 +       imx_register_uart_clocks(6);
22366         return 0;
22368 diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
22369 index 5585ded8b8c6..56a5fc402b10 100644
22370 --- a/drivers/clk/imx/clk-imx27.c
22371 +++ b/drivers/clk/imx/clk-imx27.c
22372 @@ -49,17 +49,6 @@ static const char *ssi_sel_clks[] = { "spll_gate", "mpll", };
22373  static struct clk *clk[IMX27_CLK_MAX];
22374  static struct clk_onecell_data clk_data;
22376 -static struct clk ** const uart_clks[] __initconst = {
22377 -       &clk[IMX27_CLK_PER1_GATE],
22378 -       &clk[IMX27_CLK_UART1_IPG_GATE],
22379 -       &clk[IMX27_CLK_UART2_IPG_GATE],
22380 -       &clk[IMX27_CLK_UART3_IPG_GATE],
22381 -       &clk[IMX27_CLK_UART4_IPG_GATE],
22382 -       &clk[IMX27_CLK_UART5_IPG_GATE],
22383 -       &clk[IMX27_CLK_UART6_IPG_GATE],
22384 -       NULL
22387  static void __init _mx27_clocks_init(unsigned long fref)
22389         BUG_ON(!ccm);
22390 @@ -176,7 +165,7 @@ static void __init _mx27_clocks_init(unsigned long fref)
22392         clk_prepare_enable(clk[IMX27_CLK_EMI_AHB_GATE]);
22394 -       imx_register_uart_clocks(uart_clks);
22395 +       imx_register_uart_clocks(7);
22397         imx_print_silicon_rev("i.MX27", mx27_revision());
22399 diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
22400 index c1df03665c09..0fe5ac210156 100644
22401 --- a/drivers/clk/imx/clk-imx35.c
22402 +++ b/drivers/clk/imx/clk-imx35.c
22403 @@ -82,14 +82,6 @@ enum mx35_clks {
22405  static struct clk *clk[clk_max];
22407 -static struct clk ** const uart_clks[] __initconst = {
22408 -       &clk[ipg],
22409 -       &clk[uart1_gate],
22410 -       &clk[uart2_gate],
22411 -       &clk[uart3_gate],
22412 -       NULL
22415  static void __init _mx35_clocks_init(void)
22417         void __iomem *base;
22418 @@ -243,7 +235,7 @@ static void __init _mx35_clocks_init(void)
22419          */
22420         clk_prepare_enable(clk[scc_gate]);
22422 -       imx_register_uart_clocks(uart_clks);
22423 +       imx_register_uart_clocks(4);
22425         imx_print_silicon_rev("i.MX35", mx35_revision());
22427 diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
22428 index 01e079b81026..e4493846454d 100644
22429 --- a/drivers/clk/imx/clk-imx5.c
22430 +++ b/drivers/clk/imx/clk-imx5.c
22431 @@ -128,30 +128,6 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_
22432  static struct clk *clk[IMX5_CLK_END];
22433  static struct clk_onecell_data clk_data;
22435 -static struct clk ** const uart_clks_mx51[] __initconst = {
22436 -       &clk[IMX5_CLK_UART1_IPG_GATE],
22437 -       &clk[IMX5_CLK_UART1_PER_GATE],
22438 -       &clk[IMX5_CLK_UART2_IPG_GATE],
22439 -       &clk[IMX5_CLK_UART2_PER_GATE],
22440 -       &clk[IMX5_CLK_UART3_IPG_GATE],
22441 -       &clk[IMX5_CLK_UART3_PER_GATE],
22442 -       NULL
22445 -static struct clk ** const uart_clks_mx50_mx53[] __initconst = {
22446 -       &clk[IMX5_CLK_UART1_IPG_GATE],
22447 -       &clk[IMX5_CLK_UART1_PER_GATE],
22448 -       &clk[IMX5_CLK_UART2_IPG_GATE],
22449 -       &clk[IMX5_CLK_UART2_PER_GATE],
22450 -       &clk[IMX5_CLK_UART3_IPG_GATE],
22451 -       &clk[IMX5_CLK_UART3_PER_GATE],
22452 -       &clk[IMX5_CLK_UART4_IPG_GATE],
22453 -       &clk[IMX5_CLK_UART4_PER_GATE],
22454 -       &clk[IMX5_CLK_UART5_IPG_GATE],
22455 -       &clk[IMX5_CLK_UART5_PER_GATE],
22456 -       NULL
22459  static void __init mx5_clocks_common_init(void __iomem *ccm_base)
22461         clk[IMX5_CLK_DUMMY]             = imx_clk_fixed("dummy", 0);
22462 @@ -382,7 +358,7 @@ static void __init mx50_clocks_init(struct device_node *np)
22463         r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
22464         clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
22466 -       imx_register_uart_clocks(uart_clks_mx50_mx53);
22467 +       imx_register_uart_clocks(5);
22469  CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
22471 @@ -488,7 +464,7 @@ static void __init mx51_clocks_init(struct device_node *np)
22472         val |= 1 << 23;
22473         writel(val, MXC_CCM_CLPCR);
22475 -       imx_register_uart_clocks(uart_clks_mx51);
22476 +       imx_register_uart_clocks(3);
22478  CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
22480 @@ -633,6 +609,6 @@ static void __init mx53_clocks_init(struct device_node *np)
22481         r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
22482         clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
22484 -       imx_register_uart_clocks(uart_clks_mx50_mx53);
22485 +       imx_register_uart_clocks(5);
22487  CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
22488 diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
22489 index 521d6136d22c..496900de0b0b 100644
22490 --- a/drivers/clk/imx/clk-imx6q.c
22491 +++ b/drivers/clk/imx/clk-imx6q.c
22492 @@ -140,13 +140,6 @@ static inline int clk_on_imx6dl(void)
22493         return of_machine_is_compatible("fsl,imx6dl");
22496 -static const int uart_clk_ids[] __initconst = {
22497 -       IMX6QDL_CLK_UART_IPG,
22498 -       IMX6QDL_CLK_UART_SERIAL,
22501 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
22503  static int ldb_di_sel_by_clock_id(int clock_id)
22505         switch (clock_id) {
22506 @@ -440,7 +433,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
22507         struct device_node *np;
22508         void __iomem *anatop_base, *base;
22509         int ret;
22510 -       int i;
22512         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
22513                                           IMX6QDL_CLK_END), GFP_KERNEL);
22514 @@ -982,12 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
22515                                hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
22516         }
22518 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
22519 -               int index = uart_clk_ids[i];
22521 -               uart_clks[i] = &hws[index]->clk;
22522 -       }
22524 -       imx_register_uart_clocks(uart_clks);
22525 +       imx_register_uart_clocks(1);
22527  CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
22528 diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
22529 index 29eab05c9068..277365970320 100644
22530 --- a/drivers/clk/imx/clk-imx6sl.c
22531 +++ b/drivers/clk/imx/clk-imx6sl.c
22532 @@ -179,19 +179,11 @@ void imx6sl_set_wait_clk(bool enter)
22533                 imx6sl_enable_pll_arm(false);
22536 -static const int uart_clk_ids[] __initconst = {
22537 -       IMX6SL_CLK_UART,
22538 -       IMX6SL_CLK_UART_SERIAL,
22541 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
22543  static void __init imx6sl_clocks_init(struct device_node *ccm_node)
22545         struct device_node *np;
22546         void __iomem *base;
22547         int ret;
22548 -       int i;
22550         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
22551                                           IMX6SL_CLK_END), GFP_KERNEL);
22552 @@ -448,12 +440,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
22553         clk_set_parent(hws[IMX6SL_CLK_LCDIF_AXI_SEL]->clk,
22554                        hws[IMX6SL_CLK_PLL2_PFD2]->clk);
22556 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
22557 -               int index = uart_clk_ids[i];
22559 -               uart_clks[i] = &hws[index]->clk;
22560 -       }
22562 -       imx_register_uart_clocks(uart_clks);
22563 +       imx_register_uart_clocks(2);
22565  CLK_OF_DECLARE(imx6sl, "fsl,imx6sl-ccm", imx6sl_clocks_init);
22566 diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
22567 index 8e8288bda4d0..31d777f30039 100644
22568 --- a/drivers/clk/imx/clk-imx6sll.c
22569 +++ b/drivers/clk/imx/clk-imx6sll.c
22570 @@ -76,26 +76,10 @@ static u32 share_count_ssi1;
22571  static u32 share_count_ssi2;
22572  static u32 share_count_ssi3;
22574 -static const int uart_clk_ids[] __initconst = {
22575 -       IMX6SLL_CLK_UART1_IPG,
22576 -       IMX6SLL_CLK_UART1_SERIAL,
22577 -       IMX6SLL_CLK_UART2_IPG,
22578 -       IMX6SLL_CLK_UART2_SERIAL,
22579 -       IMX6SLL_CLK_UART3_IPG,
22580 -       IMX6SLL_CLK_UART3_SERIAL,
22581 -       IMX6SLL_CLK_UART4_IPG,
22582 -       IMX6SLL_CLK_UART4_SERIAL,
22583 -       IMX6SLL_CLK_UART5_IPG,
22584 -       IMX6SLL_CLK_UART5_SERIAL,
22587 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
22589  static void __init imx6sll_clocks_init(struct device_node *ccm_node)
22591         struct device_node *np;
22592         void __iomem *base;
22593 -       int i;
22595         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
22596                                           IMX6SLL_CLK_END), GFP_KERNEL);
22597 @@ -356,13 +340,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
22599         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
22601 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
22602 -               int index = uart_clk_ids[i];
22604 -               uart_clks[i] = &hws[index]->clk;
22605 -       }
22607 -       imx_register_uart_clocks(uart_clks);
22608 +       imx_register_uart_clocks(5);
22610         /* Lower the AHB clock rate before changing the clock source. */
22611         clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 99000000);
22612 diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
22613 index 20dcce526d07..fc1bd23d4583 100644
22614 --- a/drivers/clk/imx/clk-imx6sx.c
22615 +++ b/drivers/clk/imx/clk-imx6sx.c
22616 @@ -117,18 +117,10 @@ static u32 share_count_ssi3;
22617  static u32 share_count_sai1;
22618  static u32 share_count_sai2;
22620 -static const int uart_clk_ids[] __initconst = {
22621 -       IMX6SX_CLK_UART_IPG,
22622 -       IMX6SX_CLK_UART_SERIAL,
22625 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
22627  static void __init imx6sx_clocks_init(struct device_node *ccm_node)
22629         struct device_node *np;
22630         void __iomem *base;
22631 -       int i;
22633         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
22634                                           IMX6SX_CLK_CLK_END), GFP_KERNEL);
22635 @@ -556,12 +548,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
22636         clk_set_parent(hws[IMX6SX_CLK_QSPI1_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
22637         clk_set_parent(hws[IMX6SX_CLK_QSPI2_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
22639 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
22640 -               int index = uart_clk_ids[i];
22642 -               uart_clks[i] = &hws[index]->clk;
22643 -       }
22645 -       imx_register_uart_clocks(uart_clks);
22646 +       imx_register_uart_clocks(2);
22648  CLK_OF_DECLARE(imx6sx, "fsl,imx6sx-ccm", imx6sx_clocks_init);
22649 diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
22650 index 22d24a6a05e7..c4e0f1c07192 100644
22651 --- a/drivers/clk/imx/clk-imx7d.c
22652 +++ b/drivers/clk/imx/clk-imx7d.c
22653 @@ -377,23 +377,10 @@ static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_
22654  static struct clk_hw **hws;
22655  static struct clk_hw_onecell_data *clk_hw_data;
22657 -static const int uart_clk_ids[] __initconst = {
22658 -       IMX7D_UART1_ROOT_CLK,
22659 -       IMX7D_UART2_ROOT_CLK,
22660 -       IMX7D_UART3_ROOT_CLK,
22661 -       IMX7D_UART4_ROOT_CLK,
22662 -       IMX7D_UART5_ROOT_CLK,
22663 -       IMX7D_UART6_ROOT_CLK,
22664 -       IMX7D_UART7_ROOT_CLK,
22667 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
22669  static void __init imx7d_clocks_init(struct device_node *ccm_node)
22671         struct device_node *np;
22672         void __iomem *base;
22673 -       int i;
22675         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
22676                                           IMX7D_CLK_END), GFP_KERNEL);
22677 @@ -897,14 +884,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
22678         hws[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
22679         hws[IMX7D_USB_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
22681 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
22682 -               int index = uart_clk_ids[i];
22684 -               uart_clks[i] = &hws[index]->clk;
22685 -       }
22688 -       imx_register_uart_clocks(uart_clks);
22689 +       imx_register_uart_clocks(7);
22692  CLK_OF_DECLARE(imx7d, "fsl,imx7d-ccm", imx7d_clocks_init);
22693 diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
22694 index 634c0b6636b0..779e09105da7 100644
22695 --- a/drivers/clk/imx/clk-imx7ulp.c
22696 +++ b/drivers/clk/imx/clk-imx7ulp.c
22697 @@ -43,19 +43,6 @@ static const struct clk_div_table ulp_div_table[] = {
22698         { /* sentinel */ },
22699  };
22701 -static const int pcc2_uart_clk_ids[] __initconst = {
22702 -       IMX7ULP_CLK_LPUART4,
22703 -       IMX7ULP_CLK_LPUART5,
22706 -static const int pcc3_uart_clk_ids[] __initconst = {
22707 -       IMX7ULP_CLK_LPUART6,
22708 -       IMX7ULP_CLK_LPUART7,
22711 -static struct clk **pcc2_uart_clks[ARRAY_SIZE(pcc2_uart_clk_ids) + 1] __initdata;
22712 -static struct clk **pcc3_uart_clks[ARRAY_SIZE(pcc3_uart_clk_ids) + 1] __initdata;
22714  static void __init imx7ulp_clk_scg1_init(struct device_node *np)
22716         struct clk_hw_onecell_data *clk_data;
22717 @@ -150,7 +137,6 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
22718         struct clk_hw_onecell_data *clk_data;
22719         struct clk_hw **hws;
22720         void __iomem *base;
22721 -       int i;
22723         clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END),
22724                            GFP_KERNEL);
22725 @@ -190,13 +176,7 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
22727         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
22729 -       for (i = 0; i < ARRAY_SIZE(pcc2_uart_clk_ids); i++) {
22730 -               int index = pcc2_uart_clk_ids[i];
22732 -               pcc2_uart_clks[i] = &hws[index]->clk;
22733 -       }
22735 -       imx_register_uart_clocks(pcc2_uart_clks);
22736 +       imx_register_uart_clocks(2);
22738  CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
22740 @@ -205,7 +185,6 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
22741         struct clk_hw_onecell_data *clk_data;
22742         struct clk_hw **hws;
22743         void __iomem *base;
22744 -       int i;
22746         clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END),
22747                            GFP_KERNEL);
22748 @@ -244,13 +223,7 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
22750         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
22752 -       for (i = 0; i < ARRAY_SIZE(pcc3_uart_clk_ids); i++) {
22753 -               int index = pcc3_uart_clk_ids[i];
22755 -               pcc3_uart_clks[i] = &hws[index]->clk;
22756 -       }
22758 -       imx_register_uart_clocks(pcc3_uart_clks);
22759 +       imx_register_uart_clocks(7);
22761  CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
22763 diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
22764 index 6a01eec36dd0..f1919fafb124 100644
22765 --- a/drivers/clk/imx/clk-imx8mm.c
22766 +++ b/drivers/clk/imx/clk-imx8mm.c
22767 @@ -296,20 +296,12 @@ static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "
22768  static struct clk_hw_onecell_data *clk_hw_data;
22769  static struct clk_hw **hws;
22771 -static const int uart_clk_ids[] = {
22772 -       IMX8MM_CLK_UART1_ROOT,
22773 -       IMX8MM_CLK_UART2_ROOT,
22774 -       IMX8MM_CLK_UART3_ROOT,
22775 -       IMX8MM_CLK_UART4_ROOT,
22777 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
22779  static int imx8mm_clocks_probe(struct platform_device *pdev)
22781         struct device *dev = &pdev->dev;
22782         struct device_node *np = dev->of_node;
22783         void __iomem *base;
22784 -       int ret, i;
22785 +       int ret;
22787         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
22788                                           IMX8MM_CLK_END), GFP_KERNEL);
22789 @@ -634,13 +626,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
22790                 goto unregister_hws;
22791         }
22793 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
22794 -               int index = uart_clk_ids[i];
22796 -               uart_hws[i] = &hws[index]->clk;
22797 -       }
22799 -       imx_register_uart_clocks(uart_hws);
22800 +       imx_register_uart_clocks(4);
22802         return 0;
22804 diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
22805 index 324c5fd0aa04..88f6630cd472 100644
22806 --- a/drivers/clk/imx/clk-imx8mn.c
22807 +++ b/drivers/clk/imx/clk-imx8mn.c
22808 @@ -289,20 +289,12 @@ static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "
22809  static struct clk_hw_onecell_data *clk_hw_data;
22810  static struct clk_hw **hws;
22812 -static const int uart_clk_ids[] = {
22813 -       IMX8MN_CLK_UART1_ROOT,
22814 -       IMX8MN_CLK_UART2_ROOT,
22815 -       IMX8MN_CLK_UART3_ROOT,
22816 -       IMX8MN_CLK_UART4_ROOT,
22818 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
22820  static int imx8mn_clocks_probe(struct platform_device *pdev)
22822         struct device *dev = &pdev->dev;
22823         struct device_node *np = dev->of_node;
22824         void __iomem *base;
22825 -       int ret, i;
22826 +       int ret;
22828         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
22829                                           IMX8MN_CLK_END), GFP_KERNEL);
22830 @@ -585,13 +577,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
22831                 goto unregister_hws;
22832         }
22834 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
22835 -               int index = uart_clk_ids[i];
22837 -               uart_hws[i] = &hws[index]->clk;
22838 -       }
22840 -       imx_register_uart_clocks(uart_hws);
22841 +       imx_register_uart_clocks(4);
22843         return 0;
22845 diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
22846 index 2f4e1d674e1c..3e6557e7d559 100644
22847 --- a/drivers/clk/imx/clk-imx8mp.c
22848 +++ b/drivers/clk/imx/clk-imx8mp.c
22849 @@ -414,20 +414,11 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
22850  static struct clk_hw **hws;
22851  static struct clk_hw_onecell_data *clk_hw_data;
22853 -static const int uart_clk_ids[] = {
22854 -       IMX8MP_CLK_UART1_ROOT,
22855 -       IMX8MP_CLK_UART2_ROOT,
22856 -       IMX8MP_CLK_UART3_ROOT,
22857 -       IMX8MP_CLK_UART4_ROOT,
22859 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
22861  static int imx8mp_clocks_probe(struct platform_device *pdev)
22863         struct device *dev = &pdev->dev;
22864         struct device_node *np;
22865         void __iomem *anatop_base, *ccm_base;
22866 -       int i;
22868         np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
22869         anatop_base = of_iomap(np, 0);
22870 @@ -737,13 +728,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
22872         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
22874 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
22875 -               int index = uart_clk_ids[i];
22877 -               uart_clks[i] = &hws[index]->clk;
22878 -       }
22880 -       imx_register_uart_clocks(uart_clks);
22881 +       imx_register_uart_clocks(4);
22883         return 0;
22885 diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
22886 index 4dd4ae9d022b..3e1a10d3f55c 100644
22887 --- a/drivers/clk/imx/clk-imx8mq.c
22888 +++ b/drivers/clk/imx/clk-imx8mq.c
22889 @@ -281,20 +281,12 @@ static const char * const pllout_monitor_sels[] = {"osc_25m", "osc_27m", "dummy"
22890  static struct clk_hw_onecell_data *clk_hw_data;
22891  static struct clk_hw **hws;
22893 -static const int uart_clk_ids[] = {
22894 -       IMX8MQ_CLK_UART1_ROOT,
22895 -       IMX8MQ_CLK_UART2_ROOT,
22896 -       IMX8MQ_CLK_UART3_ROOT,
22897 -       IMX8MQ_CLK_UART4_ROOT,
22899 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
22901  static int imx8mq_clocks_probe(struct platform_device *pdev)
22903         struct device *dev = &pdev->dev;
22904         struct device_node *np = dev->of_node;
22905         void __iomem *base;
22906 -       int err, i;
22907 +       int err;
22909         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
22910                                           IMX8MQ_CLK_END), GFP_KERNEL);
22911 @@ -629,13 +621,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
22912                 goto unregister_hws;
22913         }
22915 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
22916 -               int index = uart_clk_ids[i];
22918 -               uart_hws[i] = &hws[index]->clk;
22919 -       }
22921 -       imx_register_uart_clocks(uart_hws);
22922 +       imx_register_uart_clocks(4);
22924         return 0;
22926 diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
22927 index 47882c51cb85..7cc669934253 100644
22928 --- a/drivers/clk/imx/clk.c
22929 +++ b/drivers/clk/imx/clk.c
22930 @@ -147,8 +147,10 @@ void imx_cscmr1_fixup(u32 *val)
22933  #ifndef MODULE
22934 -static int imx_keep_uart_clocks;
22935 -static struct clk ** const *imx_uart_clocks;
22937 +static bool imx_keep_uart_clocks;
22938 +static int imx_enabled_uart_clocks;
22939 +static struct clk **imx_uart_clocks;
22941  static int __init imx_keep_uart_clocks_param(char *str)
22943 @@ -161,24 +163,45 @@ __setup_param("earlycon", imx_keep_uart_earlycon,
22944  __setup_param("earlyprintk", imx_keep_uart_earlyprintk,
22945               imx_keep_uart_clocks_param, 0);
22947 -void imx_register_uart_clocks(struct clk ** const clks[])
22948 +void imx_register_uart_clocks(unsigned int clk_count)
22950 +       imx_enabled_uart_clocks = 0;
22952 +/* i.MX boards use device trees now.  For build tests without CONFIG_OF, do nothing */
22953 +#ifdef CONFIG_OF
22954         if (imx_keep_uart_clocks) {
22955                 int i;
22957 -               imx_uart_clocks = clks;
22958 -               for (i = 0; imx_uart_clocks[i]; i++)
22959 -                       clk_prepare_enable(*imx_uart_clocks[i]);
22960 +               imx_uart_clocks = kcalloc(clk_count, sizeof(struct clk *), GFP_KERNEL);
22962 +               if (!of_stdout)
22963 +                       return;
22965 +               for (i = 0; i < clk_count; i++) {
22966 +                       imx_uart_clocks[imx_enabled_uart_clocks] = of_clk_get(of_stdout, i);
22968 +                       /* Stop if there are no more of_stdout references */
22969 +                       if (IS_ERR(imx_uart_clocks[imx_enabled_uart_clocks]))
22970 +                               return;
22972 +                       /* Only enable the clock if it's not NULL */
22973 +                       if (imx_uart_clocks[imx_enabled_uart_clocks])
22974 +                               clk_prepare_enable(imx_uart_clocks[imx_enabled_uart_clocks++]);
22975 +               }
22976         }
22977 +#endif
22980  static int __init imx_clk_disable_uart(void)
22982 -       if (imx_keep_uart_clocks && imx_uart_clocks) {
22983 +       if (imx_keep_uart_clocks && imx_enabled_uart_clocks) {
22984                 int i;
22986 -               for (i = 0; imx_uart_clocks[i]; i++)
22987 -                       clk_disable_unprepare(*imx_uart_clocks[i]);
22988 +               for (i = 0; i < imx_enabled_uart_clocks; i++) {
22989 +                       clk_disable_unprepare(imx_uart_clocks[i]);
22990 +                       clk_put(imx_uart_clocks[i]);
22991 +               }
22992 +               kfree(imx_uart_clocks);
22993         }
22995         return 0;
22996 diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
22997 index 4f04c8287286..7571603bee23 100644
22998 --- a/drivers/clk/imx/clk.h
22999 +++ b/drivers/clk/imx/clk.h
23000 @@ -11,9 +11,9 @@ extern spinlock_t imx_ccm_lock;
23001  void imx_check_clocks(struct clk *clks[], unsigned int count);
23002  void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
23003  #ifndef MODULE
23004 -void imx_register_uart_clocks(struct clk ** const clks[]);
23005 +void imx_register_uart_clocks(unsigned int clk_count);
23006  #else
23007 -static inline void imx_register_uart_clocks(struct clk ** const clks[])
23008 +static inline void imx_register_uart_clocks(unsigned int clk_count)
23011  #endif
23012 diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
23013 index f5746f9ea929..32ac6b6b7530 100644
23014 --- a/drivers/clk/mvebu/armada-37xx-periph.c
23015 +++ b/drivers/clk/mvebu/armada-37xx-periph.c
23016 @@ -84,6 +84,7 @@ struct clk_pm_cpu {
23017         void __iomem *reg_div;
23018         u8 shift_div;
23019         struct regmap *nb_pm_base;
23020 +       unsigned long l1_expiration;
23021  };
23023  #define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
23024 @@ -440,33 +441,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
23025         return val;
23028 -static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
23030 -       struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
23031 -       struct regmap *base = pm_cpu->nb_pm_base;
23032 -       int load_level;
23034 -       /*
23035 -        * We set the clock parent only if the DVFS is available but
23036 -        * not enabled.
23037 -        */
23038 -       if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
23039 -               return -EINVAL;
23041 -       /* Set the parent clock for all the load level */
23042 -       for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
23043 -               unsigned int reg, mask,  val,
23044 -                       offset = ARMADA_37XX_NB_TBG_SEL_OFF;
23046 -               armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
23048 -               val = index << offset;
23049 -               mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
23050 -               regmap_update_bits(base, reg, mask, val);
23051 -       }
23052 -       return 0;
23055  static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
23056                                             unsigned long parent_rate)
23058 @@ -514,8 +488,10 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
23061  /*
23062 - * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
23063 - * respectively) to L0 frequency (1.2 Ghz) requires a significant
23064 + * Workaround when base CPU frequnecy is 1000 or 1200 MHz
23065 + *
23066 + * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz
23067 + * respectively) to L0 frequency (1/1.2 GHz) requires a significant
23068   * amount of time to let VDD stabilize to the appropriate
23069   * voltage. This amount of time is large enough that it cannot be
23070   * covered by the hardware countdown register. Due to this, the CPU
23071 @@ -525,26 +501,56 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
23072   * To work around this problem, we prevent switching directly from the
23073   * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
23074   * frequency in-between. The sequence therefore becomes:
23075 - * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
23076 + * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz)
23077   * 2. Sleep 20ms for stabling VDD voltage
23078 - * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
23079 + * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz).
23080   */
23081 -static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
23082 +static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu,
23083 +                                  unsigned int new_level, unsigned long rate,
23084 +                                  struct regmap *base)
23086         unsigned int cur_level;
23088 -       if (rate != 1200 * 1000 * 1000)
23089 -               return;
23091         regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
23092         cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
23093 -       if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
23095 +       if (cur_level == new_level)
23096 +               return;
23098 +       /*
23099 +        * System wants to go to L1 on its own. If we are going from L2/L3,
23100 +        * remember when 20ms will expire. If from L0, set the value so that
23101 +        * next switch to L0 won't have to wait.
23102 +        */
23103 +       if (new_level == ARMADA_37XX_DVFS_LOAD_1) {
23104 +               if (cur_level == ARMADA_37XX_DVFS_LOAD_0)
23105 +                       pm_cpu->l1_expiration = jiffies;
23106 +               else
23107 +                       pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20);
23108                 return;
23109 +       }
23111 +       /*
23112 +        * If we are setting to L2/L3, just invalidate L1 expiration time,
23113 +        * sleeping is not needed.
23114 +        */
23115 +       if (rate < 1000*1000*1000)
23116 +               goto invalidate_l1_exp;
23118 +       /*
23119 +        * We are going to L0 with rate >= 1GHz. Check whether we have been at
23120 +        * L1 for long enough time. If not, go to L1 for 20ms.
23121 +        */
23122 +       if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration)
23123 +               goto invalidate_l1_exp;
23125         regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
23126                            ARMADA_37XX_NB_CPU_LOAD_MASK,
23127                            ARMADA_37XX_DVFS_LOAD_1);
23128         msleep(20);
23130 +invalidate_l1_exp:
23131 +       pm_cpu->l1_expiration = 0;
23134  static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
23135 @@ -578,7 +584,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
23136                         reg = ARMADA_37XX_NB_CPU_LOAD;
23137                         mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
23139 -                       clk_pm_cpu_set_rate_wa(rate, base);
23140 +                       /* Apply workaround when base CPU frequency is 1000 or 1200 MHz */
23141 +                       if (parent_rate >= 1000*1000*1000)
23142 +                               clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base);
23144                         regmap_update_bits(base, reg, mask, load_level);
23146 @@ -592,7 +600,6 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
23148  static const struct clk_ops clk_pm_cpu_ops = {
23149         .get_parent = clk_pm_cpu_get_parent,
23150 -       .set_parent = clk_pm_cpu_set_parent,
23151         .round_rate = clk_pm_cpu_round_rate,
23152         .set_rate = clk_pm_cpu_set_rate,
23153         .recalc_rate = clk_pm_cpu_recalc_rate,
23154 diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
23155 index 45cfc57bff92..af6ac17c7dae 100644
23156 --- a/drivers/clk/qcom/a53-pll.c
23157 +++ b/drivers/clk/qcom/a53-pll.c
23158 @@ -93,6 +93,7 @@ static const struct of_device_id qcom_a53pll_match_table[] = {
23159         { .compatible = "qcom,msm8916-a53pll" },
23160         { }
23161  };
23162 +MODULE_DEVICE_TABLE(of, qcom_a53pll_match_table);
23164  static struct platform_driver qcom_a53pll_driver = {
23165         .probe = qcom_a53pll_probe,
23166 diff --git a/drivers/clk/qcom/a7-pll.c b/drivers/clk/qcom/a7-pll.c
23167 index e171d3caf2cf..c4a53e5db229 100644
23168 --- a/drivers/clk/qcom/a7-pll.c
23169 +++ b/drivers/clk/qcom/a7-pll.c
23170 @@ -86,6 +86,7 @@ static const struct of_device_id qcom_a7pll_match_table[] = {
23171         { .compatible = "qcom,sdx55-a7pll" },
23172         { }
23173  };
23174 +MODULE_DEVICE_TABLE(of, qcom_a7pll_match_table);
23176  static struct platform_driver qcom_a7pll_driver = {
23177         .probe = qcom_a7pll_probe,
23178 diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
23179 index 30be87fb222a..bef7899ad0d6 100644
23180 --- a/drivers/clk/qcom/apss-ipq-pll.c
23181 +++ b/drivers/clk/qcom/apss-ipq-pll.c
23182 @@ -81,6 +81,7 @@ static const struct of_device_id apss_ipq_pll_match_table[] = {
23183         { .compatible = "qcom,ipq6018-a53pll" },
23184         { }
23185  };
23186 +MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
23188  static struct platform_driver apss_ipq_pll_driver = {
23189         .probe = apss_ipq_pll_probe,
23190 diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
23191 index 87ee1bad9a9a..4a5d2a914bd6 100644
23192 --- a/drivers/clk/samsung/clk-exynos7.c
23193 +++ b/drivers/clk/samsung/clk-exynos7.c
23194 @@ -537,8 +537,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
23195         GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
23196                 ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
23197                 CLK_IS_CRITICAL, 0),
23198 +       /*
23199 +        * This clock is required for the CMU_FSYS1 registers access, keep it
23200 +        * enabled permanently until proper runtime PM support is added.
23201 +        */
23202         GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
23203 -               ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
23204 +               ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT |
23205 +               CLK_IS_CRITICAL, 0),
23207         GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
23208                 "dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,
23209 diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
23210 index cd5df9103614..d62778884208 100644
23211 --- a/drivers/clk/socfpga/clk-gate-a10.c
23212 +++ b/drivers/clk/socfpga/clk-gate-a10.c
23213 @@ -146,6 +146,7 @@ static void __init __socfpga_gate_init(struct device_node *node,
23214                 if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) {
23215                         pr_err("%s: failed to find altr,sys-mgr regmap!\n",
23216                                         __func__);
23217 +                       kfree(socfpga_clk);
23218                         return;
23219                 }
23220         }
23221 diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
23222 index 462c84321b2d..1998e9d4cfc0 100644
23223 --- a/drivers/clk/uniphier/clk-uniphier-mux.c
23224 +++ b/drivers/clk/uniphier/clk-uniphier-mux.c
23225 @@ -31,10 +31,10 @@ static int uniphier_clk_mux_set_parent(struct clk_hw *hw, u8 index)
23226  static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
23228         struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
23229 -       int num_parents = clk_hw_get_num_parents(hw);
23230 +       unsigned int num_parents = clk_hw_get_num_parents(hw);
23231         int ret;
23232         unsigned int val;
23233 -       u8 i;
23234 +       unsigned int i;
23236         ret = regmap_read(mux->regmap, mux->reg, &val);
23237         if (ret)
23238 diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
23239 index 92f449ed38e5..abe6afbf3407 100644
23240 --- a/drivers/clk/zynqmp/pll.c
23241 +++ b/drivers/clk/zynqmp/pll.c
23242 @@ -14,10 +14,12 @@
23243   * struct zynqmp_pll - PLL clock
23244   * @hw:                Handle between common and hardware-specific interfaces
23245   * @clk_id:    PLL clock ID
23246 + * @set_pll_mode:      Whether an IOCTL_SET_PLL_FRAC_MODE request be sent to ATF
23247   */
23248  struct zynqmp_pll {
23249         struct clk_hw hw;
23250         u32 clk_id;
23251 +       bool set_pll_mode;
23252  };
23254  #define to_zynqmp_pll(_hw)     container_of(_hw, struct zynqmp_pll, hw)
23255 @@ -81,6 +83,8 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
23256         if (ret)
23257                 pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
23258                              __func__, clk_name, ret);
23259 +       else
23260 +               clk->set_pll_mode = true;
23263  /**
23264 @@ -100,9 +104,7 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
23265         /* Enable the fractional mode if needed */
23266         rate_div = (rate * FRAC_DIV) / *prate;
23267         f = rate_div % FRAC_DIV;
23268 -       zynqmp_pll_set_mode(hw, !!f);
23270 -       if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
23271 +       if (f) {
23272                 if (rate > PS_PLL_VCO_MAX) {
23273                         fbdiv = rate / PS_PLL_VCO_MAX;
23274                         rate = rate / (fbdiv + 1);
23275 @@ -173,10 +175,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
23276         long rate_div, frac, m, f;
23277         int ret;
23279 -       if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
23280 -               rate_div = (rate * FRAC_DIV) / parent_rate;
23281 +       rate_div = (rate * FRAC_DIV) / parent_rate;
23282 +       f = rate_div % FRAC_DIV;
23283 +       zynqmp_pll_set_mode(hw, !!f);
23285 +       if (f) {
23286                 m = rate_div / FRAC_DIV;
23287 -               f = rate_div % FRAC_DIV;
23288                 m = clamp_t(u32, m, (PLL_FBDIV_MIN), (PLL_FBDIV_MAX));
23289                 rate = parent_rate * m;
23290                 frac = (parent_rate * f) / FRAC_DIV;
23291 @@ -240,9 +244,15 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
23292         u32 clk_id = clk->clk_id;
23293         int ret;
23295 -       if (zynqmp_pll_is_enabled(hw))
23296 +       /*
23297 +        * Don't skip enabling clock if there is an IOCTL_SET_PLL_FRAC_MODE request
23298 +        * that has been sent to ATF.
23299 +        */
23300 +       if (zynqmp_pll_is_enabled(hw) && (!clk->set_pll_mode))
23301                 return 0;
23303 +       clk->set_pll_mode = false;
23305         ret = zynqmp_pm_clock_enable(clk_id);
23306         if (ret)
23307                 pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
23308 diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
23309 index 42e7e43b8fcd..b1e2b697b21b 100644
23310 --- a/drivers/clocksource/dw_apb_timer_of.c
23311 +++ b/drivers/clocksource/dw_apb_timer_of.c
23312 @@ -52,18 +52,34 @@ static int __init timer_get_base_and_rate(struct device_node *np,
23313                 return 0;
23315         timer_clk = of_clk_get_by_name(np, "timer");
23316 -       if (IS_ERR(timer_clk))
23317 -               return PTR_ERR(timer_clk);
23318 +       if (IS_ERR(timer_clk)) {
23319 +               ret = PTR_ERR(timer_clk);
23320 +               goto out_pclk_disable;
23321 +       }
23323         ret = clk_prepare_enable(timer_clk);
23324         if (ret)
23325 -               return ret;
23326 +               goto out_timer_clk_put;
23328         *rate = clk_get_rate(timer_clk);
23329 -       if (!(*rate))
23330 -               return -EINVAL;
23331 +       if (!(*rate)) {
23332 +               ret = -EINVAL;
23333 +               goto out_timer_clk_disable;
23334 +       }
23336         return 0;
23338 +out_timer_clk_disable:
23339 +       clk_disable_unprepare(timer_clk);
23340 +out_timer_clk_put:
23341 +       clk_put(timer_clk);
23342 +out_pclk_disable:
23343 +       if (!IS_ERR(pclk)) {
23344 +               clk_disable_unprepare(pclk);
23345 +               clk_put(pclk);
23346 +       }
23347 +       iounmap(*base);
23348 +       return ret;
23351  static int __init add_clockevent(struct device_node *event_timer)
23352 diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
23353 index 029efc2731b4..6af2470136bd 100644
23354 --- a/drivers/clocksource/ingenic-ost.c
23355 +++ b/drivers/clocksource/ingenic-ost.c
23356 @@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
23357                 return PTR_ERR(ost->regs);
23359         map = device_node_to_regmap(dev->parent->of_node);
23360 -       if (!map) {
23361 +       if (IS_ERR(map)) {
23362                 dev_err(dev, "regmap not found");
23363 -               return -EINVAL;
23364 +               return PTR_ERR(map);
23365         }
23367         ost->clk = devm_clk_get(dev, "ost");
23368 diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
23369 index 33b3e8aa2cc5..b6f97960d8ee 100644
23370 --- a/drivers/clocksource/timer-ti-dm-systimer.c
23371 +++ b/drivers/clocksource/timer-ti-dm-systimer.c
23372 @@ -2,6 +2,7 @@
23373  #include <linux/clk.h>
23374  #include <linux/clocksource.h>
23375  #include <linux/clockchips.h>
23376 +#include <linux/cpuhotplug.h>
23377  #include <linux/interrupt.h>
23378  #include <linux/io.h>
23379  #include <linux/iopoll.h>
23380 @@ -449,13 +450,13 @@ static int dmtimer_set_next_event(unsigned long cycles,
23381         struct dmtimer_systimer *t = &clkevt->t;
23382         void __iomem *pend = t->base + t->pend;
23384 -       writel_relaxed(0xffffffff - cycles, t->base + t->counter);
23385         while (readl_relaxed(pend) & WP_TCRR)
23386                 cpu_relax();
23387 +       writel_relaxed(0xffffffff - cycles, t->base + t->counter);
23389 -       writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
23390         while (readl_relaxed(pend) & WP_TCLR)
23391                 cpu_relax();
23392 +       writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
23394         return 0;
23396 @@ -490,18 +491,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
23397         dmtimer_clockevent_shutdown(evt);
23399         /* Looks like we need to first set the load value separately */
23400 -       writel_relaxed(clkevt->period, t->base + t->load);
23401         while (readl_relaxed(pend) & WP_TLDR)
23402                 cpu_relax();
23403 +       writel_relaxed(clkevt->period, t->base + t->load);
23405 -       writel_relaxed(clkevt->period, t->base + t->counter);
23406         while (readl_relaxed(pend) & WP_TCRR)
23407                 cpu_relax();
23408 +       writel_relaxed(clkevt->period, t->base + t->counter);
23410 -       writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
23411 -                      t->base + t->ctrl);
23412         while (readl_relaxed(pend) & WP_TCLR)
23413                 cpu_relax();
23414 +       writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
23415 +                      t->base + t->ctrl);
23417         return 0;
23419 @@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
23420         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
23423 -static int __init dmtimer_clockevent_init(struct device_node *np)
23424 +static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
23425 +                                            struct device_node *np,
23426 +                                            unsigned int features,
23427 +                                            const struct cpumask *cpumask,
23428 +                                            const char *name,
23429 +                                            int rating)
23431 -       struct dmtimer_clockevent *clkevt;
23432         struct clock_event_device *dev;
23433         struct dmtimer_systimer *t;
23434         int error;
23436 -       clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
23437 -       if (!clkevt)
23438 -               return -ENOMEM;
23440         t = &clkevt->t;
23441         dev = &clkevt->dev;
23443 @@ -548,24 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
23444          * We mostly use cpuidle_coupled with ARM local timers for runtime,
23445          * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
23446          */
23447 -       dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
23448 -       dev->rating = 300;
23449 +       dev->features = features;
23450 +       dev->rating = rating;
23451         dev->set_next_event = dmtimer_set_next_event;
23452         dev->set_state_shutdown = dmtimer_clockevent_shutdown;
23453         dev->set_state_periodic = dmtimer_set_periodic;
23454         dev->set_state_oneshot = dmtimer_clockevent_shutdown;
23455 +       dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
23456         dev->tick_resume = dmtimer_clockevent_shutdown;
23457 -       dev->cpumask = cpu_possible_mask;
23458 +       dev->cpumask = cpumask;
23460         dev->irq = irq_of_parse_and_map(np, 0);
23461 -       if (!dev->irq) {
23462 -               error = -ENXIO;
23463 -               goto err_out_free;
23464 -       }
23465 +       if (!dev->irq)
23466 +               return -ENXIO;
23468         error = dmtimer_systimer_setup(np, &clkevt->t);
23469         if (error)
23470 -               goto err_out_free;
23471 +               return error;
23473         clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
23475 @@ -577,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
23476         writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
23478         error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
23479 -                           IRQF_TIMER, "clockevent", clkevt);
23480 +                           IRQF_TIMER, name, clkevt);
23481         if (error)
23482                 goto err_out_unmap;
23484         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
23485         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
23487 -       pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
23488 -               of_find_property(np, "ti,timer-alwon", NULL) ?
23489 +       pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
23490 +               name, of_find_property(np, "ti,timer-alwon", NULL) ?
23491                 "always-on " : "", t->rate, np->parent);
23493 -       clockevents_config_and_register(dev, t->rate,
23494 -                                       3, /* Timer internal resynch latency */
23495 +       return 0;
23497 +err_out_unmap:
23498 +       iounmap(t->base);
23500 +       return error;
23503 +static int __init dmtimer_clockevent_init(struct device_node *np)
23505 +       struct dmtimer_clockevent *clkevt;
23506 +       int error;
23508 +       clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
23509 +       if (!clkevt)
23510 +               return -ENOMEM;
23512 +       error = dmtimer_clkevt_init_common(clkevt, np,
23513 +                                          CLOCK_EVT_FEAT_PERIODIC |
23514 +                                          CLOCK_EVT_FEAT_ONESHOT,
23515 +                                          cpu_possible_mask, "clockevent",
23516 +                                          300);
23517 +       if (error)
23518 +               goto err_out_free;
23520 +       clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
23521 +                                       3, /* Timer internal resync latency */
23522                                         0xffffffff);
23524         if (of_machine_is_compatible("ti,am33xx") ||
23525             of_machine_is_compatible("ti,am43")) {
23526 -               dev->suspend = omap_clockevent_idle;
23527 -               dev->resume = omap_clockevent_unidle;
23528 +               clkevt->dev.suspend = omap_clockevent_idle;
23529 +               clkevt->dev.resume = omap_clockevent_unidle;
23530         }
23532         return 0;
23534 -err_out_unmap:
23535 -       iounmap(t->base);
23537  err_out_free:
23538         kfree(clkevt);
23540         return error;
23543 +/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
23544 +static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
23546 +static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
23548 +       struct dmtimer_clockevent *clkevt;
23549 +       int error;
23551 +       if (!cpu_possible(cpu))
23552 +               return -EINVAL;
23554 +       if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
23555 +           !of_property_read_bool(np->parent, "ti,no-idle"))
23556 +               pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
23558 +       clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
23560 +       error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
23561 +                                          cpumask_of(cpu), "percpu-dmtimer",
23562 +                                          500);
23563 +       if (error)
23564 +               return error;
23566 +       return 0;
23569 +/* See TRM for timer internal resynch latency */
23570 +static int omap_dmtimer_starting_cpu(unsigned int cpu)
23572 +       struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
23573 +       struct clock_event_device *dev = &clkevt->dev;
23574 +       struct dmtimer_systimer *t = &clkevt->t;
23576 +       clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
23577 +       irq_force_affinity(dev->irq, cpumask_of(cpu));
23579 +       return 0;
23582 +static int __init dmtimer_percpu_timer_startup(void)
23584 +       struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
23585 +       struct dmtimer_systimer *t = &clkevt->t;
23587 +       if (t->sysc) {
23588 +               cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
23589 +                                 "clockevents/omap/gptimer:starting",
23590 +                                 omap_dmtimer_starting_cpu, NULL);
23591 +       }
23593 +       return 0;
23595 +subsys_initcall(dmtimer_percpu_timer_startup);
23597 +static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
23599 +       struct device_node *arm_timer;
23601 +       arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
23602 +       if (of_device_is_available(arm_timer)) {
23603 +               pr_warn_once("ARM architected timer wrap issue i940 detected\n");
23604 +               return 0;
23605 +       }
23607 +       if (pa == 0x48034000)           /* dra7 dmtimer3 */
23608 +               return dmtimer_percpu_timer_init(np, 0);
23609 +       else if (pa == 0x48036000)      /* dra7 dmtimer4 */
23610 +               return dmtimer_percpu_timer_init(np, 1);
23612 +       return 0;
23615  /* Clocksource */
23616  static struct dmtimer_clocksource *
23617  to_dmtimer_clocksource(struct clocksource *cs)
23618 @@ -742,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
23619         if (clockevent == pa)
23620                 return dmtimer_clockevent_init(np);
23622 +       if (of_machine_is_compatible("ti,dra7"))
23623 +               return dmtimer_percpu_quirk_init(np, pa);
23625         return 0;
23628 diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
23629 index d1bbc16fba4b..7e7450453714 100644
23630 --- a/drivers/cpufreq/acpi-cpufreq.c
23631 +++ b/drivers/cpufreq/acpi-cpufreq.c
23632 @@ -646,7 +646,11 @@ static u64 get_max_boost_ratio(unsigned int cpu)
23633                 return 0;
23634         }
23636 -       highest_perf = perf_caps.highest_perf;
23637 +       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
23638 +               highest_perf = amd_get_highest_perf();
23639 +       else
23640 +               highest_perf = perf_caps.highest_perf;
23642         nominal_perf = perf_caps.nominal_perf;
23644         if (!highest_perf || !nominal_perf) {
23645 diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
23646 index b4af4094309b..e4782f562e7a 100644
23647 --- a/drivers/cpufreq/armada-37xx-cpufreq.c
23648 +++ b/drivers/cpufreq/armada-37xx-cpufreq.c
23649 @@ -25,6 +25,10 @@
23651  #include "cpufreq-dt.h"
23653 +/* Clk register set */
23654 +#define ARMADA_37XX_CLK_TBG_SEL                0
23655 +#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF        22
23657  /* Power management in North Bridge register set */
23658  #define ARMADA_37XX_NB_L0L1    0x18
23659  #define ARMADA_37XX_NB_L2L3    0x1C
23660 @@ -69,6 +73,8 @@
23661  #define LOAD_LEVEL_NR  4
23663  #define MIN_VOLT_MV 1000
23664 +#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108
23665 +#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155
23667  /*  AVS value for the corresponding voltage (in mV) */
23668  static int avs_map[] = {
23669 @@ -120,10 +126,15 @@ static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
23670   * will be configured then the DVFS will be enabled.
23671   */
23672  static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
23673 -                                                struct clk *clk, u8 *divider)
23674 +                                                struct regmap *clk_base, u8 *divider)
23676 +       u32 cpu_tbg_sel;
23677         int load_lvl;
23678 -       struct clk *parent;
23680 +       /* Determine to which TBG clock is CPU connected */
23681 +       regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel);
23682 +       cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF;
23683 +       cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK;
23685         for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
23686                 unsigned int reg, mask, val, offset = 0;
23687 @@ -142,6 +153,11 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
23688                 mask = (ARMADA_37XX_NB_CLK_SEL_MASK
23689                         << ARMADA_37XX_NB_CLK_SEL_OFF);
23691 +               /* Set TBG index, for all levels we use the same TBG */
23692 +               val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF;
23693 +               mask = (ARMADA_37XX_NB_TBG_SEL_MASK
23694 +                       << ARMADA_37XX_NB_TBG_SEL_OFF);
23696                 /*
23697                  * Set cpu divider based on the pre-computed array in
23698                  * order to have balanced step.
23699 @@ -160,14 +176,6 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
23701                 regmap_update_bits(base, reg, mask, val);
23702         }
23704 -       /*
23705 -        * Set cpu clock source, for all the level we keep the same
23706 -        * clock source that the one already configured. For this one
23707 -        * we need to use the clock framework
23708 -        */
23709 -       parent = clk_get_parent(clk);
23710 -       clk_set_parent(clk, parent);
23713  /*
23714 @@ -202,6 +210,8 @@ static u32 armada_37xx_avs_val_match(int target_vm)
23715   * - L2 & L3 voltage should be about 150mv smaller than L0 voltage.
23716   * This function calculates L1 & L2 & L3 AVS values dynamically based
23717   * on L0 voltage and fill all AVS values to the AVS value table.
23718 + * When base CPU frequency is 1000 or 1200 MHz then there is additional
23719 + * minimal avs value for load L1.
23720   */
23721  static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
23722                                                 struct armada_37xx_dvfs *dvfs)
23723 @@ -233,6 +243,19 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
23724                 for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
23725                         dvfs->avs[load_level] = avs_min;
23727 +               /*
23728 +                * Set the avs values for load L0 and L1 when base CPU frequency
23729 +                * is 1000/1200 MHz to its typical initial values according to
23730 +                * the Armada 3700 Hardware Specifications.
23731 +                */
23732 +               if (dvfs->cpu_freq_max >= 1000*1000*1000) {
23733 +                       if (dvfs->cpu_freq_max >= 1200*1000*1000)
23734 +                               avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
23735 +                       else
23736 +                               avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
23737 +                       dvfs->avs[0] = dvfs->avs[1] = avs_min;
23738 +               }
23740                 return;
23741         }
23743 @@ -252,6 +275,26 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
23744         target_vm = avs_map[l0_vdd_min] - 150;
23745         target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
23746         dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
23748 +       /*
23749 +        * Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz,
23750 +        * otherwise the CPU gets stuck when switching from load L1 to load L0.
23751 +        * Also ensure that avs value for load L1 is not higher than for L0.
23752 +        */
23753 +       if (dvfs->cpu_freq_max >= 1000*1000*1000) {
23754 +               u32 avs_min_l1;
23756 +               if (dvfs->cpu_freq_max >= 1200*1000*1000)
23757 +                       avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
23758 +               else
23759 +                       avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
23761 +               if (avs_min_l1 > dvfs->avs[0])
23762 +                       avs_min_l1 = dvfs->avs[0];
23764 +               if (dvfs->avs[1] < avs_min_l1)
23765 +                       dvfs->avs[1] = avs_min_l1;
23766 +       }
23769  static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
23770 @@ -358,11 +401,16 @@ static int __init armada37xx_cpufreq_driver_init(void)
23771         struct platform_device *pdev;
23772         unsigned long freq;
23773         unsigned int cur_frequency, base_frequency;
23774 -       struct regmap *nb_pm_base, *avs_base;
23775 +       struct regmap *nb_clk_base, *nb_pm_base, *avs_base;
23776         struct device *cpu_dev;
23777         int load_lvl, ret;
23778         struct clk *clk, *parent;
23780 +       nb_clk_base =
23781 +               syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb");
23782 +       if (IS_ERR(nb_clk_base))
23783 +               return -ENODEV;
23785         nb_pm_base =
23786                 syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
23788 @@ -421,7 +469,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
23789                 return -EINVAL;
23790         }
23792 -       dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
23793 +       dvfs = armada_37xx_cpu_freq_info_get(base_frequency);
23794         if (!dvfs) {
23795                 clk_put(clk);
23796                 return -EINVAL;
23797 @@ -439,7 +487,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
23798         armada37xx_cpufreq_avs_configure(avs_base, dvfs);
23799         armada37xx_cpufreq_avs_setup(avs_base, dvfs);
23801 -       armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
23802 +       armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider);
23803         clk_put(clk);
23805         for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
23806 @@ -473,7 +521,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
23807  remove_opp:
23808         /* clean-up the already added opp before leaving */
23809         while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
23810 -               freq = cur_frequency / dvfs->divider[load_lvl];
23811 +               freq = base_frequency / dvfs->divider[load_lvl];
23812                 dev_pm_opp_remove(cpu_dev, freq);
23813         }
23815 diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
23816 index aa39ff31ec9f..b3eae5ec17b2 100644
23817 --- a/drivers/cpufreq/cpufreq_conservative.c
23818 +++ b/drivers/cpufreq/cpufreq_conservative.c
23819 @@ -28,8 +28,8 @@ struct cs_dbs_tuners {
23820  };
23822  /* Conservative governor macros */
23823 -#define DEF_FREQUENCY_UP_THRESHOLD             (80)
23824 -#define DEF_FREQUENCY_DOWN_THRESHOLD           (20)
23825 +#define DEF_FREQUENCY_UP_THRESHOLD             (63)
23826 +#define DEF_FREQUENCY_DOWN_THRESHOLD           (26)
23827  #define DEF_FREQUENCY_STEP                     (5)
23828  #define DEF_SAMPLING_DOWN_FACTOR               (1)
23829  #define MAX_SAMPLING_DOWN_FACTOR               (10)
23830 @@ -47,9 +47,9 @@ static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
23833  /*
23834 - * Every sampling_rate, we check, if current idle time is less than 20%
23835 + * Every sampling_rate, we check, if current idle time is less than 37%
23836   * (default), then we try to increase frequency. Every sampling_rate *
23837 - * sampling_down_factor, we check, if current idle time is more than 80%
23838 + * sampling_down_factor, we check, if current idle time is more than 74%
23839   * (default), then we try to decrease frequency
23840   *
23841   * Frequency updates happen at minimum steps of 5% (default) of maximum
23842 diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
23843 index ac361a8b1d3b..611d80122336 100644
23844 --- a/drivers/cpufreq/cpufreq_ondemand.c
23845 +++ b/drivers/cpufreq/cpufreq_ondemand.c
23846 @@ -18,10 +18,10 @@
23847  #include "cpufreq_ondemand.h"
23849  /* On-demand governor macros */
23850 -#define DEF_FREQUENCY_UP_THRESHOLD             (80)
23851 -#define DEF_SAMPLING_DOWN_FACTOR               (1)
23852 +#define DEF_FREQUENCY_UP_THRESHOLD             (63)
23853 +#define DEF_SAMPLING_DOWN_FACTOR               (100)
23854  #define MAX_SAMPLING_DOWN_FACTOR               (100000)
23855 -#define MICRO_FREQUENCY_UP_THRESHOLD           (95)
23856 +#define MICRO_FREQUENCY_UP_THRESHOLD           (70)
23857  #define MICRO_FREQUENCY_MIN_SAMPLE_RATE                (10000)
23858  #define MIN_FREQUENCY_UP_THRESHOLD             (1)
23859  #define MAX_FREQUENCY_UP_THRESHOLD             (100)
23860 @@ -127,7 +127,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
23863  /*
23864 - * Every sampling_rate, we check, if current idle time is less than 20%
23865 + * Every sampling_rate, we check, if current idle time is less than 37%
23866   * (default), then we try to increase frequency. Else, we adjust the frequency
23867   * proportional to load.
23868   */
23869 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
23870 index 5175ae3cac44..34196c107de6 100644
23871 --- a/drivers/cpufreq/intel_pstate.c
23872 +++ b/drivers/cpufreq/intel_pstate.c
23873 @@ -3054,6 +3054,14 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
23874         {}
23875  };
23877 +static bool intel_pstate_hwp_is_enabled(void)
23879 +       u64 value;
23881 +       rdmsrl(MSR_PM_ENABLE, value);
23882 +       return !!(value & 0x1);
23885  static int __init intel_pstate_init(void)
23887         const struct x86_cpu_id *id;
23888 @@ -3072,8 +3080,12 @@ static int __init intel_pstate_init(void)
23889                  * Avoid enabling HWP for processors without EPP support,
23890                  * because that means incomplete HWP implementation which is a
23891                  * corner case and supporting it is generally problematic.
23892 +                *
23893 +                * If HWP is enabled already, though, there is no choice but to
23894 +                * deal with it.
23895                  */
23896 -               if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
23897 +               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
23898 +                   intel_pstate_hwp_is_enabled()) {
23899                         hwp_active++;
23900                         hwp_mode_bdw = id->driver_data;
23901                         intel_pstate.attr = hwp_cpufreq_attrs;
23902 diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
23903 index 0844fadc4be8..334f83e56120 100644
23904 --- a/drivers/cpuidle/Kconfig.arm
23905 +++ b/drivers/cpuidle/Kconfig.arm
23906 @@ -107,7 +107,7 @@ config ARM_TEGRA_CPUIDLE
23908  config ARM_QCOM_SPM_CPUIDLE
23909         bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
23910 -       depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64
23911 +       depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
23912         select ARM_CPU_SUSPEND
23913         select CPU_IDLE_MULTIPLE_DRIVERS
23914         select DT_IDLE_STATES
23915 diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
23916 index 191966dc8d02..29c5e83500d3 100644
23917 --- a/drivers/cpuidle/cpuidle-tegra.c
23918 +++ b/drivers/cpuidle/cpuidle-tegra.c
23919 @@ -135,13 +135,13 @@ static int tegra_cpuidle_c7_enter(void)
23921         int err;
23923 -       if (tegra_cpuidle_using_firmware()) {
23924 -               err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
23925 -               if (err)
23926 -                       return err;
23927 +       err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
23928 +       if (err && err != -ENOSYS)
23929 +               return err;
23931 -               return call_firmware_op(do_idle, 0);
23932 -       }
23933 +       err = call_firmware_op(do_idle, 0);
23934 +       if (err != -ENOSYS)
23935 +               return err;
23937         return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend);
23939 diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
23940 index 856fb2045656..b8e75210a0e3 100644
23941 --- a/drivers/crypto/allwinner/Kconfig
23942 +++ b/drivers/crypto/allwinner/Kconfig
23943 @@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
23944  config CRYPTO_DEV_SUN8I_CE_HASH
23945         bool "Enable support for hash on sun8i-ce"
23946         depends on CRYPTO_DEV_SUN8I_CE
23947 -       select MD5
23948 -       select SHA1
23949 -       select SHA256
23950 -       select SHA512
23951 +       select CRYPTO_MD5
23952 +       select CRYPTO_SHA1
23953 +       select CRYPTO_SHA256
23954 +       select CRYPTO_SHA512
23955         help
23956           Say y to enable support for hash algorithms.
23958 @@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
23959  config CRYPTO_DEV_SUN8I_SS_HASH
23960         bool "Enable support for hash on sun8i-ss"
23961         depends on CRYPTO_DEV_SUN8I_SS
23962 -       select MD5
23963 -       select SHA1
23964 -       select SHA256
23965 +       select CRYPTO_MD5
23966 +       select CRYPTO_SHA1
23967 +       select CRYPTO_SHA256
23968         help
23969           Say y to enable support for hash algorithms.
23970 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
23971 index c2e6f5ed1d79..dec79fa3ebaf 100644
23972 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
23973 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
23974 @@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
23975                                     sizeof(struct sun4i_cipher_req_ctx) +
23976                                     crypto_skcipher_reqsize(op->fallback_tfm));
23978 -       err = pm_runtime_get_sync(op->ss->dev);
23979 +       err = pm_runtime_resume_and_get(op->ss->dev);
23980         if (err < 0)
23981                 goto error_pm;
23983 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
23984 index 709905ec4680..02a2d34845f2 100644
23985 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
23986 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
23987 @@ -459,7 +459,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
23988          * this info could be useful
23989          */
23991 -       err = pm_runtime_get_sync(ss->dev);
23992 +       err = pm_runtime_resume_and_get(ss->dev);
23993         if (err < 0)
23994                 goto error_pm;
23996 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
23997 index c1b4585e9bbc..d28292762b32 100644
23998 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
23999 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
24000 @@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
24001         algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
24002         op->ss = algt->ss;
24004 -       err = pm_runtime_get_sync(op->ss->dev);
24005 +       err = pm_runtime_resume_and_get(op->ss->dev);
24006         if (err < 0)
24007                 return err;
24009 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
24010 index 443160a114bb..491fcb7b81b4 100644
24011 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
24012 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
24013 @@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
24014         algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
24015         ss = algt->ss;
24017 -       err = pm_runtime_get_sync(ss->dev);
24018 +       err = pm_runtime_resume_and_get(ss->dev);
24019         if (err < 0)
24020                 return err;
24022 diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
24023 index 158422ff5695..00194d1d9ae6 100644
24024 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
24025 +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
24026 @@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
24027         if (err)
24028                 goto error_alg;
24030 -       err = pm_runtime_get_sync(ce->dev);
24031 +       err = pm_runtime_resume_and_get(ce->dev);
24032         if (err < 0)
24033                 goto error_alg;
24035 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
24036 index ed2a69f82e1c..7c355bc2fb06 100644
24037 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
24038 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
24039 @@ -351,7 +351,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
24040         op->enginectx.op.prepare_request = NULL;
24041         op->enginectx.op.unprepare_request = NULL;
24043 -       err = pm_runtime_get_sync(op->ss->dev);
24044 +       err = pm_runtime_resume_and_get(op->ss->dev);
24045         if (err < 0) {
24046                 dev_err(op->ss->dev, "pm error %d\n", err);
24047                 goto error_pm;
24048 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
24049 index e0ddc684798d..80e89066dbd1 100644
24050 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
24051 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
24052 @@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
24053         if (err)
24054                 goto error_alg;
24056 -       err = pm_runtime_get_sync(ss->dev);
24057 +       err = pm_runtime_resume_and_get(ss->dev);
24058         if (err < 0)
24059                 goto error_alg;
24061 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
24062 index 11cbcbc83a7b..64446b86c927 100644
24063 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
24064 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
24065 @@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
24066         bf = (__le32 *)pad;
24068         result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
24069 -       if (!result)
24070 +       if (!result) {
24071 +               kfree(pad);
24072                 return -ENOMEM;
24073 +       }
24075         for (i = 0; i < MAX_SG; i++) {
24076                 rctx->t_dst[i].addr = 0;
24077 @@ -435,11 +437,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
24078         dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
24079         dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
24081 -       kfree(pad);
24083         memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
24084 -       kfree(result);
24085  theend:
24086 +       kfree(pad);
24087 +       kfree(result);
24088         crypto_finalize_hash_request(engine, breq, err);
24089         return 0;
24091 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
24092 index 08a1473b2145..3191527928e4 100644
24093 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
24094 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
24095 @@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
24096         dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
24097         if (dma_mapping_error(ss->dev, dma_iv)) {
24098                 dev_err(ss->dev, "Cannot DMA MAP IV\n");
24099 -               return -EFAULT;
24100 +               err = -EFAULT;
24101 +               goto err_free;
24102         }
24104         dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
24105 @@ -167,6 +168,7 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
24106                 memcpy(ctx->seed, d + dlen, ctx->slen);
24107         }
24108         memzero_explicit(d, todo);
24109 +err_free:
24110         kfree(d);
24112         return err;
24113 diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
24114 index cb9b4c4e371e..3e0d1d6922ba 100644
24115 --- a/drivers/crypto/ccp/sev-dev.c
24116 +++ b/drivers/crypto/ccp/sev-dev.c
24117 @@ -150,6 +150,9 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
24119         sev = psp->sev_data;
24121 +       if (data && WARN_ON_ONCE(!virt_addr_valid(data)))
24122 +               return -EINVAL;
24124         /* Get the physical address of the command buffer */
24125         phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
24126         phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
24127 @@ -987,7 +990,7 @@ int sev_dev_init(struct psp_device *psp)
24128         if (!sev->vdata) {
24129                 ret = -ENODEV;
24130                 dev_err(dev, "sev: missing driver data\n");
24131 -               goto e_err;
24132 +               goto e_sev;
24133         }
24135         psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
24136 @@ -1002,6 +1005,8 @@ int sev_dev_init(struct psp_device *psp)
24138  e_irq:
24139         psp_clear_sev_irq_handler(psp);
24140 +e_sev:
24141 +       devm_kfree(dev, sev);
24142  e_err:
24143         psp->sev_data = NULL;
24145 diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
24146 index 5e697a90ea7f..bcb81fef4211 100644
24147 --- a/drivers/crypto/ccp/tee-dev.c
24148 +++ b/drivers/crypto/ccp/tee-dev.c
24149 @@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
24150         if (!start_addr)
24151                 return -ENOMEM;
24153 +       memset(start_addr, 0x0, ring_size);
24154         rb_mgr->ring_start = start_addr;
24155         rb_mgr->ring_size = ring_size;
24156         rb_mgr->ring_pa = __psp_pa(start_addr);
24157 @@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
24158                           void *buf, size_t len, struct tee_ring_cmd **resp)
24160         struct tee_ring_cmd *cmd;
24161 -       u32 rptr, wptr;
24162         int nloop = 1000, ret = 0;
24163 +       u32 rptr;
24165         *resp = NULL;
24167         mutex_lock(&tee->rb_mgr.mutex);
24169 -       wptr = tee->rb_mgr.wptr;
24171 -       /* Check if ring buffer is full */
24172 +       /* Loop until empty entry found in ring buffer */
24173         do {
24174 +               /* Get pointer to ring buffer command entry */
24175 +               cmd = (struct tee_ring_cmd *)
24176 +                       (tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
24178                 rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
24180 -               if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
24181 +               /* Check if ring buffer is full or command entry is waiting
24182 +                * for response from TEE
24183 +                */
24184 +               if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
24185 +                     cmd->flag == CMD_WAITING_FOR_RESPONSE))
24186                         break;
24188 -               dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
24189 -                        rptr, wptr);
24190 +               dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
24191 +                       rptr, tee->rb_mgr.wptr);
24193 -               /* Wait if ring buffer is full */
24194 +               /* Wait if ring buffer is full or TEE is processing data */
24195                 mutex_unlock(&tee->rb_mgr.mutex);
24196                 schedule_timeout_interruptible(msecs_to_jiffies(10));
24197                 mutex_lock(&tee->rb_mgr.mutex);
24199         } while (--nloop);
24201 -       if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
24202 -               dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
24203 -                       rptr, wptr);
24204 +       if (!nloop &&
24205 +           (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
24206 +            cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
24207 +               dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
24208 +                       rptr, tee->rb_mgr.wptr, cmd->flag);
24209                 ret = -EBUSY;
24210                 goto unlock;
24211         }
24213 -       /* Pointer to empty data entry in ring buffer */
24214 -       cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
24215 +       /* Do not submit command if PSP got disabled while processing any
24216 +        * command in another thread
24217 +        */
24218 +       if (psp_dead) {
24219 +               ret = -EBUSY;
24220 +               goto unlock;
24221 +       }
24223         /* Write command data into ring buffer */
24224         cmd->cmd_id = cmd_id;
24225 @@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
24226         memset(&cmd->buf[0], 0, sizeof(cmd->buf));
24227         memcpy(&cmd->buf[0], buf, len);
24229 +       /* Indicate driver is waiting for response */
24230 +       cmd->flag = CMD_WAITING_FOR_RESPONSE;
24232         /* Update local copy of write pointer */
24233         tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
24234         if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
24235 @@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
24236                 return ret;
24238         ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
24239 -       if (ret)
24240 +       if (ret) {
24241 +               resp->flag = CMD_RESPONSE_TIMEDOUT;
24242                 return ret;
24243 +       }
24245         memcpy(buf, &resp->buf[0], len);
24246         *status = resp->status;
24248 +       resp->flag = CMD_RESPONSE_COPIED;
24250         return 0;
24252  EXPORT_SYMBOL(psp_tee_process_cmd);
24253 diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
24254 index f09960112115..49d26158b71e 100644
24255 --- a/drivers/crypto/ccp/tee-dev.h
24256 +++ b/drivers/crypto/ccp/tee-dev.h
24257 @@ -1,6 +1,6 @@
24258  /* SPDX-License-Identifier: MIT */
24259  /*
24260 - * Copyright 2019 Advanced Micro Devices, Inc.
24261 + * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
24262   *
24263   * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
24264   * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
24265 @@ -18,7 +18,7 @@
24266  #include <linux/mutex.h>
24268  #define TEE_DEFAULT_TIMEOUT            10
24269 -#define MAX_BUFFER_SIZE                        992
24270 +#define MAX_BUFFER_SIZE                        988
24272  /**
24273   * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
24274 @@ -81,6 +81,20 @@ enum tee_cmd_state {
24275         TEE_CMD_STATE_COMPLETED,
24276  };
24278 +/**
24279 + * enum cmd_resp_state - TEE command's response status maintained by driver
24280 + * @CMD_RESPONSE_INVALID:      initial state when no command is written to ring
24281 + * @CMD_WAITING_FOR_RESPONSE:  driver waiting for response from TEE
24282 + * @CMD_RESPONSE_TIMEDOUT:     failed to get response from TEE
24283 + * @CMD_RESPONSE_COPIED:       driver has copied response from TEE
24284 + */
24285 +enum cmd_resp_state {
24286 +       CMD_RESPONSE_INVALID,
24287 +       CMD_WAITING_FOR_RESPONSE,
24288 +       CMD_RESPONSE_TIMEDOUT,
24289 +       CMD_RESPONSE_COPIED,
24292  /**
24293   * struct tee_ring_cmd - Structure of the command buffer in TEE ring
24294   * @cmd_id:      refers to &enum tee_cmd_id. Command id for the ring buffer
24295 @@ -91,6 +105,7 @@ enum tee_cmd_state {
24296   * @pdata:       private data (currently unused)
24297   * @res1:        reserved region
24298   * @buf:         TEE command specific buffer
24299 + * @flag:       refers to &enum cmd_resp_state
24300   */
24301  struct tee_ring_cmd {
24302         u32 cmd_id;
24303 @@ -100,6 +115,7 @@ struct tee_ring_cmd {
24304         u64 pdata;
24305         u32 res1[2];
24306         u8 buf[MAX_BUFFER_SIZE];
24307 +       u32 flag;
24309         /* Total size: 1024 bytes */
24310  } __packed;
24311 diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
24312 index f5a336634daa..405ff957b837 100644
24313 --- a/drivers/crypto/chelsio/chcr_algo.c
24314 +++ b/drivers/crypto/chelsio/chcr_algo.c
24315 @@ -769,13 +769,14 @@ static inline void create_wreq(struct chcr_context *ctx,
24316         struct uld_ctx *u_ctx = ULD_CTX(ctx);
24317         unsigned int tx_channel_id, rx_channel_id;
24318         unsigned int txqidx = 0, rxqidx = 0;
24319 -       unsigned int qid, fid;
24320 +       unsigned int qid, fid, portno;
24322         get_qidxs(req, &txqidx, &rxqidx);
24323         qid = u_ctx->lldi.rxq_ids[rxqidx];
24324         fid = u_ctx->lldi.rxq_ids[0];
24325 +       portno = rxqidx / ctx->rxq_perchan;
24326         tx_channel_id = txqidx / ctx->txq_perchan;
24327 -       rx_channel_id = rxqidx / ctx->rxq_perchan;
24328 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
24331         chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
24332 @@ -806,6 +807,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
24334         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
24335         struct chcr_context *ctx = c_ctx(tfm);
24336 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24337         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
24338         struct sk_buff *skb = NULL;
24339         struct chcr_wr *chcr_req;
24340 @@ -822,6 +824,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
24341         struct adapter *adap = padap(ctx->dev);
24342         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
24344 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24345         nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
24346                               reqctx->dst_ofst);
24347         dst_size = get_space_for_phys_dsgl(nents);
24348 @@ -1580,6 +1583,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
24349         int error = 0;
24350         unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
24352 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24353         transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
24354         req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
24355                                 param->sg_len) <= SGE_MAX_WR_LEN;
24356 @@ -2438,6 +2442,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
24358         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
24359         struct chcr_context *ctx = a_ctx(tfm);
24360 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24361         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
24362         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
24363         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
24364 @@ -2457,6 +2462,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
24365         struct adapter *adap = padap(ctx->dev);
24366         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
24368 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24369         if (req->cryptlen == 0)
24370                 return NULL;
24372 @@ -2710,9 +2716,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
24373         struct dsgl_walk dsgl_walk;
24374         unsigned int authsize = crypto_aead_authsize(tfm);
24375         struct chcr_context *ctx = a_ctx(tfm);
24376 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24377         u32 temp;
24378         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
24380 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24381         dsgl_walk_init(&dsgl_walk, phys_cpl);
24382         dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
24383         temp = req->assoclen + req->cryptlen +
24384 @@ -2752,9 +2760,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
24385         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
24386         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
24387         struct chcr_context *ctx = c_ctx(tfm);
24388 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24389         struct dsgl_walk dsgl_walk;
24390         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
24392 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24393         dsgl_walk_init(&dsgl_walk, phys_cpl);
24394         dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
24395                          reqctx->dst_ofst);
24396 @@ -2958,6 +2968,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
24398         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
24399         struct chcr_context *ctx = a_ctx(tfm);
24400 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24401         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
24402         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
24403         unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
24404 @@ -2967,6 +2978,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
24405         unsigned int tag_offset = 0, auth_offset = 0;
24406         unsigned int assoclen;
24408 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24410         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
24411                 assoclen = req->assoclen - 8;
24412         else
24413 @@ -3127,6 +3140,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
24415         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
24416         struct chcr_context *ctx = a_ctx(tfm);
24417 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24418         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
24419         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
24420         struct sk_buff *skb = NULL;
24421 @@ -3143,6 +3157,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
24422         struct adapter *adap = padap(ctx->dev);
24423         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
24425 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24426         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
24427                 assoclen = req->assoclen - 8;
24429 diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
24430 index 2eaa516b3231..8adcbb327126 100644
24431 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
24432 +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
24433 @@ -546,7 +546,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
24434         crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
24435         ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
24436         if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
24437 -               dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
24438 +               pr_err("get error skcipher iv size!\n");
24439                 return -EINVAL;
24440         }
24442 diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
24443 index b6b25d994af3..2ef312866338 100644
24444 --- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
24445 +++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c
24446 @@ -1649,8 +1649,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
24448         /* Initialize crypto engine */
24449         aes_dev->engine = crypto_engine_alloc_init(dev, true);
24450 -       if (!aes_dev->engine)
24451 +       if (!aes_dev->engine) {
24452 +               rc = -ENOMEM;
24453                 goto list_del;
24454 +       }
24456         rc = crypto_engine_start(aes_dev->engine);
24457         if (rc) {
24458 diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
24459 index c4b97b4160e9..322c51a6936f 100644
24460 --- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
24461 +++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
24462 @@ -1220,8 +1220,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
24464         /* Initialize crypto engine */
24465         hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
24466 -       if (!hcu_dev->engine)
24467 +       if (!hcu_dev->engine) {
24468 +               rc = -ENOMEM;
24469                 goto list_del;
24470 +       }
24472         rc = crypto_engine_start(hcu_dev->engine);
24473         if (rc) {
24474 diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
24475 index a45bdcf3026d..0dd4c6b157de 100644
24476 --- a/drivers/crypto/omap-aes.c
24477 +++ b/drivers/crypto/omap-aes.c
24478 @@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
24479                 dd->err = 0;
24480         }
24482 -       err = pm_runtime_get_sync(dd->dev);
24483 +       err = pm_runtime_resume_and_get(dd->dev);
24484         if (err < 0) {
24485 -               pm_runtime_put_noidle(dd->dev);
24486                 dev_err(dd->dev, "failed to get sync: %d\n", err);
24487                 return err;
24488         }
24489 @@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev)
24490         pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
24492         pm_runtime_enable(dev);
24493 -       err = pm_runtime_get_sync(dev);
24494 +       err = pm_runtime_resume_and_get(dev);
24495         if (err < 0) {
24496                 dev_err(dev, "%s: failed to get_sync(%d)\n",
24497                         __func__, err);
24498 @@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
24500  static int omap_aes_resume(struct device *dev)
24502 -       pm_runtime_get_sync(dev);
24503 +       pm_runtime_resume_and_get(dev);
24504         return 0;
24506  #endif
24507 diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
24508 index 1d1532e8fb6d..067ca5e17d38 100644
24509 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
24510 +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
24511 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
24512         if (ret)
24513                 goto out_err_free_reg;
24515 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
24517         ret = adf_dev_init(accel_dev);
24518         if (ret)
24519                 goto out_err_dev_shutdown;
24521 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
24523         ret = adf_dev_start(accel_dev);
24524         if (ret)
24525                 goto out_err_dev_stop;
24526 diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
24527 index 04742a6d91ca..51ea88c0b17d 100644
24528 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
24529 +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
24530 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
24531         if (ret)
24532                 goto out_err_free_reg;
24534 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
24536         ret = adf_dev_init(accel_dev);
24537         if (ret)
24538                 goto out_err_dev_shutdown;
24540 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
24542         ret = adf_dev_start(accel_dev);
24543         if (ret)
24544                 goto out_err_dev_stop;
24545 diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
24546 index c45853463530..e3ad5587be49 100644
24547 --- a/drivers/crypto/qat/qat_common/adf_isr.c
24548 +++ b/drivers/crypto/qat/qat_common/adf_isr.c
24549 @@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
24551         ret = adf_isr_alloc_msix_entry_table(accel_dev);
24552         if (ret)
24553 -               return ret;
24554 -       if (adf_enable_msix(accel_dev))
24555                 goto err_out;
24557 -       if (adf_setup_bh(accel_dev))
24558 -               goto err_out;
24559 +       ret = adf_enable_msix(accel_dev);
24560 +       if (ret)
24561 +               goto err_free_msix_table;
24563 -       if (adf_request_irqs(accel_dev))
24564 -               goto err_out;
24565 +       ret = adf_setup_bh(accel_dev);
24566 +       if (ret)
24567 +               goto err_disable_msix;
24569 +       ret = adf_request_irqs(accel_dev);
24570 +       if (ret)
24571 +               goto err_cleanup_bh;
24573         return 0;
24575 +err_cleanup_bh:
24576 +       adf_cleanup_bh(accel_dev);
24578 +err_disable_msix:
24579 +       adf_disable_msix(&accel_dev->accel_pci_dev);
24581 +err_free_msix_table:
24582 +       adf_isr_free_msix_entry_table(accel_dev);
24584  err_out:
24585 -       adf_isr_resource_free(accel_dev);
24586 -       return -EFAULT;
24587 +       return ret;
24589  EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
24590 diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
24591 index 888c1e047295..8ba28409fb74 100644
24592 --- a/drivers/crypto/qat/qat_common/adf_transport.c
24593 +++ b/drivers/crypto/qat/qat_common/adf_transport.c
24594 @@ -172,6 +172,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
24595                 dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
24596                 dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
24597                                   ring->base_addr, ring->dma_addr);
24598 +               ring->base_addr = NULL;
24599                 return -EFAULT;
24600         }
24602 diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
24603 index 38d316a42ba6..888388acb6bd 100644
24604 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
24605 +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
24606 @@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
24607                 goto err_out;
24609         if (adf_setup_pf2vf_bh(accel_dev))
24610 -               goto err_out;
24611 +               goto err_disable_msi;
24613         if (adf_setup_bh(accel_dev))
24614 -               goto err_out;
24615 +               goto err_cleanup_pf2vf_bh;
24617         if (adf_request_msi_irq(accel_dev))
24618 -               goto err_out;
24619 +               goto err_cleanup_bh;
24621         return 0;
24623 +err_cleanup_bh:
24624 +       adf_cleanup_bh(accel_dev);
24626 +err_cleanup_pf2vf_bh:
24627 +       adf_cleanup_pf2vf_bh(accel_dev);
24629 +err_disable_msi:
24630 +       adf_disable_msi(accel_dev);
24632  err_out:
24633 -       adf_vf_isr_resource_free(accel_dev);
24634         return -EFAULT;
24636  EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
24637 diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
24638 index ff78c73c47e3..ea1c6899290d 100644
24639 --- a/drivers/crypto/qat/qat_common/qat_algs.c
24640 +++ b/drivers/crypto/qat/qat_common/qat_algs.c
24641 @@ -719,7 +719,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
24642         struct qat_alg_buf_list *bufl;
24643         struct qat_alg_buf_list *buflout = NULL;
24644         dma_addr_t blp;
24645 -       dma_addr_t bloutp = 0;
24646 +       dma_addr_t bloutp;
24647         struct scatterlist *sg;
24648         size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
24650 @@ -731,6 +731,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
24651         if (unlikely(!bufl))
24652                 return -ENOMEM;
24654 +       for_each_sg(sgl, sg, n, i)
24655 +               bufl->bufers[i].addr = DMA_MAPPING_ERROR;
24657         blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
24658         if (unlikely(dma_mapping_error(dev, blp)))
24659                 goto err_in;
24660 @@ -764,10 +767,14 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
24661                                        dev_to_node(&GET_DEV(inst->accel_dev)));
24662                 if (unlikely(!buflout))
24663                         goto err_in;
24665 +               bufers = buflout->bufers;
24666 +               for_each_sg(sglout, sg, n, i)
24667 +                       bufers[i].addr = DMA_MAPPING_ERROR;
24669                 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
24670                 if (unlikely(dma_mapping_error(dev, bloutp)))
24671                         goto err_out;
24672 -               bufers = buflout->bufers;
24673                 for_each_sg(sglout, sg, n, i) {
24674                         int y = sg_nctr;
24676 diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
24677 index c972554a755e..29999da716cc 100644
24678 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
24679 +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
24680 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
24681         if (ret)
24682                 goto out_err_free_reg;
24684 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
24686         ret = adf_dev_init(accel_dev);
24687         if (ret)
24688                 goto out_err_dev_shutdown;
24690 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
24692         ret = adf_dev_start(accel_dev);
24693         if (ret)
24694                 goto out_err_dev_stop;
24695 diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
24696 index f300b0a5958a..b0f0502a5bb0 100644
24697 --- a/drivers/crypto/sa2ul.c
24698 +++ b/drivers/crypto/sa2ul.c
24699 @@ -1146,8 +1146,10 @@ static int sa_run(struct sa_req *req)
24700                 mapped_sg->sgt.sgl = src;
24701                 mapped_sg->sgt.orig_nents = src_nents;
24702                 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
24703 -               if (ret)
24704 +               if (ret) {
24705 +                       kfree(rxd);
24706                         return ret;
24707 +               }
24709                 mapped_sg->dir = dir_src;
24710                 mapped_sg->mapped = true;
24711 @@ -1155,8 +1157,10 @@ static int sa_run(struct sa_req *req)
24712                 mapped_sg->sgt.sgl = req->src;
24713                 mapped_sg->sgt.orig_nents = sg_nents;
24714                 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
24715 -               if (ret)
24716 +               if (ret) {
24717 +                       kfree(rxd);
24718                         return ret;
24719 +               }
24721                 mapped_sg->dir = dir_src;
24722                 mapped_sg->mapped = true;
24723 @@ -2350,7 +2354,7 @@ static int sa_ul_probe(struct platform_device *pdev)
24724         dev_set_drvdata(sa_k3_dev, dev_data);
24726         pm_runtime_enable(dev);
24727 -       ret = pm_runtime_get_sync(dev);
24728 +       ret = pm_runtime_resume_and_get(dev);
24729         if (ret < 0) {
24730                 dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
24731                         ret);
24732 diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
24733 index 2a4793176c71..7389a0536ff0 100644
24734 --- a/drivers/crypto/stm32/stm32-cryp.c
24735 +++ b/drivers/crypto/stm32/stm32-cryp.c
24736 @@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
24737         int ret;
24738         u32 cfg, hw_mode;
24740 -       pm_runtime_get_sync(cryp->dev);
24741 +       pm_runtime_resume_and_get(cryp->dev);
24743         /* Disable interrupt */
24744         stm32_cryp_write(cryp, CRYP_IMSCR, 0);
24745 @@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
24746         if (!cryp)
24747                 return -ENODEV;
24749 -       ret = pm_runtime_get_sync(cryp->dev);
24750 +       ret = pm_runtime_resume_and_get(cryp->dev);
24751         if (ret < 0)
24752                 return ret;
24754 diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
24755 index 7ac0573ef663..389de9e3302d 100644
24756 --- a/drivers/crypto/stm32/stm32-hash.c
24757 +++ b/drivers/crypto/stm32/stm32-hash.c
24758 @@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
24759  static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
24760                               struct stm32_hash_request_ctx *rctx)
24762 -       pm_runtime_get_sync(hdev->dev);
24763 +       pm_runtime_resume_and_get(hdev->dev);
24765         if (!(HASH_FLAGS_INIT & hdev->flags)) {
24766                 stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
24767 @@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
24768         u32 *preg;
24769         unsigned int i;
24771 -       pm_runtime_get_sync(hdev->dev);
24772 +       pm_runtime_resume_and_get(hdev->dev);
24774         while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
24775                 cpu_relax();
24776 @@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
24778         preg = rctx->hw_context;
24780 -       pm_runtime_get_sync(hdev->dev);
24781 +       pm_runtime_resume_and_get(hdev->dev);
24783         stm32_hash_write(hdev, HASH_IMR, *preg++);
24784         stm32_hash_write(hdev, HASH_STR, *preg++);
24785 @@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
24786         if (!hdev)
24787                 return -ENODEV;
24789 -       ret = pm_runtime_get_sync(hdev->dev);
24790 +       ret = pm_runtime_resume_and_get(hdev->dev);
24791         if (ret < 0)
24792                 return ret;
24794 diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
24795 index bf3047896e41..59ba59bea0f5 100644
24796 --- a/drivers/devfreq/devfreq.c
24797 +++ b/drivers/devfreq/devfreq.c
24798 @@ -387,7 +387,7 @@ static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
24799         devfreq->previous_freq = new_freq;
24801         if (devfreq->suspend_freq)
24802 -               devfreq->resume_freq = cur_freq;
24803 +               devfreq->resume_freq = new_freq;
24805         return err;
24807 @@ -821,7 +821,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
24809         if (devfreq->profile->timer < 0
24810                 || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
24811 -               goto err_out;
24812 +               mutex_unlock(&devfreq->lock);
24813 +               goto err_dev;
24814         }
24816         if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
24817 diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
24818 index 0db9b82ed8cf..1d8a3876b745 100644
24819 --- a/drivers/dma/idxd/cdev.c
24820 +++ b/drivers/dma/idxd/cdev.c
24821 @@ -39,15 +39,15 @@ struct idxd_user_context {
24822         struct iommu_sva *sva;
24823  };
24825 -enum idxd_cdev_cleanup {
24826 -       CDEV_NORMAL = 0,
24827 -       CDEV_FAILED,
24830  static void idxd_cdev_dev_release(struct device *dev)
24832 -       dev_dbg(dev, "releasing cdev device\n");
24833 -       kfree(dev);
24834 +       struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
24835 +       struct idxd_cdev_context *cdev_ctx;
24836 +       struct idxd_wq *wq = idxd_cdev->wq;
24838 +       cdev_ctx = &ictx[wq->idxd->type];
24839 +       ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
24840 +       kfree(idxd_cdev);
24843  static struct device_type idxd_cdev_device_type = {
24844 @@ -62,14 +62,11 @@ static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
24845         return container_of(cdev, struct idxd_cdev, cdev);
24848 -static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
24850 -       return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
24853  static inline struct idxd_wq *inode_wq(struct inode *inode)
24855 -       return idxd_cdev_wq(inode_idxd_cdev(inode));
24856 +       struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);
24858 +       return idxd_cdev->wq;
24861  static int idxd_cdev_open(struct inode *inode, struct file *filp)
24862 @@ -220,11 +217,10 @@ static __poll_t idxd_cdev_poll(struct file *filp,
24863         struct idxd_user_context *ctx = filp->private_data;
24864         struct idxd_wq *wq = ctx->wq;
24865         struct idxd_device *idxd = wq->idxd;
24866 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
24867         unsigned long flags;
24868         __poll_t out = 0;
24870 -       poll_wait(filp, &idxd_cdev->err_queue, wait);
24871 +       poll_wait(filp, &wq->err_queue, wait);
24872         spin_lock_irqsave(&idxd->dev_lock, flags);
24873         if (idxd->sw_err.valid)
24874                 out = EPOLLIN | EPOLLRDNORM;
24875 @@ -246,98 +242,67 @@ int idxd_cdev_get_major(struct idxd_device *idxd)
24876         return MAJOR(ictx[idxd->type].devt);
24879 -static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
24880 +int idxd_wq_add_cdev(struct idxd_wq *wq)
24882         struct idxd_device *idxd = wq->idxd;
24883 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
24884 -       struct idxd_cdev_context *cdev_ctx;
24885 +       struct idxd_cdev *idxd_cdev;
24886 +       struct cdev *cdev;
24887         struct device *dev;
24888 -       int minor, rc;
24889 +       struct idxd_cdev_context *cdev_ctx;
24890 +       int rc, minor;
24892 -       idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
24893 -       if (!idxd_cdev->dev)
24894 +       idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
24895 +       if (!idxd_cdev)
24896                 return -ENOMEM;
24898 -       dev = idxd_cdev->dev;
24899 -       dev->parent = &idxd->pdev->dev;
24900 -       dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
24901 -                    idxd->id, wq->id);
24902 -       dev->bus = idxd_get_bus_type(idxd);
24904 +       idxd_cdev->wq = wq;
24905 +       cdev = &idxd_cdev->cdev;
24906 +       dev = &idxd_cdev->dev;
24907         cdev_ctx = &ictx[wq->idxd->type];
24908         minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
24909         if (minor < 0) {
24910 -               rc = minor;
24911 -               kfree(dev);
24912 -               goto ida_err;
24913 -       }
24915 -       dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
24916 -       dev->type = &idxd_cdev_device_type;
24917 -       rc = device_register(dev);
24918 -       if (rc < 0) {
24919 -               dev_err(&idxd->pdev->dev, "device register failed\n");
24920 -               goto dev_reg_err;
24921 +               kfree(idxd_cdev);
24922 +               return minor;
24923         }
24924         idxd_cdev->minor = minor;
24926 -       return 0;
24928 - dev_reg_err:
24929 -       ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
24930 -       put_device(dev);
24931 - ida_err:
24932 -       idxd_cdev->dev = NULL;
24933 -       return rc;
24936 -static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
24937 -                                enum idxd_cdev_cleanup cdev_state)
24939 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
24940 -       struct idxd_cdev_context *cdev_ctx;
24942 -       cdev_ctx = &ictx[wq->idxd->type];
24943 -       if (cdev_state == CDEV_NORMAL)
24944 -               cdev_del(&idxd_cdev->cdev);
24945 -       device_unregister(idxd_cdev->dev);
24946 -       /*
24947 -        * The device_type->release() will be called on the device and free
24948 -        * the allocated struct device. We can just forget it.
24949 -        */
24950 -       ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
24951 -       idxd_cdev->dev = NULL;
24952 -       idxd_cdev->minor = -1;
24955 -int idxd_wq_add_cdev(struct idxd_wq *wq)
24957 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
24958 -       struct cdev *cdev = &idxd_cdev->cdev;
24959 -       struct device *dev;
24960 -       int rc;
24961 +       device_initialize(dev);
24962 +       dev->parent = &wq->conf_dev;
24963 +       dev->bus = idxd_get_bus_type(idxd);
24964 +       dev->type = &idxd_cdev_device_type;
24965 +       dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
24967 -       rc = idxd_wq_cdev_dev_setup(wq);
24968 +       rc = dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
24969 +                         idxd->id, wq->id);
24970         if (rc < 0)
24971 -               return rc;
24972 +               goto err;
24974 -       dev = idxd_cdev->dev;
24975 +       wq->idxd_cdev = idxd_cdev;
24976         cdev_init(cdev, &idxd_cdev_fops);
24977 -       cdev_set_parent(cdev, &dev->kobj);
24978 -       rc = cdev_add(cdev, dev->devt, 1);
24979 +       rc = cdev_device_add(cdev, dev);
24980         if (rc) {
24981                 dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
24982 -               idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
24983 -               return rc;
24984 +               goto err;
24985         }
24987 -       init_waitqueue_head(&idxd_cdev->err_queue);
24988         return 0;
24990 + err:
24991 +       put_device(dev);
24992 +       wq->idxd_cdev = NULL;
24993 +       return rc;
24996  void idxd_wq_del_cdev(struct idxd_wq *wq)
24998 -       idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
24999 +       struct idxd_cdev *idxd_cdev;
25000 +       struct idxd_cdev_context *cdev_ctx;
25002 +       cdev_ctx = &ictx[wq->idxd->type];
25003 +       idxd_cdev = wq->idxd_cdev;
25004 +       wq->idxd_cdev = NULL;
25005 +       cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
25006 +       put_device(&idxd_cdev->dev);
25009  int idxd_cdev_register(void)
25010 diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
25011 index 31c819544a22..4fef57717049 100644
25012 --- a/drivers/dma/idxd/device.c
25013 +++ b/drivers/dma/idxd/device.c
25014 @@ -19,7 +19,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
25015  /* Interrupt control bits */
25016  void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
25018 -       struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
25019 +       struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
25021         pci_msi_mask_irq(data);
25023 @@ -36,7 +36,7 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
25025  void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
25027 -       struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
25028 +       struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
25030         pci_msi_unmask_irq(data);
25032 @@ -186,8 +186,6 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
25033                 desc->id = i;
25034                 desc->wq = wq;
25035                 desc->cpu = -1;
25036 -               dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
25037 -               desc->txd.tx_submit = idxd_dma_tx_submit;
25038         }
25040         return 0;
25041 @@ -451,7 +449,8 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
25043         if (idxd_device_is_halted(idxd)) {
25044                 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
25045 -               *status = IDXD_CMDSTS_HW_ERR;
25046 +               if (status)
25047 +                       *status = IDXD_CMDSTS_HW_ERR;
25048                 return;
25049         }
25051 @@ -521,7 +520,7 @@ void idxd_device_wqs_clear_state(struct idxd_device *idxd)
25052         lockdep_assert_held(&idxd->dev_lock);
25054         for (i = 0; i < idxd->max_wqs; i++) {
25055 -               struct idxd_wq *wq = &idxd->wqs[i];
25056 +               struct idxd_wq *wq = idxd->wqs[i];
25058                 if (wq->state == IDXD_WQ_ENABLED) {
25059                         idxd_wq_disable_cleanup(wq);
25060 @@ -660,7 +659,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
25061                 ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
25063         for (i = 0; i < idxd->max_groups; i++) {
25064 -               struct idxd_group *group = &idxd->groups[i];
25065 +               struct idxd_group *group = idxd->groups[i];
25067                 idxd_group_config_write(group);
25068         }
25069 @@ -739,7 +738,7 @@ static int idxd_wqs_config_write(struct idxd_device *idxd)
25070         int i, rc;
25072         for (i = 0; i < idxd->max_wqs; i++) {
25073 -               struct idxd_wq *wq = &idxd->wqs[i];
25074 +               struct idxd_wq *wq = idxd->wqs[i];
25076                 rc = idxd_wq_config_write(wq);
25077                 if (rc < 0)
25078 @@ -755,7 +754,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
25080         /* TC-A 0 and TC-B 1 should be defaults */
25081         for (i = 0; i < idxd->max_groups; i++) {
25082 -               struct idxd_group *group = &idxd->groups[i];
25083 +               struct idxd_group *group = idxd->groups[i];
25085                 if (group->tc_a == -1)
25086                         group->tc_a = group->grpcfg.flags.tc_a = 0;
25087 @@ -782,12 +781,12 @@ static int idxd_engines_setup(struct idxd_device *idxd)
25088         struct idxd_group *group;
25090         for (i = 0; i < idxd->max_groups; i++) {
25091 -               group = &idxd->groups[i];
25092 +               group = idxd->groups[i];
25093                 group->grpcfg.engines = 0;
25094         }
25096         for (i = 0; i < idxd->max_engines; i++) {
25097 -               eng = &idxd->engines[i];
25098 +               eng = idxd->engines[i];
25099                 group = eng->group;
25101                 if (!group)
25102 @@ -811,13 +810,13 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
25103         struct device *dev = &idxd->pdev->dev;
25105         for (i = 0; i < idxd->max_groups; i++) {
25106 -               group = &idxd->groups[i];
25107 +               group = idxd->groups[i];
25108                 for (j = 0; j < 4; j++)
25109                         group->grpcfg.wqs[j] = 0;
25110         }
25112         for (i = 0; i < idxd->max_wqs; i++) {
25113 -               wq = &idxd->wqs[i];
25114 +               wq = idxd->wqs[i];
25115                 group = wq->group;
25117                 if (!wq->group)
25118 diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
25119 index a15e50126434..77439b645044 100644
25120 --- a/drivers/dma/idxd/dma.c
25121 +++ b/drivers/dma/idxd/dma.c
25122 @@ -14,7 +14,10 @@
25124  static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
25126 -       return container_of(c, struct idxd_wq, dma_chan);
25127 +       struct idxd_dma_chan *idxd_chan;
25129 +       idxd_chan = container_of(c, struct idxd_dma_chan, chan);
25130 +       return idxd_chan->wq;
25133  void idxd_dma_complete_txd(struct idxd_desc *desc,
25134 @@ -135,7 +138,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
25138 -dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
25139 +static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
25141         struct dma_chan *c = tx->chan;
25142         struct idxd_wq *wq = to_idxd_wq(c);
25143 @@ -156,14 +159,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
25145  static void idxd_dma_release(struct dma_device *device)
25147 +       struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);
25149 +       kfree(idxd_dma);
25152  int idxd_register_dma_device(struct idxd_device *idxd)
25154 -       struct dma_device *dma = &idxd->dma_dev;
25155 +       struct idxd_dma_dev *idxd_dma;
25156 +       struct dma_device *dma;
25157 +       struct device *dev = &idxd->pdev->dev;
25158 +       int rc;
25160 +       idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
25161 +       if (!idxd_dma)
25162 +               return -ENOMEM;
25164 +       dma = &idxd_dma->dma;
25165         INIT_LIST_HEAD(&dma->channels);
25166 -       dma->dev = &idxd->pdev->dev;
25167 +       dma->dev = dev;
25169         dma_cap_set(DMA_PRIVATE, dma->cap_mask);
25170         dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
25171 @@ -179,35 +193,72 @@ int idxd_register_dma_device(struct idxd_device *idxd)
25172         dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
25173         dma->device_free_chan_resources = idxd_dma_free_chan_resources;
25175 -       return dma_async_device_register(&idxd->dma_dev);
25176 +       rc = dma_async_device_register(dma);
25177 +       if (rc < 0) {
25178 +               kfree(idxd_dma);
25179 +               return rc;
25180 +       }
25182 +       idxd_dma->idxd = idxd;
25183 +       /*
25184 +        * This pointer is protected by the refs taken by the dma_chan. It will remain valid
25185 +        * as long as there are outstanding channels.
25186 +        */
25187 +       idxd->idxd_dma = idxd_dma;
25188 +       return 0;
25191  void idxd_unregister_dma_device(struct idxd_device *idxd)
25193 -       dma_async_device_unregister(&idxd->dma_dev);
25194 +       dma_async_device_unregister(&idxd->idxd_dma->dma);
25197  int idxd_register_dma_channel(struct idxd_wq *wq)
25199         struct idxd_device *idxd = wq->idxd;
25200 -       struct dma_device *dma = &idxd->dma_dev;
25201 -       struct dma_chan *chan = &wq->dma_chan;
25202 -       int rc;
25203 +       struct dma_device *dma = &idxd->idxd_dma->dma;
25204 +       struct device *dev = &idxd->pdev->dev;
25205 +       struct idxd_dma_chan *idxd_chan;
25206 +       struct dma_chan *chan;
25207 +       int rc, i;
25209 +       idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
25210 +       if (!idxd_chan)
25211 +               return -ENOMEM;
25213 -       memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
25214 +       chan = &idxd_chan->chan;
25215         chan->device = dma;
25216         list_add_tail(&chan->device_node, &dma->channels);
25218 +       for (i = 0; i < wq->num_descs; i++) {
25219 +               struct idxd_desc *desc = wq->descs[i];
25221 +               dma_async_tx_descriptor_init(&desc->txd, chan);
25222 +               desc->txd.tx_submit = idxd_dma_tx_submit;
25223 +       }
25225         rc = dma_async_device_channel_register(dma, chan);
25226 -       if (rc < 0)
25227 +       if (rc < 0) {
25228 +               kfree(idxd_chan);
25229                 return rc;
25230 +       }
25232 +       wq->idxd_chan = idxd_chan;
25233 +       idxd_chan->wq = wq;
25234 +       get_device(&wq->conf_dev);
25236         return 0;
25239  void idxd_unregister_dma_channel(struct idxd_wq *wq)
25241 -       struct dma_chan *chan = &wq->dma_chan;
25242 +       struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
25243 +       struct dma_chan *chan = &idxd_chan->chan;
25244 +       struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;
25246 -       dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
25247 +       dma_async_device_channel_unregister(&idxd_dma->dma, chan);
25248         list_del(&chan->device_node);
25249 +       kfree(wq->idxd_chan);
25250 +       wq->idxd_chan = NULL;
25251 +       put_device(&wq->conf_dev);
25253 diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
25254 index 76014c14f473..89daf746d121 100644
25255 --- a/drivers/dma/idxd/idxd.h
25256 +++ b/drivers/dma/idxd/idxd.h
25257 @@ -8,12 +8,16 @@
25258  #include <linux/percpu-rwsem.h>
25259  #include <linux/wait.h>
25260  #include <linux/cdev.h>
25261 +#include <linux/idr.h>
25262  #include "registers.h"
25264  #define IDXD_DRIVER_VERSION    "1.00"
25266  extern struct kmem_cache *idxd_desc_pool;
25268 +struct idxd_device;
25269 +struct idxd_wq;
25271  #define IDXD_REG_TIMEOUT       50
25272  #define IDXD_DRAIN_TIMEOUT     5000
25274 @@ -33,6 +37,7 @@ struct idxd_device_driver {
25275  struct idxd_irq_entry {
25276         struct idxd_device *idxd;
25277         int id;
25278 +       int vector;
25279         struct llist_head pending_llist;
25280         struct list_head work_list;
25281         /*
25282 @@ -75,10 +80,10 @@ enum idxd_wq_type {
25283  };
25285  struct idxd_cdev {
25286 +       struct idxd_wq *wq;
25287         struct cdev cdev;
25288 -       struct device *dev;
25289 +       struct device dev;
25290         int minor;
25291 -       struct wait_queue_head err_queue;
25292  };
25294  #define IDXD_ALLOCATED_BATCH_SIZE      128U
25295 @@ -96,10 +101,16 @@ enum idxd_complete_type {
25296         IDXD_COMPLETE_DEV_FAIL,
25297  };
25299 +struct idxd_dma_chan {
25300 +       struct dma_chan chan;
25301 +       struct idxd_wq *wq;
25304  struct idxd_wq {
25305         void __iomem *portal;
25306         struct device conf_dev;
25307 -       struct idxd_cdev idxd_cdev;
25308 +       struct idxd_cdev *idxd_cdev;
25309 +       struct wait_queue_head err_queue;
25310         struct idxd_device *idxd;
25311         int id;
25312         enum idxd_wq_type type;
25313 @@ -125,7 +136,7 @@ struct idxd_wq {
25314         int compls_size;
25315         struct idxd_desc **descs;
25316         struct sbitmap_queue sbq;
25317 -       struct dma_chan dma_chan;
25318 +       struct idxd_dma_chan *idxd_chan;
25319         char name[WQ_NAME_SIZE + 1];
25320         u64 max_xfer_bytes;
25321         u32 max_batch_size;
25322 @@ -162,6 +173,11 @@ enum idxd_device_flag {
25323         IDXD_FLAG_PASID_ENABLED,
25324  };
25326 +struct idxd_dma_dev {
25327 +       struct idxd_device *idxd;
25328 +       struct dma_device dma;
25331  struct idxd_device {
25332         enum idxd_type type;
25333         struct device conf_dev;
25334 @@ -178,9 +194,9 @@ struct idxd_device {
25336         spinlock_t dev_lock;    /* spinlock for device */
25337         struct completion *cmd_done;
25338 -       struct idxd_group *groups;
25339 -       struct idxd_wq *wqs;
25340 -       struct idxd_engine *engines;
25341 +       struct idxd_group **groups;
25342 +       struct idxd_wq **wqs;
25343 +       struct idxd_engine **engines;
25345         struct iommu_sva *sva;
25346         unsigned int pasid;
25347 @@ -206,11 +222,10 @@ struct idxd_device {
25349         union sw_err_reg sw_err;
25350         wait_queue_head_t cmd_waitq;
25351 -       struct msix_entry *msix_entries;
25352         int num_wq_irqs;
25353         struct idxd_irq_entry *irq_entries;
25355 -       struct dma_device dma_dev;
25356 +       struct idxd_dma_dev *idxd_dma;
25357         struct workqueue_struct *wq;
25358         struct work_struct work;
25359  };
25360 @@ -242,6 +257,43 @@ extern struct bus_type dsa_bus_type;
25361  extern struct bus_type iax_bus_type;
25363  extern bool support_enqcmd;
25364 +extern struct device_type dsa_device_type;
25365 +extern struct device_type iax_device_type;
25366 +extern struct device_type idxd_wq_device_type;
25367 +extern struct device_type idxd_engine_device_type;
25368 +extern struct device_type idxd_group_device_type;
25370 +static inline bool is_dsa_dev(struct device *dev)
25372 +       return dev->type == &dsa_device_type;
25375 +static inline bool is_iax_dev(struct device *dev)
25377 +       return dev->type == &iax_device_type;
25380 +static inline bool is_idxd_dev(struct device *dev)
25382 +       return is_dsa_dev(dev) || is_iax_dev(dev);
25385 +static inline bool is_idxd_wq_dev(struct device *dev)
25387 +       return dev->type == &idxd_wq_device_type;
25390 +static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
25392 +       if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
25393 +               return true;
25394 +       return false;
25397 +static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
25399 +       return wq->type == IDXD_WQT_USER;
25402  static inline bool wq_dedicated(struct idxd_wq *wq)
25404 @@ -279,18 +331,6 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
25405         return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
25408 -static inline void idxd_set_type(struct idxd_device *idxd)
25410 -       struct pci_dev *pdev = idxd->pdev;
25412 -       if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
25413 -               idxd->type = IDXD_TYPE_DSA;
25414 -       else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
25415 -               idxd->type = IDXD_TYPE_IAX;
25416 -       else
25417 -               idxd->type = IDXD_TYPE_UNKNOWN;
25420  static inline void idxd_wq_get(struct idxd_wq *wq)
25422         wq->client_count++;
25423 @@ -306,14 +346,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
25424         return wq->client_count;
25425  };
25427 +struct ida *idxd_ida(struct idxd_device *idxd);
25428  const char *idxd_get_dev_name(struct idxd_device *idxd);
25429  int idxd_register_bus_type(void);
25430  void idxd_unregister_bus_type(void);
25431 -int idxd_setup_sysfs(struct idxd_device *idxd);
25432 -void idxd_cleanup_sysfs(struct idxd_device *idxd);
25433 +int idxd_register_devices(struct idxd_device *idxd);
25434 +void idxd_unregister_devices(struct idxd_device *idxd);
25435  int idxd_register_driver(void);
25436  void idxd_unregister_driver(void);
25437  struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
25438 +struct device_type *idxd_get_device_type(struct idxd_device *idxd);
25440  /* device interrupt control */
25441  void idxd_msix_perm_setup(struct idxd_device *idxd);
25442 @@ -363,7 +405,6 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq);
25443  void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
25444  void idxd_dma_complete_txd(struct idxd_desc *desc,
25445                            enum idxd_complete_type comp_type);
25446 -dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
25448  /* cdev */
25449  int idxd_cdev_register(void);
25450 diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
25451 index 6584b0ec07d5..07cf7977a045 100644
25452 --- a/drivers/dma/idxd/init.c
25453 +++ b/drivers/dma/idxd/init.c
25454 @@ -34,8 +34,7 @@ MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
25456  bool support_enqcmd;
25458 -static struct idr idxd_idrs[IDXD_TYPE_MAX];
25459 -static DEFINE_MUTEX(idxd_idr_lock);
25460 +static struct ida idxd_idas[IDXD_TYPE_MAX];
25462  static struct pci_device_id idxd_pci_tbl[] = {
25463         /* DSA ver 1.0 platforms */
25464 @@ -52,6 +51,11 @@ static char *idxd_name[] = {
25465         "iax"
25466  };
25468 +struct ida *idxd_ida(struct idxd_device *idxd)
25470 +       return &idxd_idas[idxd->type];
25473  const char *idxd_get_dev_name(struct idxd_device *idxd)
25475         return idxd_name[idxd->type];
25476 @@ -61,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
25478         struct pci_dev *pdev = idxd->pdev;
25479         struct device *dev = &pdev->dev;
25480 -       struct msix_entry *msix;
25481         struct idxd_irq_entry *irq_entry;
25482         int i, msixcnt;
25483         int rc = 0;
25484 @@ -69,23 +72,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
25485         msixcnt = pci_msix_vec_count(pdev);
25486         if (msixcnt < 0) {
25487                 dev_err(dev, "Not MSI-X interrupt capable.\n");
25488 -               goto err_no_irq;
25489 -       }
25491 -       idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
25492 -                       msixcnt, GFP_KERNEL);
25493 -       if (!idxd->msix_entries) {
25494 -               rc = -ENOMEM;
25495 -               goto err_no_irq;
25496 +               return -ENOSPC;
25497         }
25499 -       for (i = 0; i < msixcnt; i++)
25500 -               idxd->msix_entries[i].entry = i;
25502 -       rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
25503 -       if (rc) {
25504 -               dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
25505 -               goto err_no_irq;
25506 +       rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
25507 +       if (rc != msixcnt) {
25508 +               dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
25509 +               return -ENOSPC;
25510         }
25511         dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
25513 @@ -93,119 +86,236 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
25514          * We implement 1 completion list per MSI-X entry except for
25515          * entry 0, which is for errors and others.
25516          */
25517 -       idxd->irq_entries = devm_kcalloc(dev, msixcnt,
25518 -                                        sizeof(struct idxd_irq_entry),
25519 -                                        GFP_KERNEL);
25520 +       idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
25521 +                                        GFP_KERNEL, dev_to_node(dev));
25522         if (!idxd->irq_entries) {
25523                 rc = -ENOMEM;
25524 -               goto err_no_irq;
25525 +               goto err_irq_entries;
25526         }
25528         for (i = 0; i < msixcnt; i++) {
25529                 idxd->irq_entries[i].id = i;
25530                 idxd->irq_entries[i].idxd = idxd;
25531 +               idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
25532                 spin_lock_init(&idxd->irq_entries[i].list_lock);
25533         }
25535 -       msix = &idxd->msix_entries[0];
25536         irq_entry = &idxd->irq_entries[0];
25537 -       rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
25538 -                                      idxd_misc_thread, 0, "idxd-misc",
25539 -                                      irq_entry);
25540 +       rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler, idxd_misc_thread,
25541 +                                 0, "idxd-misc", irq_entry);
25542         if (rc < 0) {
25543                 dev_err(dev, "Failed to allocate misc interrupt.\n");
25544 -               goto err_no_irq;
25545 +               goto err_misc_irq;
25546         }
25548 -       dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
25549 -               msix->vector);
25550 +       dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
25552         /* first MSI-X entry is not for wq interrupts */
25553         idxd->num_wq_irqs = msixcnt - 1;
25555         for (i = 1; i < msixcnt; i++) {
25556 -               msix = &idxd->msix_entries[i];
25557                 irq_entry = &idxd->irq_entries[i];
25559                 init_llist_head(&idxd->irq_entries[i].pending_llist);
25560                 INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
25561 -               rc = devm_request_threaded_irq(dev, msix->vector,
25562 -                                              idxd_irq_handler,
25563 -                                              idxd_wq_thread, 0,
25564 -                                              "idxd-portal", irq_entry);
25565 +               rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler,
25566 +                                         idxd_wq_thread, 0, "idxd-portal", irq_entry);
25567                 if (rc < 0) {
25568 -                       dev_err(dev, "Failed to allocate irq %d.\n",
25569 -                               msix->vector);
25570 -                       goto err_no_irq;
25571 +                       dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
25572 +                       goto err_wq_irqs;
25573                 }
25574 -               dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
25575 -                       i, msix->vector);
25576 +               dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
25577         }
25579         idxd_unmask_error_interrupts(idxd);
25580         idxd_msix_perm_setup(idxd);
25581         return 0;
25583 - err_no_irq:
25584 + err_wq_irqs:
25585 +       while (--i >= 0) {
25586 +               irq_entry = &idxd->irq_entries[i];
25587 +               free_irq(irq_entry->vector, irq_entry);
25588 +       }
25589 + err_misc_irq:
25590         /* Disable error interrupt generation */
25591         idxd_mask_error_interrupts(idxd);
25592 -       pci_disable_msix(pdev);
25593 + err_irq_entries:
25594 +       pci_free_irq_vectors(pdev);
25595         dev_err(dev, "No usable interrupts\n");
25596         return rc;
25599 -static int idxd_setup_internals(struct idxd_device *idxd)
25600 +static int idxd_setup_wqs(struct idxd_device *idxd)
25602         struct device *dev = &idxd->pdev->dev;
25603 -       int i;
25605 -       init_waitqueue_head(&idxd->cmd_waitq);
25606 -       idxd->groups = devm_kcalloc(dev, idxd->max_groups,
25607 -                                   sizeof(struct idxd_group), GFP_KERNEL);
25608 -       if (!idxd->groups)
25609 -               return -ENOMEM;
25611 -       for (i = 0; i < idxd->max_groups; i++) {
25612 -               idxd->groups[i].idxd = idxd;
25613 -               idxd->groups[i].id = i;
25614 -               idxd->groups[i].tc_a = -1;
25615 -               idxd->groups[i].tc_b = -1;
25616 -       }
25617 +       struct idxd_wq *wq;
25618 +       int i, rc;
25620 -       idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
25621 -                                GFP_KERNEL);
25622 +       idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
25623 +                                GFP_KERNEL, dev_to_node(dev));
25624         if (!idxd->wqs)
25625                 return -ENOMEM;
25627 -       idxd->engines = devm_kcalloc(dev, idxd->max_engines,
25628 -                                    sizeof(struct idxd_engine), GFP_KERNEL);
25629 -       if (!idxd->engines)
25630 -               return -ENOMEM;
25632         for (i = 0; i < idxd->max_wqs; i++) {
25633 -               struct idxd_wq *wq = &idxd->wqs[i];
25634 +               wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
25635 +               if (!wq) {
25636 +                       rc = -ENOMEM;
25637 +                       goto err;
25638 +               }
25640                 wq->id = i;
25641                 wq->idxd = idxd;
25642 +               device_initialize(&wq->conf_dev);
25643 +               wq->conf_dev.parent = &idxd->conf_dev;
25644 +               wq->conf_dev.bus = idxd_get_bus_type(idxd);
25645 +               wq->conf_dev.type = &idxd_wq_device_type;
25646 +               rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
25647 +               if (rc < 0) {
25648 +                       put_device(&wq->conf_dev);
25649 +                       goto err;
25650 +               }
25652                 mutex_init(&wq->wq_lock);
25653 -               wq->idxd_cdev.minor = -1;
25654 +               init_waitqueue_head(&wq->err_queue);
25655                 wq->max_xfer_bytes = idxd->max_xfer_bytes;
25656                 wq->max_batch_size = idxd->max_batch_size;
25657 -               wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
25658 -               if (!wq->wqcfg)
25659 -                       return -ENOMEM;
25660 +               wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
25661 +               if (!wq->wqcfg) {
25662 +                       put_device(&wq->conf_dev);
25663 +                       rc = -ENOMEM;
25664 +                       goto err;
25665 +               }
25666 +               idxd->wqs[i] = wq;
25667         }
25669 +       return 0;
25671 + err:
25672 +       while (--i >= 0)
25673 +               put_device(&idxd->wqs[i]->conf_dev);
25674 +       return rc;
25677 +static int idxd_setup_engines(struct idxd_device *idxd)
25679 +       struct idxd_engine *engine;
25680 +       struct device *dev = &idxd->pdev->dev;
25681 +       int i, rc;
25683 +       idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
25684 +                                    GFP_KERNEL, dev_to_node(dev));
25685 +       if (!idxd->engines)
25686 +               return -ENOMEM;
25688         for (i = 0; i < idxd->max_engines; i++) {
25689 -               idxd->engines[i].idxd = idxd;
25690 -               idxd->engines[i].id = i;
25691 +               engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
25692 +               if (!engine) {
25693 +                       rc = -ENOMEM;
25694 +                       goto err;
25695 +               }
25697 +               engine->id = i;
25698 +               engine->idxd = idxd;
25699 +               device_initialize(&engine->conf_dev);
25700 +               engine->conf_dev.parent = &idxd->conf_dev;
25701 +               engine->conf_dev.type = &idxd_engine_device_type;
25702 +               rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
25703 +               if (rc < 0) {
25704 +                       put_device(&engine->conf_dev);
25705 +                       goto err;
25706 +               }
25708 +               idxd->engines[i] = engine;
25709         }
25711 -       idxd->wq = create_workqueue(dev_name(dev));
25712 -       if (!idxd->wq)
25713 +       return 0;
25715 + err:
25716 +       while (--i >= 0)
25717 +               put_device(&idxd->engines[i]->conf_dev);
25718 +       return rc;
25721 +static int idxd_setup_groups(struct idxd_device *idxd)
25723 +       struct device *dev = &idxd->pdev->dev;
25724 +       struct idxd_group *group;
25725 +       int i, rc;
25727 +       idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
25728 +                                   GFP_KERNEL, dev_to_node(dev));
25729 +       if (!idxd->groups)
25730                 return -ENOMEM;
25732 +       for (i = 0; i < idxd->max_groups; i++) {
25733 +               group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
25734 +               if (!group) {
25735 +                       rc = -ENOMEM;
25736 +                       goto err;
25737 +               }
25739 +               group->id = i;
25740 +               group->idxd = idxd;
25741 +               device_initialize(&group->conf_dev);
25742 +               group->conf_dev.parent = &idxd->conf_dev;
25743 +               group->conf_dev.bus = idxd_get_bus_type(idxd);
25744 +               group->conf_dev.type = &idxd_group_device_type;
25745 +               rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
25746 +               if (rc < 0) {
25747 +                       put_device(&group->conf_dev);
25748 +                       goto err;
25749 +               }
25751 +               idxd->groups[i] = group;
25752 +               group->tc_a = -1;
25753 +               group->tc_b = -1;
25754 +       }
25756 +       return 0;
25758 + err:
25759 +       while (--i >= 0)
25760 +               put_device(&idxd->groups[i]->conf_dev);
25761 +       return rc;
25764 +static int idxd_setup_internals(struct idxd_device *idxd)
25766 +       struct device *dev = &idxd->pdev->dev;
25767 +       int rc, i;
25769 +       init_waitqueue_head(&idxd->cmd_waitq);
25771 +       rc = idxd_setup_wqs(idxd);
25772 +       if (rc < 0)
25773 +               return rc;
25775 +       rc = idxd_setup_engines(idxd);
25776 +       if (rc < 0)
25777 +               goto err_engine;
25779 +       rc = idxd_setup_groups(idxd);
25780 +       if (rc < 0)
25781 +               goto err_group;
25783 +       idxd->wq = create_workqueue(dev_name(dev));
25784 +       if (!idxd->wq) {
25785 +               rc = -ENOMEM;
25786 +               goto err_wkq_create;
25787 +       }
25789         return 0;
25791 + err_wkq_create:
25792 +       for (i = 0; i < idxd->max_groups; i++)
25793 +               put_device(&idxd->groups[i]->conf_dev);
25794 + err_group:
25795 +       for (i = 0; i < idxd->max_engines; i++)
25796 +               put_device(&idxd->engines[i]->conf_dev);
25797 + err_engine:
25798 +       for (i = 0; i < idxd->max_wqs; i++)
25799 +               put_device(&idxd->wqs[i]->conf_dev);
25800 +       return rc;
25803  static void idxd_read_table_offsets(struct idxd_device *idxd)
25804 @@ -275,16 +385,44 @@ static void idxd_read_caps(struct idxd_device *idxd)
25805         }
25808 +static inline void idxd_set_type(struct idxd_device *idxd)
25810 +       struct pci_dev *pdev = idxd->pdev;
25812 +       if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
25813 +               idxd->type = IDXD_TYPE_DSA;
25814 +       else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
25815 +               idxd->type = IDXD_TYPE_IAX;
25816 +       else
25817 +               idxd->type = IDXD_TYPE_UNKNOWN;
25820  static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
25822         struct device *dev = &pdev->dev;
25823         struct idxd_device *idxd;
25824 +       int rc;
25826 -       idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
25827 +       idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
25828         if (!idxd)
25829                 return NULL;
25831         idxd->pdev = pdev;
25832 +       idxd_set_type(idxd);
25833 +       idxd->id = ida_alloc(idxd_ida(idxd), GFP_KERNEL);
25834 +       if (idxd->id < 0)
25835 +               return NULL;
25837 +       device_initialize(&idxd->conf_dev);
25838 +       idxd->conf_dev.parent = dev;
25839 +       idxd->conf_dev.bus = idxd_get_bus_type(idxd);
25840 +       idxd->conf_dev.type = idxd_get_device_type(idxd);
25841 +       rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd_get_dev_name(idxd), idxd->id);
25842 +       if (rc < 0) {
25843 +               put_device(&idxd->conf_dev);
25844 +               return NULL;
25845 +       }
25847         spin_lock_init(&idxd->dev_lock);
25849         return idxd;
25850 @@ -352,31 +490,20 @@ static int idxd_probe(struct idxd_device *idxd)
25852         rc = idxd_setup_internals(idxd);
25853         if (rc)
25854 -               goto err_setup;
25855 +               goto err;
25857         rc = idxd_setup_interrupts(idxd);
25858         if (rc)
25859 -               goto err_setup;
25860 +               goto err;
25862         dev_dbg(dev, "IDXD interrupt setup complete.\n");
25864 -       mutex_lock(&idxd_idr_lock);
25865 -       idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
25866 -       mutex_unlock(&idxd_idr_lock);
25867 -       if (idxd->id < 0) {
25868 -               rc = -ENOMEM;
25869 -               goto err_idr_fail;
25870 -       }
25872         idxd->major = idxd_cdev_get_major(idxd);
25874         dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
25875         return 0;
25877 - err_idr_fail:
25878 -       idxd_mask_error_interrupts(idxd);
25879 -       idxd_mask_msix_vectors(idxd);
25880 - err_setup:
25881 + err:
25882         if (device_pasid_enabled(idxd))
25883                 idxd_disable_system_pasid(idxd);
25884         return rc;
25885 @@ -396,34 +523,37 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
25886         struct idxd_device *idxd;
25887         int rc;
25889 -       rc = pcim_enable_device(pdev);
25890 +       rc = pci_enable_device(pdev);
25891         if (rc)
25892                 return rc;
25894         dev_dbg(dev, "Alloc IDXD context\n");
25895         idxd = idxd_alloc(pdev);
25896 -       if (!idxd)
25897 -               return -ENOMEM;
25898 +       if (!idxd) {
25899 +               rc = -ENOMEM;
25900 +               goto err_idxd_alloc;
25901 +       }
25903         dev_dbg(dev, "Mapping BARs\n");
25904 -       idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
25905 -       if (!idxd->reg_base)
25906 -               return -ENOMEM;
25907 +       idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
25908 +       if (!idxd->reg_base) {
25909 +               rc = -ENOMEM;
25910 +               goto err_iomap;
25911 +       }
25913         dev_dbg(dev, "Set DMA masks\n");
25914         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
25915         if (rc)
25916                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
25917         if (rc)
25918 -               return rc;
25919 +               goto err;
25921         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
25922         if (rc)
25923                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
25924         if (rc)
25925 -               return rc;
25926 +               goto err;
25928 -       idxd_set_type(idxd);
25930         idxd_type_init(idxd);
25932 @@ -435,13 +565,13 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
25933         rc = idxd_probe(idxd);
25934         if (rc) {
25935                 dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
25936 -               return -ENODEV;
25937 +               goto err;
25938         }
25940 -       rc = idxd_setup_sysfs(idxd);
25941 +       rc = idxd_register_devices(idxd);
25942         if (rc) {
25943                 dev_err(dev, "IDXD sysfs setup failed\n");
25944 -               return -ENODEV;
25945 +               goto err;
25946         }
25948         idxd->state = IDXD_DEV_CONF_READY;
25949 @@ -450,6 +580,14 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
25950                  idxd->hw.version);
25952         return 0;
25954 + err:
25955 +       pci_iounmap(pdev, idxd->reg_base);
25956 + err_iomap:
25957 +       put_device(&idxd->conf_dev);
25958 + err_idxd_alloc:
25959 +       pci_disable_device(pdev);
25960 +       return rc;
25963  static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
25964 @@ -495,7 +633,8 @@ static void idxd_shutdown(struct pci_dev *pdev)
25966         for (i = 0; i < msixcnt; i++) {
25967                 irq_entry = &idxd->irq_entries[i];
25968 -               synchronize_irq(idxd->msix_entries[i].vector);
25969 +               synchronize_irq(irq_entry->vector);
25970 +               free_irq(irq_entry->vector, irq_entry);
25971                 if (i == 0)
25972                         continue;
25973                 idxd_flush_pending_llist(irq_entry);
25974 @@ -503,6 +642,9 @@ static void idxd_shutdown(struct pci_dev *pdev)
25975         }
25977         idxd_msix_perm_clear(idxd);
25978 +       pci_free_irq_vectors(pdev);
25979 +       pci_iounmap(pdev, idxd->reg_base);
25980 +       pci_disable_device(pdev);
25981         destroy_workqueue(idxd->wq);
25984 @@ -511,13 +653,10 @@ static void idxd_remove(struct pci_dev *pdev)
25985         struct idxd_device *idxd = pci_get_drvdata(pdev);
25987         dev_dbg(&pdev->dev, "%s called\n", __func__);
25988 -       idxd_cleanup_sysfs(idxd);
25989         idxd_shutdown(pdev);
25990         if (device_pasid_enabled(idxd))
25991                 idxd_disable_system_pasid(idxd);
25992 -       mutex_lock(&idxd_idr_lock);
25993 -       idr_remove(&idxd_idrs[idxd->type], idxd->id);
25994 -       mutex_unlock(&idxd_idr_lock);
25995 +       idxd_unregister_devices(idxd);
25998  static struct pci_driver idxd_pci_driver = {
25999 @@ -547,7 +686,7 @@ static int __init idxd_init_module(void)
26000                 support_enqcmd = true;
26002         for (i = 0; i < IDXD_TYPE_MAX; i++)
26003 -               idr_init(&idxd_idrs[i]);
26004 +               ida_init(&idxd_idas[i]);
26006         err = idxd_register_bus_type();
26007         if (err < 0)
26008 diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
26009 index f1463fc58112..fc0781e3f36d 100644
26010 --- a/drivers/dma/idxd/irq.c
26011 +++ b/drivers/dma/idxd/irq.c
26012 @@ -45,7 +45,7 @@ static void idxd_device_reinit(struct work_struct *work)
26013                 goto out;
26015         for (i = 0; i < idxd->max_wqs; i++) {
26016 -               struct idxd_wq *wq = &idxd->wqs[i];
26017 +               struct idxd_wq *wq = idxd->wqs[i];
26019                 if (wq->state == IDXD_WQ_ENABLED) {
26020                         rc = idxd_wq_enable(wq);
26021 @@ -130,18 +130,18 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
26023                 if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
26024                         int id = idxd->sw_err.wq_idx;
26025 -                       struct idxd_wq *wq = &idxd->wqs[id];
26026 +                       struct idxd_wq *wq = idxd->wqs[id];
26028                         if (wq->type == IDXD_WQT_USER)
26029 -                               wake_up_interruptible(&wq->idxd_cdev.err_queue);
26030 +                               wake_up_interruptible(&wq->err_queue);
26031                 } else {
26032                         int i;
26034                         for (i = 0; i < idxd->max_wqs; i++) {
26035 -                               struct idxd_wq *wq = &idxd->wqs[i];
26036 +                               struct idxd_wq *wq = idxd->wqs[i];
26038                                 if (wq->type == IDXD_WQT_USER)
26039 -                                       wake_up_interruptible(&wq->idxd_cdev.err_queue);
26040 +                                       wake_up_interruptible(&wq->err_queue);
26041                         }
26042                 }
26044 diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
26045 index 18bf4d148989..9586b55abce5 100644
26046 --- a/drivers/dma/idxd/sysfs.c
26047 +++ b/drivers/dma/idxd/sysfs.c
26048 @@ -16,69 +16,6 @@ static char *idxd_wq_type_names[] = {
26049         [IDXD_WQT_USER]         = "user",
26050  };
26052 -static void idxd_conf_device_release(struct device *dev)
26054 -       dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
26057 -static struct device_type idxd_group_device_type = {
26058 -       .name = "group",
26059 -       .release = idxd_conf_device_release,
26062 -static struct device_type idxd_wq_device_type = {
26063 -       .name = "wq",
26064 -       .release = idxd_conf_device_release,
26067 -static struct device_type idxd_engine_device_type = {
26068 -       .name = "engine",
26069 -       .release = idxd_conf_device_release,
26072 -static struct device_type dsa_device_type = {
26073 -       .name = "dsa",
26074 -       .release = idxd_conf_device_release,
26077 -static struct device_type iax_device_type = {
26078 -       .name = "iax",
26079 -       .release = idxd_conf_device_release,
26082 -static inline bool is_dsa_dev(struct device *dev)
26084 -       return dev ? dev->type == &dsa_device_type : false;
26087 -static inline bool is_iax_dev(struct device *dev)
26089 -       return dev ? dev->type == &iax_device_type : false;
26092 -static inline bool is_idxd_dev(struct device *dev)
26094 -       return is_dsa_dev(dev) || is_iax_dev(dev);
26097 -static inline bool is_idxd_wq_dev(struct device *dev)
26099 -       return dev ? dev->type == &idxd_wq_device_type : false;
26102 -static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
26104 -       if (wq->type == IDXD_WQT_KERNEL &&
26105 -           strcmp(wq->name, "dmaengine") == 0)
26106 -               return true;
26107 -       return false;
26110 -static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
26112 -       return wq->type == IDXD_WQT_USER;
26115  static int idxd_config_bus_match(struct device *dev,
26116                                  struct device_driver *drv)
26118 @@ -322,7 +259,7 @@ static int idxd_config_bus_remove(struct device *dev)
26119                 dev_dbg(dev, "%s removing dev %s\n", __func__,
26120                         dev_name(&idxd->conf_dev));
26121                 for (i = 0; i < idxd->max_wqs; i++) {
26122 -                       struct idxd_wq *wq = &idxd->wqs[i];
26123 +                       struct idxd_wq *wq = idxd->wqs[i];
26125                         if (wq->state == IDXD_WQ_DISABLED)
26126                                 continue;
26127 @@ -334,7 +271,7 @@ static int idxd_config_bus_remove(struct device *dev)
26128                 idxd_unregister_dma_device(idxd);
26129                 rc = idxd_device_disable(idxd);
26130                 for (i = 0; i < idxd->max_wqs; i++) {
26131 -                       struct idxd_wq *wq = &idxd->wqs[i];
26132 +                       struct idxd_wq *wq = idxd->wqs[i];
26134                         mutex_lock(&wq->wq_lock);
26135                         idxd_wq_disable_cleanup(wq);
26136 @@ -405,7 +342,7 @@ struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
26137         return idxd_bus_types[idxd->type];
26140 -static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
26141 +struct device_type *idxd_get_device_type(struct idxd_device *idxd)
26143         if (idxd->type == IDXD_TYPE_DSA)
26144                 return &dsa_device_type;
26145 @@ -488,7 +425,7 @@ static ssize_t engine_group_id_store(struct device *dev,
26147         if (prevg)
26148                 prevg->num_engines--;
26149 -       engine->group = &idxd->groups[id];
26150 +       engine->group = idxd->groups[id];
26151         engine->group->num_engines++;
26153         return count;
26154 @@ -512,6 +449,19 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
26155         NULL,
26156  };
26158 +static void idxd_conf_engine_release(struct device *dev)
26160 +       struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
26162 +       kfree(engine);
26165 +struct device_type idxd_engine_device_type = {
26166 +       .name = "engine",
26167 +       .release = idxd_conf_engine_release,
26168 +       .groups = idxd_engine_attribute_groups,
26171  /* Group attributes */
26173  static void idxd_set_free_tokens(struct idxd_device *idxd)
26174 @@ -519,7 +469,7 @@ static void idxd_set_free_tokens(struct idxd_device *idxd)
26175         int i, tokens;
26177         for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
26178 -               struct idxd_group *g = &idxd->groups[i];
26179 +               struct idxd_group *g = idxd->groups[i];
26181                 tokens += g->tokens_reserved;
26182         }
26183 @@ -674,7 +624,7 @@ static ssize_t group_engines_show(struct device *dev,
26184         struct idxd_device *idxd = group->idxd;
26186         for (i = 0; i < idxd->max_engines; i++) {
26187 -               struct idxd_engine *engine = &idxd->engines[i];
26188 +               struct idxd_engine *engine = idxd->engines[i];
26190                 if (!engine->group)
26191                         continue;
26192 @@ -703,7 +653,7 @@ static ssize_t group_work_queues_show(struct device *dev,
26193         struct idxd_device *idxd = group->idxd;
26195         for (i = 0; i < idxd->max_wqs; i++) {
26196 -               struct idxd_wq *wq = &idxd->wqs[i];
26197 +               struct idxd_wq *wq = idxd->wqs[i];
26199                 if (!wq->group)
26200                         continue;
26201 @@ -824,6 +774,19 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
26202         NULL,
26203  };
26205 +static void idxd_conf_group_release(struct device *dev)
26207 +       struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
26209 +       kfree(group);
26212 +struct device_type idxd_group_device_type = {
26213 +       .name = "group",
26214 +       .release = idxd_conf_group_release,
26215 +       .groups = idxd_group_attribute_groups,
26218  /* IDXD work queue attribs */
26219  static ssize_t wq_clients_show(struct device *dev,
26220                                struct device_attribute *attr, char *buf)
26221 @@ -896,7 +859,7 @@ static ssize_t wq_group_id_store(struct device *dev,
26222                 return count;
26223         }
26225 -       group = &idxd->groups[id];
26226 +       group = idxd->groups[id];
26227         prevg = wq->group;
26229         if (prevg)
26230 @@ -960,7 +923,7 @@ static int total_claimed_wq_size(struct idxd_device *idxd)
26231         int wq_size = 0;
26233         for (i = 0; i < idxd->max_wqs; i++) {
26234 -               struct idxd_wq *wq = &idxd->wqs[i];
26235 +               struct idxd_wq *wq = idxd->wqs[i];
26237                 wq_size += wq->size;
26238         }
26239 @@ -1206,8 +1169,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
26240                                   struct device_attribute *attr, char *buf)
26242         struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
26243 +       int minor = -1;
26245 -       return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
26246 +       mutex_lock(&wq->wq_lock);
26247 +       if (wq->idxd_cdev)
26248 +               minor = wq->idxd_cdev->minor;
26249 +       mutex_unlock(&wq->wq_lock);
26251 +       if (minor == -1)
26252 +               return -ENXIO;
26253 +       return sysfs_emit(buf, "%d\n", minor);
26256  static struct device_attribute dev_attr_wq_cdev_minor =
26257 @@ -1356,6 +1327,20 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
26258         NULL,
26259  };
26261 +static void idxd_conf_wq_release(struct device *dev)
26263 +       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
26265 +       kfree(wq->wqcfg);
26266 +       kfree(wq);
26269 +struct device_type idxd_wq_device_type = {
26270 +       .name = "wq",
26271 +       .release = idxd_conf_wq_release,
26272 +       .groups = idxd_wq_attribute_groups,
26275  /* IDXD device attribs */
26276  static ssize_t version_show(struct device *dev, struct device_attribute *attr,
26277                             char *buf)
26278 @@ -1486,7 +1471,7 @@ static ssize_t clients_show(struct device *dev,
26280         spin_lock_irqsave(&idxd->dev_lock, flags);
26281         for (i = 0; i < idxd->max_wqs; i++) {
26282 -               struct idxd_wq *wq = &idxd->wqs[i];
26283 +               struct idxd_wq *wq = idxd->wqs[i];
26285                 count += wq->client_count;
26286         }
26287 @@ -1644,183 +1629,160 @@ static const struct attribute_group *idxd_attribute_groups[] = {
26288         NULL,
26289  };
26291 -static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
26292 +static void idxd_conf_device_release(struct device *dev)
26294 -       struct device *dev = &idxd->pdev->dev;
26295 -       int i, rc;
26296 +       struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
26298 +       kfree(idxd->groups);
26299 +       kfree(idxd->wqs);
26300 +       kfree(idxd->engines);
26301 +       kfree(idxd->irq_entries);
26302 +       ida_free(idxd_ida(idxd), idxd->id);
26303 +       kfree(idxd);
26306 +struct device_type dsa_device_type = {
26307 +       .name = "dsa",
26308 +       .release = idxd_conf_device_release,
26309 +       .groups = idxd_attribute_groups,
26312 +struct device_type iax_device_type = {
26313 +       .name = "iax",
26314 +       .release = idxd_conf_device_release,
26315 +       .groups = idxd_attribute_groups,
26318 +static int idxd_register_engine_devices(struct idxd_device *idxd)
26320 +       int i, j, rc;
26322         for (i = 0; i < idxd->max_engines; i++) {
26323 -               struct idxd_engine *engine = &idxd->engines[i];
26325 -               engine->conf_dev.parent = &idxd->conf_dev;
26326 -               dev_set_name(&engine->conf_dev, "engine%d.%d",
26327 -                            idxd->id, engine->id);
26328 -               engine->conf_dev.bus = idxd_get_bus_type(idxd);
26329 -               engine->conf_dev.groups = idxd_engine_attribute_groups;
26330 -               engine->conf_dev.type = &idxd_engine_device_type;
26331 -               dev_dbg(dev, "Engine device register: %s\n",
26332 -                       dev_name(&engine->conf_dev));
26333 -               rc = device_register(&engine->conf_dev);
26334 -               if (rc < 0) {
26335 -                       put_device(&engine->conf_dev);
26336 +               struct idxd_engine *engine = idxd->engines[i];
26338 +               rc = device_add(&engine->conf_dev);
26339 +               if (rc < 0)
26340                         goto cleanup;
26341 -               }
26342         }
26344         return 0;
26346  cleanup:
26347 -       while (i--) {
26348 -               struct idxd_engine *engine = &idxd->engines[i];
26349 +       j = i - 1;
26350 +       for (; i < idxd->max_engines; i++)
26351 +               put_device(&idxd->engines[i]->conf_dev);
26353 -               device_unregister(&engine->conf_dev);
26354 -       }
26355 +       while (j--)
26356 +               device_unregister(&idxd->engines[j]->conf_dev);
26357         return rc;
26360 -static int idxd_setup_group_sysfs(struct idxd_device *idxd)
26361 +static int idxd_register_group_devices(struct idxd_device *idxd)
26363 -       struct device *dev = &idxd->pdev->dev;
26364 -       int i, rc;
26365 +       int i, j, rc;
26367         for (i = 0; i < idxd->max_groups; i++) {
26368 -               struct idxd_group *group = &idxd->groups[i];
26370 -               group->conf_dev.parent = &idxd->conf_dev;
26371 -               dev_set_name(&group->conf_dev, "group%d.%d",
26372 -                            idxd->id, group->id);
26373 -               group->conf_dev.bus = idxd_get_bus_type(idxd);
26374 -               group->conf_dev.groups = idxd_group_attribute_groups;
26375 -               group->conf_dev.type = &idxd_group_device_type;
26376 -               dev_dbg(dev, "Group device register: %s\n",
26377 -                       dev_name(&group->conf_dev));
26378 -               rc = device_register(&group->conf_dev);
26379 -               if (rc < 0) {
26380 -                       put_device(&group->conf_dev);
26381 +               struct idxd_group *group = idxd->groups[i];
26383 +               rc = device_add(&group->conf_dev);
26384 +               if (rc < 0)
26385                         goto cleanup;
26386 -               }
26387         }
26389         return 0;
26391  cleanup:
26392 -       while (i--) {
26393 -               struct idxd_group *group = &idxd->groups[i];
26394 +       j = i - 1;
26395 +       for (; i < idxd->max_groups; i++)
26396 +               put_device(&idxd->groups[i]->conf_dev);
26398 -               device_unregister(&group->conf_dev);
26399 -       }
26400 +       while (j--)
26401 +               device_unregister(&idxd->groups[j]->conf_dev);
26402         return rc;
26405 -static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
26406 +static int idxd_register_wq_devices(struct idxd_device *idxd)
26408 -       struct device *dev = &idxd->pdev->dev;
26409 -       int i, rc;
26410 +       int i, rc, j;
26412         for (i = 0; i < idxd->max_wqs; i++) {
26413 -               struct idxd_wq *wq = &idxd->wqs[i];
26415 -               wq->conf_dev.parent = &idxd->conf_dev;
26416 -               dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
26417 -               wq->conf_dev.bus = idxd_get_bus_type(idxd);
26418 -               wq->conf_dev.groups = idxd_wq_attribute_groups;
26419 -               wq->conf_dev.type = &idxd_wq_device_type;
26420 -               dev_dbg(dev, "WQ device register: %s\n",
26421 -                       dev_name(&wq->conf_dev));
26422 -               rc = device_register(&wq->conf_dev);
26423 -               if (rc < 0) {
26424 -                       put_device(&wq->conf_dev);
26425 +               struct idxd_wq *wq = idxd->wqs[i];
26427 +               rc = device_add(&wq->conf_dev);
26428 +               if (rc < 0)
26429                         goto cleanup;
26430 -               }
26431         }
26433         return 0;
26435  cleanup:
26436 -       while (i--) {
26437 -               struct idxd_wq *wq = &idxd->wqs[i];
26438 +       j = i - 1;
26439 +       for (; i < idxd->max_wqs; i++)
26440 +               put_device(&idxd->wqs[i]->conf_dev);
26442 -               device_unregister(&wq->conf_dev);
26443 -       }
26444 +       while (j--)
26445 +               device_unregister(&idxd->wqs[j]->conf_dev);
26446         return rc;
26449 -static int idxd_setup_device_sysfs(struct idxd_device *idxd)
26450 +int idxd_register_devices(struct idxd_device *idxd)
26452         struct device *dev = &idxd->pdev->dev;
26453 -       int rc;
26454 -       char devname[IDXD_NAME_SIZE];
26456 -       sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
26457 -       idxd->conf_dev.parent = dev;
26458 -       dev_set_name(&idxd->conf_dev, "%s", devname);
26459 -       idxd->conf_dev.bus = idxd_get_bus_type(idxd);
26460 -       idxd->conf_dev.groups = idxd_attribute_groups;
26461 -       idxd->conf_dev.type = idxd_get_device_type(idxd);
26463 -       dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
26464 -       rc = device_register(&idxd->conf_dev);
26465 -       if (rc < 0) {
26466 -               put_device(&idxd->conf_dev);
26467 -               return rc;
26468 -       }
26469 +       int rc, i;
26471 -       return 0;
26474 -int idxd_setup_sysfs(struct idxd_device *idxd)
26476 -       struct device *dev = &idxd->pdev->dev;
26477 -       int rc;
26479 -       rc = idxd_setup_device_sysfs(idxd);
26480 -       if (rc < 0) {
26481 -               dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
26482 +       rc = device_add(&idxd->conf_dev);
26483 +       if (rc < 0)
26484                 return rc;
26485 -       }
26487 -       rc = idxd_setup_wq_sysfs(idxd);
26488 +       rc = idxd_register_wq_devices(idxd);
26489         if (rc < 0) {
26490 -               /* unregister conf dev */
26491 -               dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
26492 -               return rc;
26493 +               dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
26494 +               goto err_wq;
26495         }
26497 -       rc = idxd_setup_group_sysfs(idxd);
26498 +       rc = idxd_register_engine_devices(idxd);
26499         if (rc < 0) {
26500 -               /* unregister conf dev */
26501 -               dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
26502 -               return rc;
26503 +               dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
26504 +               goto err_engine;
26505         }
26507 -       rc = idxd_setup_engine_sysfs(idxd);
26508 +       rc = idxd_register_group_devices(idxd);
26509         if (rc < 0) {
26510 -               /* unregister conf dev */
26511 -               dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
26512 -               return rc;
26513 +               dev_dbg(dev, "Group device registering failed: %d\n", rc);
26514 +               goto err_group;
26515         }
26517         return 0;
26519 + err_group:
26520 +       for (i = 0; i < idxd->max_engines; i++)
26521 +               device_unregister(&idxd->engines[i]->conf_dev);
26522 + err_engine:
26523 +       for (i = 0; i < idxd->max_wqs; i++)
26524 +               device_unregister(&idxd->wqs[i]->conf_dev);
26525 + err_wq:
26526 +       device_del(&idxd->conf_dev);
26527 +       return rc;
26530 -void idxd_cleanup_sysfs(struct idxd_device *idxd)
26531 +void idxd_unregister_devices(struct idxd_device *idxd)
26533         int i;
26535         for (i = 0; i < idxd->max_wqs; i++) {
26536 -               struct idxd_wq *wq = &idxd->wqs[i];
26537 +               struct idxd_wq *wq = idxd->wqs[i];
26539                 device_unregister(&wq->conf_dev);
26540         }
26542         for (i = 0; i < idxd->max_engines; i++) {
26543 -               struct idxd_engine *engine = &idxd->engines[i];
26544 +               struct idxd_engine *engine = idxd->engines[i];
26546                 device_unregister(&engine->conf_dev);
26547         }
26549         for (i = 0; i < idxd->max_groups; i++) {
26550 -               struct idxd_group *group = &idxd->groups[i];
26551 +               struct idxd_group *group = idxd->groups[i];
26553                 device_unregister(&group->conf_dev);
26554         }
26555 diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
26556 index aae82db542a5..76aacbac5869 100644
26557 --- a/drivers/extcon/extcon-arizona.c
26558 +++ b/drivers/extcon/extcon-arizona.c
26559 @@ -601,7 +601,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
26560         struct arizona *arizona = info->arizona;
26561         int id_gpio = arizona->pdata.hpdet_id_gpio;
26562         unsigned int report = EXTCON_JACK_HEADPHONE;
26563 -       int ret, reading;
26564 +       int ret, reading, state;
26565         bool mic = false;
26567         mutex_lock(&info->lock);
26568 @@ -614,12 +614,11 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
26569         }
26571         /* If the cable was removed while measuring ignore the result */
26572 -       ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
26573 -       if (ret < 0) {
26574 -               dev_err(arizona->dev, "Failed to check cable state: %d\n",
26575 -                       ret);
26576 +       state = extcon_get_state(info->edev, EXTCON_MECHANICAL);
26577 +       if (state < 0) {
26578 +               dev_err(arizona->dev, "Failed to check cable state: %d\n", state);
26579                 goto out;
26580 -       } else if (!ret) {
26581 +       } else if (!state) {
26582                 dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
26583                 goto done;
26584         }
26585 @@ -667,7 +666,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
26586                 gpio_set_value_cansleep(id_gpio, 0);
26588         /* If we have a mic then reenable MICDET */
26589 -       if (mic || info->mic)
26590 +       if (state && (mic || info->mic))
26591                 arizona_start_mic(info);
26593         if (info->hpdet_active) {
26594 @@ -675,7 +674,9 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
26595                 info->hpdet_active = false;
26596         }
26598 -       info->hpdet_done = true;
26599 +       /* Do not set hp_det done when the cable has been unplugged */
26600 +       if (state)
26601 +               info->hpdet_done = true;
26603  out:
26604         mutex_unlock(&info->lock);
26605 @@ -1759,25 +1760,6 @@ static int arizona_extcon_remove(struct platform_device *pdev)
26606         bool change;
26607         int ret;
26609 -       ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
26610 -                                      ARIZONA_MICD_ENA, 0,
26611 -                                      &change);
26612 -       if (ret < 0) {
26613 -               dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
26614 -                       ret);
26615 -       } else if (change) {
26616 -               regulator_disable(info->micvdd);
26617 -               pm_runtime_put(info->dev);
26618 -       }
26620 -       gpiod_put(info->micd_pol_gpio);
26622 -       pm_runtime_disable(&pdev->dev);
26624 -       regmap_update_bits(arizona->regmap,
26625 -                          ARIZONA_MICD_CLAMP_CONTROL,
26626 -                          ARIZONA_MICD_CLAMP_MODE_MASK, 0);
26628         if (info->micd_clamp) {
26629                 jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
26630                 jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
26631 @@ -1793,10 +1775,31 @@ static int arizona_extcon_remove(struct platform_device *pdev)
26632         arizona_free_irq(arizona, jack_irq_rise, info);
26633         arizona_free_irq(arizona, jack_irq_fall, info);
26634         cancel_delayed_work_sync(&info->hpdet_work);
26635 +       cancel_delayed_work_sync(&info->micd_detect_work);
26636 +       cancel_delayed_work_sync(&info->micd_timeout_work);
26638 +       ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
26639 +                                      ARIZONA_MICD_ENA, 0,
26640 +                                      &change);
26641 +       if (ret < 0) {
26642 +               dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
26643 +                       ret);
26644 +       } else if (change) {
26645 +               regulator_disable(info->micvdd);
26646 +               pm_runtime_put(info->dev);
26647 +       }
26649 +       regmap_update_bits(arizona->regmap,
26650 +                          ARIZONA_MICD_CLAMP_CONTROL,
26651 +                          ARIZONA_MICD_CLAMP_MODE_MASK, 0);
26652         regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
26653                            ARIZONA_JD1_ENA, 0);
26654         arizona_clk32k_disable(arizona);
26656 +       gpiod_put(info->micd_pol_gpio);
26658 +       pm_runtime_disable(&pdev->dev);
26660         return 0;
26663 diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
26664 index 3f14dffb9669..5dd19dbd67a3 100644
26665 --- a/drivers/firmware/Kconfig
26666 +++ b/drivers/firmware/Kconfig
26667 @@ -237,6 +237,7 @@ config INTEL_STRATIX10_RSU
26668  config QCOM_SCM
26669         bool
26670         depends on ARM || ARM64
26671 +       depends on HAVE_ARM_SMCCC
26672         select RESET_CONTROLLER
26674  config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
26675 diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
26676 index c23466e05e60..d0537573501e 100644
26677 --- a/drivers/firmware/efi/libstub/Makefile
26678 +++ b/drivers/firmware/efi/libstub/Makefile
26679 @@ -13,7 +13,8 @@ cflags-$(CONFIG_X86)          += -m$(BITS) -D__KERNEL__ \
26680                                    -Wno-pointer-sign \
26681                                    $(call cc-disable-warning, address-of-packed-member) \
26682                                    $(call cc-disable-warning, gnu) \
26683 -                                  -fno-asynchronous-unwind-tables
26684 +                                  -fno-asynchronous-unwind-tables \
26685 +                                  $(CLANG_FLAGS)
26687  # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
26688  # disable the stackleak plugin
26689 diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
26690 index 497c13ba98d6..d111833364ba 100644
26691 --- a/drivers/firmware/qcom_scm-smc.c
26692 +++ b/drivers/firmware/qcom_scm-smc.c
26693 @@ -77,8 +77,10 @@ static void __scm_smc_do(const struct arm_smccc_args *smc,
26694         }  while (res->a0 == QCOM_SCM_V2_EBUSY);
26697 -int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
26698 -                struct qcom_scm_res *res, bool atomic)
26700 +int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
26701 +                  enum qcom_scm_convention qcom_convention,
26702 +                  struct qcom_scm_res *res, bool atomic)
26704         int arglen = desc->arginfo & 0xf;
26705         int i;
26706 @@ -87,9 +89,8 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
26707         size_t alloc_len;
26708         gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
26709         u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
26710 -       u32 qcom_smccc_convention =
26711 -                       (qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
26712 -                       ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
26713 +       u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
26714 +                                   ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
26715         struct arm_smccc_res smc_res;
26716         struct arm_smccc_args smc = {0};
26718 @@ -148,4 +149,5 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
26719         }
26721         return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
26724 diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
26725 index f57779fc7ee9..9ac84b5d6ce0 100644
26726 --- a/drivers/firmware/qcom_scm.c
26727 +++ b/drivers/firmware/qcom_scm.c
26728 @@ -113,14 +113,10 @@ static void qcom_scm_clk_disable(void)
26729         clk_disable_unprepare(__scm->bus_clk);
26732 -static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
26733 -                                       u32 cmd_id);
26734 +enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
26735 +static DEFINE_SPINLOCK(scm_query_lock);
26737 -enum qcom_scm_convention qcom_scm_convention;
26738 -static bool has_queried __read_mostly;
26739 -static DEFINE_SPINLOCK(query_lock);
26741 -static void __query_convention(void)
26742 +static enum qcom_scm_convention __get_convention(void)
26744         unsigned long flags;
26745         struct qcom_scm_desc desc = {
26746 @@ -133,36 +129,50 @@ static void __query_convention(void)
26747                 .owner = ARM_SMCCC_OWNER_SIP,
26748         };
26749         struct qcom_scm_res res;
26750 +       enum qcom_scm_convention probed_convention;
26751         int ret;
26752 +       bool forced = false;
26754 -       spin_lock_irqsave(&query_lock, flags);
26755 -       if (has_queried)
26756 -               goto out;
26757 +       if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
26758 +               return qcom_scm_convention;
26760 -       qcom_scm_convention = SMC_CONVENTION_ARM_64;
26761 -       // Device isn't required as there is only one argument - no device
26762 -       // needed to dma_map_single to secure world
26763 -       ret = scm_smc_call(NULL, &desc, &res, true);
26764 +       /*
26765 +        * Device isn't required as there is only one argument - no device
26766 +        * needed to dma_map_single to secure world
26767 +        */
26768 +       probed_convention = SMC_CONVENTION_ARM_64;
26769 +       ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
26770         if (!ret && res.result[0] == 1)
26771 -               goto out;
26772 +               goto found;
26774 +       /*
26775 +        * Some SC7180 firmwares didn't implement the
26776 +        * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
26777 +        * calling conventions on these firmwares. Luckily we don't make any
26778 +        * early calls into the firmware on these SoCs so the device pointer
26779 +        * will be valid here to check if the compatible matches.
26780 +        */
26781 +       if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
26782 +               forced = true;
26783 +               goto found;
26784 +       }
26786 -       qcom_scm_convention = SMC_CONVENTION_ARM_32;
26787 -       ret = scm_smc_call(NULL, &desc, &res, true);
26788 +       probed_convention = SMC_CONVENTION_ARM_32;
26789 +       ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
26790         if (!ret && res.result[0] == 1)
26791 -               goto out;
26793 -       qcom_scm_convention = SMC_CONVENTION_LEGACY;
26794 -out:
26795 -       has_queried = true;
26796 -       spin_unlock_irqrestore(&query_lock, flags);
26797 -       pr_info("qcom_scm: convention: %s\n",
26798 -               qcom_scm_convention_names[qcom_scm_convention]);
26800 +               goto found;
26802 +       probed_convention = SMC_CONVENTION_LEGACY;
26803 +found:
26804 +       spin_lock_irqsave(&scm_query_lock, flags);
26805 +       if (probed_convention != qcom_scm_convention) {
26806 +               qcom_scm_convention = probed_convention;
26807 +               pr_info("qcom_scm: convention: %s%s\n",
26808 +                       qcom_scm_convention_names[qcom_scm_convention],
26809 +                       forced ? " (forced)" : "");
26810 +       }
26811 +       spin_unlock_irqrestore(&scm_query_lock, flags);
26813 -static inline enum qcom_scm_convention __get_convention(void)
26815 -       if (unlikely(!has_queried))
26816 -               __query_convention();
26817         return qcom_scm_convention;
26820 @@ -219,8 +229,8 @@ static int qcom_scm_call_atomic(struct device *dev,
26821         }
26824 -static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
26825 -                                       u32 cmd_id)
26826 +static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
26827 +                                        u32 cmd_id)
26829         int ret;
26830         struct qcom_scm_desc desc = {
26831 @@ -247,7 +257,7 @@ static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
26833         ret = qcom_scm_call(dev, &desc, &res);
26835 -       return ret ? : res.result[0];
26836 +       return ret ? false : !!res.result[0];
26839  /**
26840 @@ -585,9 +595,8 @@ bool qcom_scm_pas_supported(u32 peripheral)
26841         };
26842         struct qcom_scm_res res;
26844 -       ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
26845 -                                          QCOM_SCM_PIL_PAS_IS_SUPPORTED);
26846 -       if (ret <= 0)
26847 +       if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
26848 +                                         QCOM_SCM_PIL_PAS_IS_SUPPORTED))
26849                 return false;
26851         ret = qcom_scm_call(__scm->dev, &desc, &res);
26852 @@ -1060,17 +1069,18 @@ EXPORT_SYMBOL(qcom_scm_ice_set_key);
26853   */
26854  bool qcom_scm_hdcp_available(void)
26856 +       bool avail;
26857         int ret = qcom_scm_clk_enable();
26859         if (ret)
26860                 return ret;
26862 -       ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
26863 +       avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
26864                                                 QCOM_SCM_HDCP_INVOKE);
26866         qcom_scm_clk_disable();
26868 -       return ret > 0;
26869 +       return avail;
26871  EXPORT_SYMBOL(qcom_scm_hdcp_available);
26873 @@ -1242,7 +1252,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
26874         __scm = scm;
26875         __scm->dev = &pdev->dev;
26877 -       __query_convention();
26878 +       __get_convention();
26880         /*
26881          * If requested enable "download mode", from this point on warmboot
26882 diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
26883 index 95cd1ac30ab0..632fe3142462 100644
26884 --- a/drivers/firmware/qcom_scm.h
26885 +++ b/drivers/firmware/qcom_scm.h
26886 @@ -61,8 +61,11 @@ struct qcom_scm_res {
26887  };
26889  #define SCM_SMC_FNID(s, c)     ((((s) & 0xFF) << 8) | ((c) & 0xFF))
26890 -extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
26891 -                       struct qcom_scm_res *res, bool atomic);
26892 +extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
26893 +                         enum qcom_scm_convention qcom_convention,
26894 +                         struct qcom_scm_res *res, bool atomic);
26895 +#define scm_smc_call(dev, desc, res, atomic) \
26896 +       __scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
26898  #define SCM_LEGACY_FNID(s, c)  (((s) << 10) | ((c) & 0x3ff))
26899  extern int scm_legacy_call_atomic(struct device *dev,
26900 diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
26901 index 7eb9958662dd..83082e2f2e44 100644
26902 --- a/drivers/firmware/xilinx/zynqmp.c
26903 +++ b/drivers/firmware/xilinx/zynqmp.c
26904 @@ -2,7 +2,7 @@
26905  /*
26906   * Xilinx Zynq MPSoC Firmware layer
26907   *
26908 - *  Copyright (C) 2014-2020 Xilinx, Inc.
26909 + *  Copyright (C) 2014-2021 Xilinx, Inc.
26910   *
26911   *  Michal Simek <michal.simek@xilinx.com>
26912   *  Davorin Mista <davorin.mista@aggios.com>
26913 @@ -1280,12 +1280,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
26914  static int zynqmp_firmware_remove(struct platform_device *pdev)
26916         struct pm_api_feature_data *feature_data;
26917 +       struct hlist_node *tmp;
26918         int i;
26920         mfd_remove_devices(&pdev->dev);
26921         zynqmp_pm_api_debugfs_exit();
26923 -       hash_for_each(pm_api_features_map, i, feature_data, hentry) {
26924 +       hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
26925                 hash_del(&feature_data->hentry);
26926                 kfree(feature_data);
26927         }
26928 diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
26929 index 04e47e266f26..b44523ea8c91 100644
26930 --- a/drivers/fpga/dfl-pci.c
26931 +++ b/drivers/fpga/dfl-pci.c
26932 @@ -69,14 +69,16 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
26935  /* PCI Device ID */
26936 -#define PCIE_DEVICE_ID_PF_INT_5_X      0xBCBD
26937 -#define PCIE_DEVICE_ID_PF_INT_6_X      0xBCC0
26938 -#define PCIE_DEVICE_ID_PF_DSC_1_X      0x09C4
26939 -#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
26940 +#define PCIE_DEVICE_ID_PF_INT_5_X              0xBCBD
26941 +#define PCIE_DEVICE_ID_PF_INT_6_X              0xBCC0
26942 +#define PCIE_DEVICE_ID_PF_DSC_1_X              0x09C4
26943 +#define PCIE_DEVICE_ID_INTEL_PAC_N3000         0x0B30
26944 +#define PCIE_DEVICE_ID_INTEL_PAC_D5005         0x0B2B
26945  /* VF Device */
26946 -#define PCIE_DEVICE_ID_VF_INT_5_X      0xBCBF
26947 -#define PCIE_DEVICE_ID_VF_INT_6_X      0xBCC1
26948 -#define PCIE_DEVICE_ID_VF_DSC_1_X      0x09C5
26949 +#define PCIE_DEVICE_ID_VF_INT_5_X              0xBCBF
26950 +#define PCIE_DEVICE_ID_VF_INT_6_X              0xBCC1
26951 +#define PCIE_DEVICE_ID_VF_DSC_1_X              0x09C5
26952 +#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF      0x0B2C
26954  static struct pci_device_id cci_pcie_id_tbl[] = {
26955         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
26956 @@ -86,6 +88,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
26957         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
26958         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
26959         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
26960 +       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
26961 +       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
26962         {0,}
26963  };
26964  MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
26965 diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
26966 index 27defa98092d..fee4d0abf6bf 100644
26967 --- a/drivers/fpga/xilinx-spi.c
26968 +++ b/drivers/fpga/xilinx-spi.c
26969 @@ -233,25 +233,19 @@ static int xilinx_spi_probe(struct spi_device *spi)
26971         /* PROGRAM_B is active low */
26972         conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
26973 -       if (IS_ERR(conf->prog_b)) {
26974 -               dev_err(&spi->dev, "Failed to get PROGRAM_B gpio: %ld\n",
26975 -                       PTR_ERR(conf->prog_b));
26976 -               return PTR_ERR(conf->prog_b);
26977 -       }
26978 +       if (IS_ERR(conf->prog_b))
26979 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
26980 +                                    "Failed to get PROGRAM_B gpio\n");
26982         conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
26983 -       if (IS_ERR(conf->init_b)) {
26984 -               dev_err(&spi->dev, "Failed to get INIT_B gpio: %ld\n",
26985 -                       PTR_ERR(conf->init_b));
26986 -               return PTR_ERR(conf->init_b);
26987 -       }
26988 +       if (IS_ERR(conf->init_b))
26989 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
26990 +                                    "Failed to get INIT_B gpio\n");
26992         conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
26993 -       if (IS_ERR(conf->done)) {
26994 -               dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
26995 -                       PTR_ERR(conf->done));
26996 -               return PTR_ERR(conf->done);
26997 -       }
26998 +       if (IS_ERR(conf->done))
26999 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
27000 +                                    "Failed to get DONE gpio\n");
27002         mgr = devm_fpga_mgr_create(&spi->dev,
27003                                    "Xilinx Slave Serial FPGA Manager",
27004 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
27005 index 8a5a8ff5d362..5eee251e3335 100644
27006 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
27007 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
27008 @@ -3613,6 +3613,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
27010         dev_info(adev->dev, "amdgpu: finishing device.\n");
27011         flush_delayed_work(&adev->delayed_init_work);
27012 +       ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
27013         adev->shutdown = true;
27015         kfree(adev->pci_state);
27016 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
27017 index f753e04fee99..a2ac44cc2a6d 100644
27018 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
27019 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
27020 @@ -1355,7 +1355,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
27021                         }
27022                 }
27023         }
27024 -       return r;
27025 +       return 0;
27028  int amdgpu_display_resume_helper(struct amdgpu_device *adev)
27029 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
27030 index d56f4023ebb3..7e8e46c39dbd 100644
27031 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
27032 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
27033 @@ -533,6 +533,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
27035                 if (!ring || !ring->fence_drv.initialized)
27036                         continue;
27037 +               if (!ring->no_scheduler)
27038 +                       drm_sched_fini(&ring->sched);
27039                 r = amdgpu_fence_wait_empty(ring);
27040                 if (r) {
27041                         /* no need to trigger GPU reset as we are unloading */
27042 @@ -541,8 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
27043                 if (ring->fence_drv.irq_src)
27044                         amdgpu_irq_put(adev, ring->fence_drv.irq_src,
27045                                        ring->fence_drv.irq_type);
27046 -               if (!ring->no_scheduler)
27047 -                       drm_sched_fini(&ring->sched);
27049                 del_timer_sync(&ring->fence_drv.fallback_timer);
27050                 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
27051                         dma_fence_put(ring->fence_drv.fences[j]);
27052 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
27053 index 7645223ea0ef..97c11aa47ad0 100644
27054 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
27055 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
27056 @@ -77,6 +77,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
27057                 }
27059                 ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
27060 +               /* flush the cache before commit the IB */
27061 +               ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
27063                 if (!vm)
27064                         ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
27065 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
27066 index 94b069630db3..b4971e90b98c 100644
27067 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
27068 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
27069 @@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
27070         /* Check if we have an idle VMID */
27071         i = 0;
27072         list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
27073 -               fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
27074 +               /* Don't use per engine and per process VMID at the same time */
27075 +               struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
27076 +                       NULL : ring;
27078 +               fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
27079                 if (!fences[i])
27080                         break;
27081                 ++i;
27082 @@ -281,7 +285,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
27083         if (updates && (*id)->flushed_updates &&
27084             updates->context == (*id)->flushed_updates->context &&
27085             !dma_fence_is_later(updates, (*id)->flushed_updates))
27086 -           updates = NULL;
27087 +               updates = NULL;
27089         if ((*id)->owner != vm->immediate.fence_context ||
27090             job->vm_pd_addr != (*id)->pd_gpu_addr ||
27091 @@ -290,6 +294,10 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
27092              !dma_fence_is_signaled((*id)->last_flush))) {
27093                 struct dma_fence *tmp;
27095 +               /* Don't use per engine and per process VMID at the same time */
27096 +               if (adev->vm_manager.concurrent_flush)
27097 +                       ring = NULL;
27099                 /* to prevent one context starved by another context */
27100                 (*id)->pd_gpu_addr = 0;
27101                 tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
27102 @@ -365,12 +373,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
27103                 if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
27104                         needs_flush = true;
27106 -               /* Concurrent flushes are only possible starting with Vega10 and
27107 -                * are broken on Navi10 and Navi14.
27108 -                */
27109 -               if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
27110 -                                   adev->asic_type == CHIP_NAVI10 ||
27111 -                                   adev->asic_type == CHIP_NAVI14))
27112 +               if (needs_flush && !adev->vm_manager.concurrent_flush)
27113                         continue;
27115                 /* Good, we can use this VMID. Remember this submission as
27116 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
27117 index afbbec82a289..9be945d8e72f 100644
27118 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
27119 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
27120 @@ -535,7 +535,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
27121                 for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
27122                         struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
27124 -                       if (!src)
27125 +                       if (!src || !src->funcs || !src->funcs->set)
27126                                 continue;
27127                         for (k = 0; k < src->num_types; k++)
27128                                 amdgpu_irq_update(adev, src, k);
27129 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
27130 index 19c0a3655228..82e9ecf84352 100644
27131 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
27132 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
27133 @@ -519,8 +519,10 @@ static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
27134         pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
27135                                                                 GFP_KERNEL);
27137 -       if (!pmu_entry->pmu.attr_groups)
27138 +       if (!pmu_entry->pmu.attr_groups) {
27139 +               ret = -ENOMEM;
27140                 goto err_attr_group;
27141 +       }
27143         snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
27144                                 adev_to_drm(pmu_entry->adev)->primary->index);
27145 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
27146 index 5efa331e3ee8..383c178cf074 100644
27147 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
27148 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
27149 @@ -942,7 +942,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
27150                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
27152         /* double check that we don't free the table twice */
27153 -       if (!ttm->sg->sgl)
27154 +       if (!ttm->sg || !ttm->sg->sgl)
27155                 return;
27157         /* unmap the pages mapped to the device */
27158 @@ -1162,13 +1162,13 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
27159         struct amdgpu_ttm_tt *gtt = (void *)ttm;
27160         int r;
27162 -       if (!gtt->bound)
27163 -               return;
27165         /* if the pages have userptr pinning then clear that first */
27166         if (gtt->userptr)
27167                 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
27169 +       if (!gtt->bound)
27170 +               return;
27172         if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
27173                 return;
27175 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
27176 index e2ed4689118a..c6dbc0801604 100644
27177 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
27178 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
27179 @@ -259,7 +259,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
27180                 if ((adev->asic_type == CHIP_POLARIS10 ||
27181                      adev->asic_type == CHIP_POLARIS11) &&
27182                     (adev->uvd.fw_version < FW_1_66_16))
27183 -                       DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
27184 +                       DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
27185                                   version_major, version_minor);
27186         } else {
27187                 unsigned int enc_major, enc_minor, dec_minor;
27188 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
27189 index 326dae31b675..a566bbe26bdd 100644
27190 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
27191 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
27192 @@ -92,13 +92,13 @@ struct amdgpu_prt_cb {
27193  static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
27195         mutex_lock(&vm->eviction_lock);
27196 -       vm->saved_flags = memalloc_nofs_save();
27197 +       vm->saved_flags = memalloc_noreclaim_save();
27200  static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
27202         if (mutex_trylock(&vm->eviction_lock)) {
27203 -               vm->saved_flags = memalloc_nofs_save();
27204 +               vm->saved_flags = memalloc_noreclaim_save();
27205                 return 1;
27206         }
27207         return 0;
27208 @@ -106,7 +106,7 @@ static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
27210  static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
27212 -       memalloc_nofs_restore(vm->saved_flags);
27213 +       memalloc_noreclaim_restore(vm->saved_flags);
27214         mutex_unlock(&vm->eviction_lock);
27217 @@ -3147,6 +3147,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
27219         unsigned i;
27221 +       /* Concurrent flushes are only possible starting with Vega10 and
27222 +        * are broken on Navi10 and Navi14.
27223 +        */
27224 +       adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
27225 +                                             adev->asic_type == CHIP_NAVI10 ||
27226 +                                             adev->asic_type == CHIP_NAVI14);
27227         amdgpu_vmid_mgr_init(adev);
27229         adev->vm_manager.fence_context =
27230 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
27231 index 976a12e5a8b9..4e140288159c 100644
27232 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
27233 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
27234 @@ -331,6 +331,7 @@ struct amdgpu_vm_manager {
27235         /* Handling of VMIDs */
27236         struct amdgpu_vmid_mgr                  id_mgr[AMDGPU_MAX_VMHUBS];
27237         unsigned int                            first_kfd_vmid;
27238 +       bool                                    concurrent_flush;
27240         /* Handling of VM fences */
27241         u64                                     fence_context;
27242 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
27243 index 659b385b27b5..4d3a24fdeb9c 100644
27244 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
27245 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
27246 @@ -468,15 +468,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
27251 + * NOTE psp_xgmi_node_info.num_hops layout is as follows:
27252 + * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
27253 + * num_hops[5:3] = reserved
27254 + * num_hops[2:0] = number of hops
27255 + */
27256  int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
27257                 struct amdgpu_device *peer_adev)
27259         struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
27260 +       uint8_t num_hops_mask = 0x7;
27261         int i;
27263         for (i = 0 ; i < top->num_nodes; ++i)
27264                 if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
27265 -                       return top->nodes[i].num_hops;
27266 +                       return top->nodes[i].num_hops & num_hops_mask;
27267         return  -EINVAL;
27270 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
27271 index 2d832fc23119..421d6069c509 100644
27272 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
27273 +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
27274 @@ -59,6 +59,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
27275  MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
27276  MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
27277  MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
27278 +MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
27279  MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
27280  MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
27281  MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
27282 @@ -243,10 +244,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
27283                         chip_name = "polaris10";
27284                 break;
27285         case CHIP_POLARIS12:
27286 -               if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
27287 +               if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
27288                         chip_name = "polaris12_k";
27289 -               else
27290 -                       chip_name = "polaris12";
27291 +               } else {
27292 +                       WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
27293 +                       /* Polaris12 32bit ASIC needs a special MC firmware */
27294 +                       if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
27295 +                               chip_name = "polaris12_32";
27296 +                       else
27297 +                               chip_name = "polaris12";
27298 +               }
27299                 break;
27300         case CHIP_FIJI:
27301         case CHIP_CARRIZO:
27302 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
27303 index def583916294..9b844e9fb16f 100644
27304 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
27305 +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
27306 @@ -584,6 +584,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
27307         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
27308                         VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
27309                         AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
27311 +       /* VCN global tiling registers */
27312 +       WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
27313 +               UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
27316  static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
27317 diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
27318 index 88626d83e07b..ca8efa5c6978 100644
27319 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
27320 +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
27321 @@ -220,10 +220,8 @@ static int vega10_ih_enable_ring(struct amdgpu_device *adev,
27322         tmp = vega10_ih_rb_cntl(ih, tmp);
27323         if (ih == &adev->irq.ih)
27324                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
27325 -       if (ih == &adev->irq.ih1) {
27326 -               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
27327 +       if (ih == &adev->irq.ih1)
27328                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
27329 -       }
27330         if (amdgpu_sriov_vf(adev)) {
27331                 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
27332                         dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
27333 @@ -265,7 +263,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
27334         u32 ih_chicken;
27335         int ret;
27336         int i;
27337 -       u32 tmp;
27339         /* disable irqs */
27340         ret = vega10_ih_toggle_interrupts(adev, false);
27341 @@ -291,15 +288,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
27342                 }
27343         }
27345 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
27346 -       tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
27347 -                           CLIENT18_IS_STORM_CLIENT, 1);
27348 -       WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
27350 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
27351 -       tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
27352 -       WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
27354         pci_set_master(adev->pdev);
27356         /* enable interrupts */
27357 @@ -345,11 +333,17 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
27358         u32 wptr, tmp;
27359         struct amdgpu_ih_regs *ih_regs;
27361 -       wptr = le32_to_cpu(*ih->wptr_cpu);
27362 -       ih_regs = &ih->ih_regs;
27363 +       if (ih == &adev->irq.ih) {
27364 +               /* Only ring0 supports writeback. On other rings fall back
27365 +                * to register-based code with overflow checking below.
27366 +                */
27367 +               wptr = le32_to_cpu(*ih->wptr_cpu);
27369 -       if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
27370 -               goto out;
27371 +               if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
27372 +                       goto out;
27373 +       }
27375 +       ih_regs = &ih->ih_regs;
27377         /* Double check that the overflow wasn't already cleared. */
27378         wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
27379 @@ -440,15 +434,11 @@ static int vega10_ih_self_irq(struct amdgpu_device *adev,
27380                               struct amdgpu_irq_src *source,
27381                               struct amdgpu_iv_entry *entry)
27383 -       uint32_t wptr = cpu_to_le32(entry->src_data[0]);
27385         switch (entry->ring_id) {
27386         case 1:
27387 -               *adev->irq.ih1.wptr_cpu = wptr;
27388                 schedule_work(&adev->irq.ih1_work);
27389                 break;
27390         case 2:
27391 -               *adev->irq.ih2.wptr_cpu = wptr;
27392                 schedule_work(&adev->irq.ih2_work);
27393                 break;
27394         default: break;
27395 diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
27396 index 5a3c867d5881..86dcf448e0c2 100644
27397 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
27398 +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
27399 @@ -104,6 +104,8 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
27401         tmp = RREG32(ih_regs->ih_rb_cntl);
27402         tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
27403 +       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
27405         /* enable_intr field is only valid in ring0 */
27406         if (ih == &adev->irq.ih)
27407                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
27408 @@ -220,10 +222,8 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
27409         tmp = vega20_ih_rb_cntl(ih, tmp);
27410         if (ih == &adev->irq.ih)
27411                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
27412 -       if (ih == &adev->irq.ih1) {
27413 -               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
27414 +       if (ih == &adev->irq.ih1)
27415                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
27416 -       }
27417         if (amdgpu_sriov_vf(adev)) {
27418                 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
27419                         dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
27420 @@ -297,7 +297,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
27421         u32 ih_chicken;
27422         int ret;
27423         int i;
27424 -       u32 tmp;
27426         /* disable irqs */
27427         ret = vega20_ih_toggle_interrupts(adev, false);
27428 @@ -326,15 +325,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
27429                 }
27430         }
27432 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
27433 -       tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
27434 -                           CLIENT18_IS_STORM_CLIENT, 1);
27435 -       WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
27437 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
27438 -       tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
27439 -       WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
27441         pci_set_master(adev->pdev);
27443         /* enable interrupts */
27444 @@ -380,11 +370,17 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
27445         u32 wptr, tmp;
27446         struct amdgpu_ih_regs *ih_regs;
27448 -       wptr = le32_to_cpu(*ih->wptr_cpu);
27449 -       ih_regs = &ih->ih_regs;
27450 +       if (ih == &adev->irq.ih) {
27451 +               /* Only ring0 supports writeback. On other rings fall back
27452 +                * to register-based code with overflow checking below.
27453 +                */
27454 +               wptr = le32_to_cpu(*ih->wptr_cpu);
27456 -       if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
27457 -               goto out;
27458 +               if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
27459 +                       goto out;
27460 +       }
27462 +       ih_regs = &ih->ih_regs;
27464         /* Double check that the overflow wasn't already cleared. */
27465         wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
27466 @@ -476,15 +472,11 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
27467                               struct amdgpu_irq_src *source,
27468                               struct amdgpu_iv_entry *entry)
27470 -       uint32_t wptr = cpu_to_le32(entry->src_data[0]);
27472         switch (entry->ring_id) {
27473         case 1:
27474 -               *adev->irq.ih1.wptr_cpu = wptr;
27475                 schedule_work(&adev->irq.ih1_work);
27476                 break;
27477         case 2:
27478 -               *adev->irq.ih2.wptr_cpu = wptr;
27479                 schedule_work(&adev->irq.ih2_work);
27480                 break;
27481         default: break;
27482 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
27483 index 511712c2e382..673d5e34f213 100644
27484 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
27485 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
27486 @@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
27488         return single_open(file, show, NULL);
27490 +static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
27492 +       seq_printf(m, "echo gpu_id > hang_hws\n");
27493 +       return 0;
27496  static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
27497         const char __user *user_buf, size_t size, loff_t *ppos)
27498 @@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
27499         debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
27500                             kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
27501         debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
27502 -                           NULL, &kfd_debugfs_hang_hws_fops);
27503 +                           kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
27506  void kfd_debugfs_fini(void)
27507 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
27508 index 4598a9a58125..a4266c4bca13 100644
27509 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
27510 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
27511 @@ -1128,6 +1128,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
27513  static int initialize_cpsch(struct device_queue_manager *dqm)
27515 +       uint64_t num_sdma_queues;
27516 +       uint64_t num_xgmi_sdma_queues;
27518         pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
27520         mutex_init(&dqm->lock_hidden);
27521 @@ -1136,8 +1139,18 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
27522         dqm->active_cp_queue_count = 0;
27523         dqm->gws_queue_count = 0;
27524         dqm->active_runlist = false;
27525 -       dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
27526 -       dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
27528 +       num_sdma_queues = get_num_sdma_queues(dqm);
27529 +       if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
27530 +               dqm->sdma_bitmap = ULLONG_MAX;
27531 +       else
27532 +               dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
27534 +       num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
27535 +       if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
27536 +               dqm->xgmi_sdma_bitmap = ULLONG_MAX;
27537 +       else
27538 +               dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
27540         INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
27542 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
27543 index 66bbca61e3ef..9318936aa805 100644
27544 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
27545 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
27546 @@ -20,6 +20,10 @@
27547   * OTHER DEALINGS IN THE SOFTWARE.
27548   */
27550 +#include <linux/kconfig.h>
27552 +#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
27554  #include <linux/printk.h>
27555  #include <linux/device.h>
27556  #include <linux/slab.h>
27557 @@ -355,3 +359,5 @@ int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
27559         return 0;
27562 +#endif
27563 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
27564 index dd23d9fdf6a8..afd420b01a0c 100644
27565 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
27566 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
27567 @@ -23,7 +23,9 @@
27568  #ifndef __KFD_IOMMU_H__
27569  #define __KFD_IOMMU_H__
27571 -#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
27572 +#include <linux/kconfig.h>
27574 +#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
27576  #define KFD_SUPPORT_IOMMU_V2
27578 @@ -46,6 +48,9 @@ static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
27580  static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
27582 +#if IS_MODULE(CONFIG_AMD_IOMMU_V2)
27583 +       WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD");
27584 +#endif
27585         return 0;
27588 @@ -73,6 +78,6 @@ static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
27589         return 0;
27592 -#endif /* defined(CONFIG_AMD_IOMMU_V2) */
27593 +#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */
27595  #endif /* __KFD_IOMMU_H__ */
27596 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
27597 index d699a5cf6c11..71e07ebc8f88 100644
27598 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
27599 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
27600 @@ -1191,6 +1191,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
27601         if (adev->dm.dc)
27602                 dc_deinit_callbacks(adev->dm.dc);
27603  #endif
27605 +#if defined(CONFIG_DRM_AMD_DC_DCN)
27606 +       if (adev->dm.vblank_workqueue) {
27607 +               adev->dm.vblank_workqueue->dm = NULL;
27608 +               kfree(adev->dm.vblank_workqueue);
27609 +               adev->dm.vblank_workqueue = NULL;
27610 +       }
27611 +#endif
27613         if (adev->dm.dc->ctx->dmub_srv) {
27614                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
27615                 adev->dm.dc->ctx->dmub_srv = NULL;
27616 @@ -3841,6 +3850,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
27617         scaling_info->src_rect.x = state->src_x >> 16;
27618         scaling_info->src_rect.y = state->src_y >> 16;
27620 +       /*
27621 +        * For reasons we don't (yet) fully understand a non-zero
27622 +        * src_y coordinate into an NV12 buffer can cause a
27623 +        * system hang. To avoid hangs (and maybe be overly cautious)
27624 +        * let's reject both non-zero src_x and src_y.
27625 +        *
27626 +        * We currently know of only one use-case to reproduce a
27627 +        * scenario with non-zero src_x and src_y for NV12, which
27628 +        * is to gesture the YouTube Android app into full screen
27629 +        * on ChromeOS.
27630 +        */
27631 +       if (state->fb &&
27632 +           state->fb->format->format == DRM_FORMAT_NV12 &&
27633 +           (scaling_info->src_rect.x != 0 ||
27634 +            scaling_info->src_rect.y != 0))
27635 +               return -EINVAL;
27637         scaling_info->src_rect.width = state->src_w >> 16;
27638         if (scaling_info->src_rect.width == 0)
27639                 return -EINVAL;
27640 @@ -5863,6 +5889,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
27642         } while (stream == NULL && requested_bpc >= 6);
27644 +       if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
27645 +               DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
27647 +               aconnector->force_yuv420_output = true;
27648 +               stream = create_validate_stream_for_sink(aconnector, drm_mode,
27649 +                                               dm_state, old_stream);
27650 +               aconnector->force_yuv420_output = false;
27651 +       }
27653         return stream;
27656 @@ -7417,10 +7452,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
27657         int x, y;
27658         int xorigin = 0, yorigin = 0;
27660 -       position->enable = false;
27661 -       position->x = 0;
27662 -       position->y = 0;
27664         if (!crtc || !plane->state->fb)
27665                 return 0;
27667 @@ -7467,7 +7498,7 @@ static void handle_cursor_update(struct drm_plane *plane,
27668         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
27669         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
27670         uint64_t address = afb ? afb->address : 0;
27671 -       struct dc_cursor_position position;
27672 +       struct dc_cursor_position position = {0};
27673         struct dc_cursor_attributes attributes;
27674         int ret;
27676 @@ -9264,7 +9295,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
27678         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
27679         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
27680 -       if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
27681 +       if (!new_cursor_state || !new_primary_state ||
27682 +           !new_cursor_state->fb || !new_primary_state->fb) {
27683                 return 0;
27684         }
27686 @@ -9383,7 +9415,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
27687         }
27689  #if defined(CONFIG_DRM_AMD_DC_DCN)
27690 -       if (adev->asic_type >= CHIP_NAVI10) {
27691 +       if (dc_resource_is_dsc_encoding_supported(dc)) {
27692                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
27693                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
27694                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
27695 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
27696 index 8bfe901cf237..52cc81705280 100644
27697 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
27698 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
27699 @@ -68,18 +68,6 @@ struct common_irq_params {
27700         enum dc_irq_source irq_src;
27701  };
27703 -/**
27704 - * struct irq_list_head - Linked-list for low context IRQ handlers.
27705 - *
27706 - * @head: The list_head within &struct handler_data
27707 - * @work: A work_struct containing the deferred handler work
27708 - */
27709 -struct irq_list_head {
27710 -       struct list_head head;
27711 -       /* In case this interrupt needs post-processing, 'work' will be queued*/
27712 -       struct work_struct work;
27715  /**
27716   * struct dm_compressor_info - Buffer info used by frame buffer compression
27717   * @cpu_addr: MMIO cpu addr
27718 @@ -293,7 +281,7 @@ struct amdgpu_display_manager {
27719          * Note that handlers are called in the same order as they were
27720          * registered (FIFO).
27721          */
27722 -       struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
27723 +       struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
27725         /**
27726          * @irq_handler_list_high_tab:
27727 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
27728 index 360952129b6d..29139b34dbe2 100644
27729 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
27730 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
27731 @@ -150,7 +150,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
27732   *
27733   * --- to get dp configuration
27734   *
27735 - * cat link_settings
27736 + * cat /sys/kernel/debug/dri/0/DP-x/link_settings
27737   *
27738   * It will list current, verified, reported, preferred dp configuration.
27739   * current -- for current video mode
27740 @@ -163,7 +163,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
27741   * echo <lane_count>  <link_rate> > link_settings
27742   *
27743   * for example, to force to  2 lane, 2.7GHz,
27744 - * echo 4 0xa > link_settings
27745 + * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
27746   *
27747   * spread_spectrum could not be changed dynamically.
27748   *
27749 @@ -171,7 +171,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
27750   * done. please check link settings after force operation to see if HW get
27751   * programming.
27752   *
27753 - * cat link_settings
27754 + * cat /sys/kernel/debug/dri/0/DP-x/link_settings
27755   *
27756   * check current and preferred settings.
27757   *
27758 @@ -255,7 +255,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
27759         int max_param_num = 2;
27760         uint8_t param_nums = 0;
27761         long param[2];
27762 -       bool valid_input = false;
27763 +       bool valid_input = true;
27765         if (size == 0)
27766                 return -EINVAL;
27767 @@ -282,9 +282,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
27768         case LANE_COUNT_ONE:
27769         case LANE_COUNT_TWO:
27770         case LANE_COUNT_FOUR:
27771 -               valid_input = true;
27772                 break;
27773         default:
27774 +               valid_input = false;
27775                 break;
27776         }
27778 @@ -294,9 +294,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
27779         case LINK_RATE_RBR2:
27780         case LINK_RATE_HIGH2:
27781         case LINK_RATE_HIGH3:
27782 -               valid_input = true;
27783                 break;
27784         default:
27785 +               valid_input = false;
27786                 break;
27787         }
27789 @@ -310,10 +310,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
27790          * spread spectrum will not be changed
27791          */
27792         prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
27793 +       prefer_link_settings.use_link_rate_set = false;
27794         prefer_link_settings.lane_count = param[0];
27795         prefer_link_settings.link_rate = param[1];
27797 -       dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
27798 +       dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
27800         kfree(wr_buf);
27801         return size;
27802 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
27803 index 0cdbfcd475ec..71a15f68514b 100644
27804 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
27805 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
27806 @@ -644,6 +644,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
27808         /* File created at /sys/class/drm/card0/device/hdcp_srm*/
27809         hdcp_work[0].attr = data_attr;
27810 +       sysfs_bin_attr_init(&hdcp_work[0].attr);
27812         if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
27813                 DRM_WARN("Failed to create device file hdcp_srm");
27814 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
27815 index e0000c180ed1..8ce10d0973c5 100644
27816 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
27817 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
27818 @@ -82,6 +82,7 @@ struct amdgpu_dm_irq_handler_data {
27819         struct amdgpu_display_manager *dm;
27820         /* DAL irq source which registered for this interrupt. */
27821         enum dc_irq_source irq_source;
27822 +       struct work_struct work;
27823  };
27825  #define DM_IRQ_TABLE_LOCK(adev, flags) \
27826 @@ -111,20 +112,10 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
27827   */
27828  static void dm_irq_work_func(struct work_struct *work)
27830 -       struct irq_list_head *irq_list_head =
27831 -               container_of(work, struct irq_list_head, work);
27832 -       struct list_head *handler_list = &irq_list_head->head;
27833 -       struct amdgpu_dm_irq_handler_data *handler_data;
27835 -       list_for_each_entry(handler_data, handler_list, list) {
27836 -               DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
27837 -                               handler_data->irq_source);
27838 +       struct amdgpu_dm_irq_handler_data *handler_data =
27839 +               container_of(work, struct amdgpu_dm_irq_handler_data, work);
27841 -               DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
27842 -                       handler_data->irq_source);
27844 -               handler_data->handler(handler_data->handler_arg);
27845 -       }
27846 +       handler_data->handler(handler_data->handler_arg);
27848         /* Call a DAL subcomponent which registered for interrupt notification
27849          * at INTERRUPT_LOW_IRQ_CONTEXT.
27850 @@ -156,7 +147,7 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
27851                 break;
27852         case INTERRUPT_LOW_IRQ_CONTEXT:
27853         default:
27854 -               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
27855 +               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
27856                 break;
27857         }
27859 @@ -290,7 +281,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
27860                 break;
27861         case INTERRUPT_LOW_IRQ_CONTEXT:
27862         default:
27863 -               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
27864 +               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
27865 +               INIT_WORK(&handler_data->work, dm_irq_work_func);
27866                 break;
27867         }
27869 @@ -372,7 +364,7 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
27870  int amdgpu_dm_irq_init(struct amdgpu_device *adev)
27872         int src;
27873 -       struct irq_list_head *lh;
27874 +       struct list_head *lh;
27876         DRM_DEBUG_KMS("DM_IRQ\n");
27878 @@ -381,9 +373,7 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
27879         for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
27880                 /* low context handler list init */
27881                 lh = &adev->dm.irq_handler_list_low_tab[src];
27882 -               INIT_LIST_HEAD(&lh->head);
27883 -               INIT_WORK(&lh->work, dm_irq_work_func);
27885 +               INIT_LIST_HEAD(lh);
27886                 /* high context handler init */
27887                 INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
27888         }
27889 @@ -400,8 +390,11 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
27890  void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
27892         int src;
27893 -       struct irq_list_head *lh;
27894 +       struct list_head *lh;
27895 +       struct list_head *entry, *tmp;
27896 +       struct amdgpu_dm_irq_handler_data *handler;
27897         unsigned long irq_table_flags;
27899         DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
27900         for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
27901                 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
27902 @@ -410,7 +403,16 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
27903                  * (because no code can schedule a new one). */
27904                 lh = &adev->dm.irq_handler_list_low_tab[src];
27905                 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
27906 -               flush_work(&lh->work);
27908 +               if (!list_empty(lh)) {
27909 +                       list_for_each_safe(entry, tmp, lh) {
27910 +                               handler = list_entry(
27911 +                                       entry,
27912 +                                       struct amdgpu_dm_irq_handler_data,
27913 +                                       list);
27914 +                               flush_work(&handler->work);
27915 +                       }
27916 +               }
27917         }
27920 @@ -420,6 +422,8 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
27921         struct list_head *hnd_list_h;
27922         struct list_head *hnd_list_l;
27923         unsigned long irq_table_flags;
27924 +       struct list_head *entry, *tmp;
27925 +       struct amdgpu_dm_irq_handler_data *handler;
27927         DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
27929 @@ -430,14 +434,22 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
27930          * will be disabled from manage_dm_interrupts on disable CRTC.
27931          */
27932         for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
27933 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
27934 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
27935                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
27936                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
27937                         dc_interrupt_set(adev->dm.dc, src, false);
27939                 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
27940 -               flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
27942 +               if (!list_empty(hnd_list_l)) {
27943 +                       list_for_each_safe (entry, tmp, hnd_list_l) {
27944 +                               handler = list_entry(
27945 +                                       entry,
27946 +                                       struct amdgpu_dm_irq_handler_data,
27947 +                                       list);
27948 +                               flush_work(&handler->work);
27949 +                       }
27950 +               }
27951                 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
27952         }
27954 @@ -457,7 +469,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
27956         /* re-enable short pulse interrupts HW interrupt */
27957         for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
27958 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
27959 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
27960                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
27961                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
27962                         dc_interrupt_set(adev->dm.dc, src, true);
27963 @@ -483,7 +495,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
27964          * will be enabled from manage_dm_interrupts on enable CRTC.
27965          */
27966         for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
27967 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
27968 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
27969                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
27970                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
27971                         dc_interrupt_set(adev->dm.dc, src, true);
27972 @@ -500,22 +512,53 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
27973  static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
27974                                         enum dc_irq_source irq_source)
27976 -       unsigned long irq_table_flags;
27977 -       struct work_struct *work = NULL;
27978 +       struct  list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
27979 +       struct  amdgpu_dm_irq_handler_data *handler_data;
27980 +       bool    work_queued = false;
27982 -       DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
27983 +       if (list_empty(handler_list))
27984 +               return;
27986 +       list_for_each_entry (handler_data, handler_list, list) {
27987 +               if (!queue_work(system_highpri_wq, &handler_data->work)) {
27988 +                       continue;
27989 +               } else {
27990 +                       work_queued = true;
27991 +                       break;
27992 +               }
27993 +       }
27995 -       if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
27996 -               work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
27997 +       if (!work_queued) {
27998 +               struct  amdgpu_dm_irq_handler_data *handler_data_add;
27999 +               /*get the amdgpu_dm_irq_handler_data of first item pointed by handler_list*/
28000 +               handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
28002 -       DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
28003 +               /*allocate a new amdgpu_dm_irq_handler_data*/
28004 +               handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
28005 +               if (!handler_data_add) {
28006 +                       DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
28007 +                       return;
28008 +               }
28010 -       if (work) {
28011 -               if (!schedule_work(work))
28012 -                       DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
28013 -                                               irq_source);
28014 -       }
28015 +               /*copy new amdgpu_dm_irq_handler_data members from handler_data*/
28016 +               handler_data_add->handler       = handler_data->handler;
28017 +               handler_data_add->handler_arg   = handler_data->handler_arg;
28018 +               handler_data_add->dm            = handler_data->dm;
28019 +               handler_data_add->irq_source    = irq_source;
28021 +               list_add_tail(&handler_data_add->list, handler_list);
28023 +               INIT_WORK(&handler_data_add->work, dm_irq_work_func);
28025 +               if (queue_work(system_highpri_wq, &handler_data_add->work))
28026 +                       DRM_DEBUG("Queued work for handling interrupt from "
28027 +                                 "display for IRQ source %d\n",
28028 +                                 irq_source);
28029 +               else
28030 +                       DRM_ERROR("Failed to queue work for handling interrupt "
28031 +                                 "from display for IRQ source %d\n",
28032 +                                 irq_source);
28033 +       }
28036  /*
28037 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
28038 index 995ffbbf64e7..1ee27f2f28f1 100644
28039 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
28040 +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
28041 @@ -217,6 +217,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
28042                 if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
28043                         dcn3_clk_mgr_destroy(clk_mgr);
28044                 }
28045 +               if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
28046 +                       dcn3_clk_mgr_destroy(clk_mgr);
28047 +               }
28048                 break;
28050         case FAMILY_VGH:
28051 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
28052 index c7e5a64e06af..81ea5d3a1947 100644
28053 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
28054 +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
28055 @@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
28056         bool force_reset = false;
28057         bool update_uclk = false;
28058         bool p_state_change_support;
28059 +       int total_plane_count;
28061         if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
28062                 return;
28063 @@ -292,7 +293,8 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
28064                 clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
28066         clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
28067 -       p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
28068 +       total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
28069 +       p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
28070         if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
28071                 clk_mgr_base->clks.p_state_change_support = p_state_change_support;
28073 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
28074 index 8f8a13c7cf73..4781279024a9 100644
28075 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
28076 +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
28077 @@ -2398,7 +2398,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
28078                                         if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
28079                                                 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
28081 -                                       dc->hwss.optimize_bandwidth(dc, dc->current_state);
28082 +                                       dc->optimized_required = true;
28084                                 } else {
28085                                         if (dc->optimize_seamless_boot_streams == 0)
28086                                                 dc->hwss.prepare_bandwidth(dc, dc->current_state);
28087 @@ -2545,6 +2546,10 @@ static void commit_planes_for_stream(struct dc *dc,
28088                                                 plane_state->triplebuffer_flips = true;
28089                                 }
28090                         }
28091 +                       if (update_type == UPDATE_TYPE_FULL) {
28092 +                               /* force vsync flip when reconfiguring pipes to prevent underflow */
28093 +                               plane_state->flip_immediate = false;
28094 +                       }
28095                 }
28096         }
28098 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
28099 index bd0101013ec8..440bf0a0e12a 100644
28100 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
28101 +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
28102 @@ -1603,6 +1603,7 @@ static bool dc_link_construct(struct dc_link *link,
28103         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
28105         DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
28106 +       kfree(info);
28107         return true;
28108  device_tag_fail:
28109         link->link_enc->funcs->destroy(&link->link_enc);
28110 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
28111 index 4e87e70237e3..874b132fe1d7 100644
28112 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
28113 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
28114 @@ -283,7 +283,7 @@ struct abm *dce_abm_create(
28115         const struct dce_abm_shift *abm_shift,
28116         const struct dce_abm_mask *abm_mask)
28118 -       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
28119 +       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
28121         if (abm_dce == NULL) {
28122                 BREAK_TO_DEBUGGER();
28123 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
28124 index 277484cf853e..d4be5954d7aa 100644
28125 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
28126 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
28127 @@ -99,7 +99,6 @@ struct dce110_aux_registers {
28128         AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
28129         AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
28130         AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
28131 -       AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
28132         AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
28133         AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
28134         AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
28135 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
28136 index ddc789daf3b1..09d4cb5c97b6 100644
28137 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
28138 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
28139 @@ -1049,7 +1049,7 @@ struct dmcu *dcn10_dmcu_create(
28140         const struct dce_dmcu_shift *dmcu_shift,
28141         const struct dce_dmcu_mask *dmcu_mask)
28143 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
28144 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
28146         if (dmcu_dce == NULL) {
28147                 BREAK_TO_DEBUGGER();
28148 @@ -1070,7 +1070,7 @@ struct dmcu *dcn20_dmcu_create(
28149         const struct dce_dmcu_shift *dmcu_shift,
28150         const struct dce_dmcu_mask *dmcu_mask)
28152 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
28153 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
28155         if (dmcu_dce == NULL) {
28156                 BREAK_TO_DEBUGGER();
28157 @@ -1091,7 +1091,7 @@ struct dmcu *dcn21_dmcu_create(
28158         const struct dce_dmcu_shift *dmcu_shift,
28159         const struct dce_dmcu_mask *dmcu_mask)
28161 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
28162 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
28164         if (dmcu_dce == NULL) {
28165                 BREAK_TO_DEBUGGER();
28166 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
28167 index 69e34bef274c..febccb35ddad 100644
28168 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
28169 +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
28170 @@ -81,13 +81,18 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
28172         struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
28173         uint32_t raw_state;
28174 +       enum dmub_status status = DMUB_STATUS_INVALID;
28176         // Send gpint command and wait for ack
28177 -       dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
28179 -       dmub_srv_get_gpint_response(srv, &raw_state);
28181 -       *state = convert_psr_state(raw_state);
28182 +       status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
28184 +       if (status == DMUB_STATUS_OK) {
28185 +               // GPINT was executed, get response
28186 +               dmub_srv_get_gpint_response(srv, &raw_state);
28187 +               *state = convert_psr_state(raw_state);
28188 +       } else
28189 +               // Return invalid state when GPINT times out
28190 +               *state = 0xFF;
28193  /*
28194 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
28195 index 62cc2651e00c..8774406120fc 100644
28196 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
28197 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
28198 @@ -112,7 +112,7 @@ struct dccg *dccg2_create(
28199         const struct dccg_shift *dccg_shift,
28200         const struct dccg_mask *dccg_mask)
28202 -       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
28203 +       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
28204         struct dccg *base;
28206         if (dccg_dcn == NULL) {
28207 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
28208 index bec7059f6d5d..a1318c31bcfa 100644
28209 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
28210 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
28211 @@ -1,5 +1,5 @@
28212  /*
28213 - * Copyright 2012-17 Advanced Micro Devices, Inc.
28214 + * Copyright 2012-2021 Advanced Micro Devices, Inc.
28215   *
28216   * Permission is hereby granted, free of charge, to any person obtaining a
28217   * copy of this software and associated documentation files (the "Software"),
28218 @@ -181,11 +181,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
28219         else
28220                 Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
28221         */
28222 -       if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
28223 -               + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
28224 -               value = 1;
28225 -       } else
28226 -               value = 0;
28227 +       if (pipe_dest->htotal != 0) {
28228 +               if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
28229 +                       + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
28230 +                       value = 1;
28231 +               } else
28232 +                       value = 0;
28233 +       }
28235         REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
28238 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
28239 index 2c2dbfcd8957..bfbc23b76cd5 100644
28240 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
28241 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
28242 @@ -1104,7 +1104,7 @@ struct dpp *dcn20_dpp_create(
28243         uint32_t inst)
28245         struct dcn20_dpp *dpp =
28246 -               kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
28247 +               kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
28249         if (!dpp)
28250                 return NULL;
28251 @@ -1122,7 +1122,7 @@ struct input_pixel_processor *dcn20_ipp_create(
28252         struct dc_context *ctx, uint32_t inst)
28254         struct dcn10_ipp *ipp =
28255 -               kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
28256 +               kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
28258         if (!ipp) {
28259                 BREAK_TO_DEBUGGER();
28260 @@ -1139,7 +1139,7 @@ struct output_pixel_processor *dcn20_opp_create(
28261         struct dc_context *ctx, uint32_t inst)
28263         struct dcn20_opp *opp =
28264 -               kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
28265 +               kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
28267         if (!opp) {
28268                 BREAK_TO_DEBUGGER();
28269 @@ -1156,7 +1156,7 @@ struct dce_aux *dcn20_aux_engine_create(
28270         uint32_t inst)
28272         struct aux_engine_dce110 *aux_engine =
28273 -               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
28274 +               kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
28276         if (!aux_engine)
28277                 return NULL;
28278 @@ -1194,7 +1194,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
28279         uint32_t inst)
28281         struct dce_i2c_hw *dce_i2c_hw =
28282 -               kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
28283 +               kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
28285         if (!dce_i2c_hw)
28286                 return NULL;
28287 @@ -1207,7 +1207,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
28288  struct mpc *dcn20_mpc_create(struct dc_context *ctx)
28290         struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
28291 -                                         GFP_KERNEL);
28292 +                                         GFP_ATOMIC);
28294         if (!mpc20)
28295                 return NULL;
28296 @@ -1225,7 +1225,7 @@ struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
28298         int i;
28299         struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
28300 -                                         GFP_KERNEL);
28301 +                                         GFP_ATOMIC);
28303         if (!hubbub)
28304                 return NULL;
28305 @@ -1253,7 +1253,7 @@ struct timing_generator *dcn20_timing_generator_create(
28306                 uint32_t instance)
28308         struct optc *tgn10 =
28309 -               kzalloc(sizeof(struct optc), GFP_KERNEL);
28310 +               kzalloc(sizeof(struct optc), GFP_ATOMIC);
28312         if (!tgn10)
28313                 return NULL;
28314 @@ -1332,7 +1332,7 @@ static struct clock_source *dcn20_clock_source_create(
28315         bool dp_clk_src)
28317         struct dce110_clk_src *clk_src =
28318 -               kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
28319 +               kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
28321         if (!clk_src)
28322                 return NULL;
28323 @@ -1438,7 +1438,7 @@ struct display_stream_compressor *dcn20_dsc_create(
28324         struct dc_context *ctx, uint32_t inst)
28326         struct dcn20_dsc *dsc =
28327 -               kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
28328 +               kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
28330         if (!dsc) {
28331                 BREAK_TO_DEBUGGER();
28332 @@ -1572,7 +1572,7 @@ struct hubp *dcn20_hubp_create(
28333         uint32_t inst)
28335         struct dcn20_hubp *hubp2 =
28336 -               kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
28337 +               kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
28339         if (!hubp2)
28340                 return NULL;
28341 @@ -3390,7 +3390,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
28343  static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
28345 -       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
28346 +       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
28348         if (!pp_smu)
28349                 return pp_smu;
28350 @@ -4034,7 +4034,7 @@ struct resource_pool *dcn20_create_resource_pool(
28351                 struct dc *dc)
28353         struct dcn20_resource_pool *pool =
28354 -               kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
28355 +               kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
28357         if (!pool)
28358                 return NULL;
28359 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
28360 index 06dc1e2e8383..07c8d2e2c09c 100644
28361 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
28362 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
28363 @@ -848,7 +848,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
28365                                         cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part;
28366                                         cmd.mall.cursor_copy_dst.quad_part =
28367 -                                                       plane->address.grph.cursor_cache_addr.quad_part;
28368 +                                                       (plane->address.grph.cursor_cache_addr.quad_part + 2047) & ~2047;
28369                                         cmd.mall.cursor_width = cursor_attr.width;
28370                                         cmd.mall.cursor_height = cursor_attr.height;
28371                                         cmd.mall.cursor_pitch = cursor_attr.pitch;
28372 @@ -858,8 +858,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
28373                                         dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
28375                                         /* Use copied cursor, and it's okay to not switch back */
28376 -                                       cursor_attr.address.quad_part =
28377 -                                                       plane->address.grph.cursor_cache_addr.quad_part;
28378 +                                       cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
28379                                         dc_stream_set_cursor_attributes(stream, &cursor_attr);
28380                                 }
28382 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
28383 index 3e6f76096119..a7598356f37d 100644
28384 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
28385 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
28386 @@ -143,16 +143,18 @@ static void mpc3_power_on_ogam_lut(
28388         struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
28390 -       if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
28391 -               // Force power on
28392 -               REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0);
28393 -               // Wait for confirmation when powering on
28394 -               if (power_on)
28395 -                       REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
28396 -       } else {
28397 -               REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
28398 -                               MPCC_OGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
28399 -       }
28400 +       /*
28401 +        * Powering on: force memory active so the LUT can be updated.
28402 +        * Powering off: allow entering memory low power mode
28403 +        *
28404 +        * Memory low power mode is controlled during MPC OGAM LUT init.
28405 +        */
28406 +       REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id],
28407 +                  MPCC_OGAM_MEM_PWR_DIS, power_on != 0);
28409 +       /* Wait for memory to be powered on - we won't be able to write to it otherwise. */
28410 +       if (power_on)
28411 +               REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
28414  static void mpc3_configure_ogam_lut(
28415 @@ -1427,7 +1429,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
28416         .acquire_rmu = mpcc3_acquire_rmu,
28417         .program_3dlut = mpc3_program_3dlut,
28418         .release_rmu = mpcc3_release_rmu,
28419 -       .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
28420 +       .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
28421         .get_mpc_out_mux = mpc1_get_mpc_out_mux,
28423  };
28424 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
28425 index fb7f1dea3c46..71e2d5e02571 100644
28426 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
28427 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
28428 @@ -181,7 +181,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
28429                 },
28430         .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
28431         .num_states = 1,
28432 -       .sr_exit_time_us = 12,
28433 +       .sr_exit_time_us = 15.5,
28434         .sr_enter_plus_exit_time_us = 20,
28435         .urgent_latency_us = 4.0,
28436         .urgent_latency_pixel_data_only_us = 4.0,
28437 diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
28438 index 4b659b63f75b..d03b1975e417 100644
28439 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
28440 +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
28441 @@ -164,7 +164,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
28443                 .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
28444                 .num_states = 1,
28445 -               .sr_exit_time_us = 12,
28446 +               .sr_exit_time_us = 15.5,
28447                 .sr_enter_plus_exit_time_us = 20,
28448                 .urgent_latency_us = 4.0,
28449                 .urgent_latency_pixel_data_only_us = 4.0,
28450 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
28451 index 0f3f510fd83b..9729cf292e84 100644
28452 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
28453 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
28454 @@ -3437,6 +3437,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
28455                         mode_lib->vba.DCCEnabledInAnyPlane = true;
28456                 }
28457         }
28458 +       mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
28459         for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
28460                 locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
28461                                 mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
28462 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
28463 index 210c96cd5b03..51098c2c9854 100644
28464 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
28465 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
28466 @@ -3544,6 +3544,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
28467                         mode_lib->vba.DCCEnabledInAnyPlane = true;
28468                 }
28469         }
28470 +       mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
28471         for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
28472                 locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
28473                                 mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
28474 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
28475 index 72423dc425dc..799bae229e67 100644
28476 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
28477 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
28478 @@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
28479         if (surf_linear) {
28480                 log2_swath_height_l = 0;
28481                 log2_swath_height_c = 0;
28482 -       } else if (!surf_vert) {
28483 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
28484 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
28485         } else {
28486 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
28487 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
28488 +               unsigned int swath_height_l;
28489 +               unsigned int swath_height_c;
28491 +               if (!surf_vert) {
28492 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
28493 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
28494 +               } else {
28495 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
28496 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
28497 +               }
28499 +               if (swath_height_l > 0)
28500 +                       log2_swath_height_l = dml_log2(swath_height_l);
28502 +               if (req128_l && log2_swath_height_l > 0)
28503 +                       log2_swath_height_l -= 1;
28505 +               if (swath_height_c > 0)
28506 +                       log2_swath_height_c = dml_log2(swath_height_c);
28508 +               if (req128_c && log2_swath_height_c > 0)
28509 +                       log2_swath_height_c -= 1;
28510         }
28512         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
28513         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
28515 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
28516 index 9c78446c3a9d..6a6d5970d1d5 100644
28517 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
28518 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
28519 @@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
28520         if (surf_linear) {
28521                 log2_swath_height_l = 0;
28522                 log2_swath_height_c = 0;
28523 -       } else if (!surf_vert) {
28524 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
28525 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
28526         } else {
28527 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
28528 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
28529 +               unsigned int swath_height_l;
28530 +               unsigned int swath_height_c;
28532 +               if (!surf_vert) {
28533 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
28534 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
28535 +               } else {
28536 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
28537 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
28538 +               }
28540 +               if (swath_height_l > 0)
28541 +                       log2_swath_height_l = dml_log2(swath_height_l);
28543 +               if (req128_l && log2_swath_height_l > 0)
28544 +                       log2_swath_height_l -= 1;
28546 +               if (swath_height_c > 0)
28547 +                       log2_swath_height_c = dml_log2(swath_height_c);
28549 +               if (req128_c && log2_swath_height_c > 0)
28550 +                       log2_swath_height_c -= 1;
28551         }
28553         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
28554         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
28556 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
28557 index edd41d358291..dc1c81a6e377 100644
28558 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
28559 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
28560 @@ -277,13 +277,31 @@ static void handle_det_buf_split(
28561         if (surf_linear) {
28562                 log2_swath_height_l = 0;
28563                 log2_swath_height_c = 0;
28564 -       } else if (!surf_vert) {
28565 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
28566 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
28567         } else {
28568 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
28569 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
28570 +               unsigned int swath_height_l;
28571 +               unsigned int swath_height_c;
28573 +               if (!surf_vert) {
28574 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
28575 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
28576 +               } else {
28577 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
28578 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
28579 +               }
28581 +               if (swath_height_l > 0)
28582 +                       log2_swath_height_l = dml_log2(swath_height_l);
28584 +               if (req128_l && log2_swath_height_l > 0)
28585 +                       log2_swath_height_l -= 1;
28587 +               if (swath_height_c > 0)
28588 +                       log2_swath_height_c = dml_log2(swath_height_c);
28590 +               if (req128_c && log2_swath_height_c > 0)
28591 +                       log2_swath_height_c -= 1;
28592         }
28594         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
28595         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
28597 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
28598 index 0f14f205ebe5..04601a767a8f 100644
28599 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
28600 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
28601 @@ -237,13 +237,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
28602         if (surf_linear) {
28603                 log2_swath_height_l = 0;
28604                 log2_swath_height_c = 0;
28605 -       } else if (!surf_vert) {
28606 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
28607 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
28608         } else {
28609 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
28610 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
28611 +               unsigned int swath_height_l;
28612 +               unsigned int swath_height_c;
28614 +               if (!surf_vert) {
28615 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
28616 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
28617 +               } else {
28618 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
28619 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
28620 +               }
28622 +               if (swath_height_l > 0)
28623 +                       log2_swath_height_l = dml_log2(swath_height_l);
28625 +               if (req128_l && log2_swath_height_l > 0)
28626 +                       log2_swath_height_l -= 1;
28628 +               if (swath_height_c > 0)
28629 +                       log2_swath_height_c = dml_log2(swath_height_c);
28631 +               if (req128_c && log2_swath_height_c > 0)
28632 +                       log2_swath_height_c -= 1;
28633         }
28635         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
28636         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
28638 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
28639 index 4c3e9cc30167..414da64f5734 100644
28640 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
28641 +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
28642 @@ -344,13 +344,31 @@ static void handle_det_buf_split(
28643         if (surf_linear) {
28644                 log2_swath_height_l = 0;
28645                 log2_swath_height_c = 0;
28646 -       } else if (!surf_vert) {
28647 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
28648 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
28649         } else {
28650 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
28651 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
28652 +               unsigned int swath_height_l;
28653 +               unsigned int swath_height_c;
28655 +               if (!surf_vert) {
28656 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
28657 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
28658 +               } else {
28659 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
28660 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
28661 +               }
28663 +               if (swath_height_l > 0)
28664 +                       log2_swath_height_l = dml_log2(swath_height_l);
28666 +               if (req128_l && log2_swath_height_l > 0)
28667 +                       log2_swath_height_l -= 1;
28669 +               if (swath_height_c > 0)
28670 +                       log2_swath_height_c = dml_log2(swath_height_c);
28672 +               if (req128_c && log2_swath_height_c > 0)
28673 +                       log2_swath_height_c -= 1;
28674         }
28676         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
28677         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
28679 diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
28680 index 5e384a8a83dc..51855a2624cf 100644
28681 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
28682 +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
28683 @@ -39,7 +39,7 @@
28684  #define HDCP14_KSV_SIZE 5
28685  #define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE
28687 -static const bool hdcp_cmd_is_read[] = {
28688 +static const bool hdcp_cmd_is_read[HDCP_MESSAGE_ID_MAX] = {
28689         [HDCP_MESSAGE_ID_READ_BKSV] = true,
28690         [HDCP_MESSAGE_ID_READ_RI_R0] = true,
28691         [HDCP_MESSAGE_ID_READ_PJ] = true,
28692 @@ -75,7 +75,7 @@ static const bool hdcp_cmd_is_read[] = {
28693         [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
28694  };
28696 -static const uint8_t hdcp_i2c_offsets[] = {
28697 +static const uint8_t hdcp_i2c_offsets[HDCP_MESSAGE_ID_MAX] = {
28698         [HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
28699         [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
28700         [HDCP_MESSAGE_ID_READ_PJ] = 0xA,
28701 @@ -106,7 +106,8 @@ static const uint8_t hdcp_i2c_offsets[] = {
28702         [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
28703         [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
28704         [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
28705 -       [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
28706 +       [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
28707 +       [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0,
28708  };
28710  struct protection_properties {
28711 @@ -184,7 +185,7 @@ static const struct protection_properties hdmi_14_protection = {
28712         .process_transaction = hdmi_14_process_transaction
28713  };
28715 -static const uint32_t hdcp_dpcd_addrs[] = {
28716 +static const uint32_t hdcp_dpcd_addrs[HDCP_MESSAGE_ID_MAX] = {
28717         [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
28718         [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
28719         [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
28720 diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
28721 index 904ce9b88088..afbe8856468a 100644
28722 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
28723 +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
28724 @@ -791,6 +791,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
28725                            TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
28726                         hdcp->connection.is_hdcp2_revoked = 1;
28727                         status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
28728 +               } else {
28729 +                       status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
28730                 }
28731         }
28732         mutex_unlock(&psp->hdcp_context.mutex);
28733 diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
28734 index ed05a30d1139..e2a56a7f3d7a 100644
28735 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
28736 +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
28737 @@ -1526,20 +1526,6 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
28739                 smu10_data->gfx_actual_soft_min_freq = min_freq;
28740                 smu10_data->gfx_actual_soft_max_freq = max_freq;
28742 -               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
28743 -                                       PPSMC_MSG_SetHardMinGfxClk,
28744 -                                       min_freq,
28745 -                                       NULL);
28746 -               if (ret)
28747 -                       return ret;
28749 -               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
28750 -                                       PPSMC_MSG_SetSoftMaxGfxClk,
28751 -                                       max_freq,
28752 -                                       NULL);
28753 -               if (ret)
28754 -                       return ret;
28755         } else if (type == PP_OD_COMMIT_DPM_TABLE) {
28756                 if (size != 0) {
28757                         pr_err("Input parameter number not correct\n");
28758 diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
28759 index 599ec9726601..959143eff651 100644
28760 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
28761 +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
28762 @@ -5160,7 +5160,7 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
28764  out:
28765         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
28766 -                                               1 << power_profile_mode,
28767 +                                               (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
28768                                                 NULL);
28769         hwmgr->power_profile_mode = power_profile_mode;
28771 diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
28772 index cd905e41080e..ec0037a21331 100644
28773 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
28774 +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
28775 @@ -279,35 +279,25 @@ static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_
28776         if (smu->adev->in_suspend)
28777                 return;
28779 -       /*
28780 -        * mclk, fclk and socclk are interdependent
28781 -        * on each other
28782 -        */
28783         if (clk == SMU_MCLK) {
28784 -               /* reset clock dependency */
28785                 smu->user_dpm_profile.clk_dependency = 0;
28786 -               /* set mclk dependent clocks(fclk and socclk) */
28787                 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
28788         } else if (clk == SMU_FCLK) {
28789 -               /* give priority to mclk, if mclk dependent clocks are set */
28790 +               /* MCLK takes precedence over FCLK */
28791                 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
28792                         return;
28794 -               /* reset clock dependency */
28795                 smu->user_dpm_profile.clk_dependency = 0;
28796 -               /* set fclk dependent clocks(mclk and socclk) */
28797                 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
28798         } else if (clk == SMU_SOCCLK) {
28799 -               /* give priority to mclk, if mclk dependent clocks are set */
28800 +               /* MCLK takes precedence over SOCCLK */
28801                 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
28802                         return;
28804 -               /* reset clock dependency */
28805                 smu->user_dpm_profile.clk_dependency = 0;
28806 -               /* set socclk dependent clocks(mclk and fclk) */
28807                 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
28808         } else
28809 -               /* add clk dependencies here, if any */
28810 +               /* Add clk dependencies here, if any */
28811                 return;
28814 @@ -331,7 +321,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
28815                 return;
28817         /* Enable restore flag */
28818 -       smu->user_dpm_profile.flags = SMU_DPM_USER_PROFILE_RESTORE;
28819 +       smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
28821         /* set the user dpm power limit */
28822         if (smu->user_dpm_profile.power_limit) {
28823 @@ -354,8 +344,8 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
28824                                 ret = smu_force_clk_levels(smu, clk_type,
28825                                                 smu->user_dpm_profile.clk_mask[clk_type]);
28826                                 if (ret)
28827 -                                       dev_err(smu->adev->dev, "Failed to set clock type = %d\n",
28828 -                                                       clk_type);
28829 +                                       dev_err(smu->adev->dev,
28830 +                                               "Failed to set clock type = %d\n", clk_type);
28831                         }
28832                 }
28833         }
28834 @@ -1777,7 +1767,7 @@ int smu_force_clk_levels(struct smu_context *smu,
28836         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
28837                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
28838 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) {
28839 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
28840                         smu->user_dpm_profile.clk_mask[clk_type] = mask;
28841                         smu_set_user_clk_dependencies(smu, clk_type);
28842                 }
28843 @@ -2034,7 +2024,7 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
28844         if (smu->ppt_funcs->set_fan_speed_percent) {
28845                 percent = speed * 100 / smu->fan_max_rpm;
28846                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
28847 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
28848 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
28849                         smu->user_dpm_profile.fan_speed_percent = percent;
28850         }
28852 @@ -2096,6 +2086,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
28853                 dev_err(smu->adev->dev,
28854                         "New power limit (%d) is over the max allowed %d\n",
28855                         limit, smu->max_power_limit);
28856 +               ret = -EINVAL;
28857                 goto out;
28858         }
28860 @@ -2104,7 +2095,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
28862         if (smu->ppt_funcs->set_power_limit) {
28863                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
28864 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
28865 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
28866                         smu->user_dpm_profile.power_limit = limit;
28867         }
28869 @@ -2285,7 +2276,7 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
28871         if (smu->ppt_funcs->set_fan_control_mode) {
28872                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
28873 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
28874 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
28875                         smu->user_dpm_profile.fan_mode = value;
28876         }
28878 @@ -2293,7 +2284,7 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
28880         /* reset user dpm fan speed */
28881         if (!ret && value != AMD_FAN_CTRL_MANUAL &&
28882 -                       smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
28883 +                       !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
28884                 smu->user_dpm_profile.fan_speed_percent = 0;
28886         return ret;
28887 @@ -2335,7 +2326,7 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
28888                 if (speed > 100)
28889                         speed = 100;
28890                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
28891 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
28892 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
28893                         smu->user_dpm_profile.fan_speed_percent = speed;
28894         }
28896 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
28897 index 101eaa20db9b..a80f551771b9 100644
28898 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
28899 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
28900 @@ -1462,7 +1462,6 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
28901                                         long input[], uint32_t size)
28903         int ret = 0;
28904 -       int i;
28905         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
28907         if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
28908 @@ -1535,43 +1534,6 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
28909                         smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
28910                         smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
28911                         smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
28913 -                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
28914 -                                                                       smu->gfx_actual_hard_min_freq, NULL);
28915 -                       if (ret) {
28916 -                               dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
28917 -                               return ret;
28918 -                       }
28920 -                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
28921 -                                                                       smu->gfx_actual_soft_max_freq, NULL);
28922 -                       if (ret) {
28923 -                               dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
28924 -                               return ret;
28925 -                       }
28927 -                       if (smu->adev->pm.fw_version < 0x43f1b00) {
28928 -                               dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
28929 -                               break;
28930 -                       }
28932 -                       for (i = 0; i < smu->cpu_core_num; i++) {
28933 -                               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
28934 -                                                                     (i << 20) | smu->cpu_actual_soft_min_freq,
28935 -                                                                     NULL);
28936 -                               if (ret) {
28937 -                                       dev_err(smu->adev->dev, "Set hard min cclk failed!");
28938 -                                       return ret;
28939 -                               }
28941 -                               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
28942 -                                                                     (i << 20) | smu->cpu_actual_soft_max_freq,
28943 -                                                                     NULL);
28944 -                               if (ret) {
28945 -                                       dev_err(smu->adev->dev, "Set soft max cclk failed!");
28946 -                                       return ret;
28947 -                               }
28948 -                       }
28949                 }
28950                 break;
28951         case PP_OD_COMMIT_DPM_TABLE:
28952 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
28953 index 5493388fcb10..dbe6d0caddb7 100644
28954 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
28955 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
28956 @@ -389,24 +389,6 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
28957                 }
28958                 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
28959                 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
28961 -               ret = smu_cmn_send_smc_msg_with_param(smu,
28962 -                                                               SMU_MSG_SetHardMinGfxClk,
28963 -                                                               smu->gfx_actual_hard_min_freq,
28964 -                                                               NULL);
28965 -               if (ret) {
28966 -                       dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
28967 -                       return ret;
28968 -               }
28970 -               ret = smu_cmn_send_smc_msg_with_param(smu,
28971 -                                                               SMU_MSG_SetSoftMaxGfxClk,
28972 -                                                               smu->gfx_actual_soft_max_freq,
28973 -                                                               NULL);
28974 -               if (ret) {
28975 -                       dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
28976 -                       return ret;
28977 -               }
28978                 break;
28979         case PP_OD_COMMIT_DPM_TABLE:
28980                 if (size != 0) {
28981 diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
28982 index 3bc383d5bf73..49a1d7f3539c 100644
28983 --- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
28984 +++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
28985 @@ -13,9 +13,6 @@
28986  #define has_bit(nr, mask)      (BIT(nr) & (mask))
28987  #define has_bits(bits, mask)   (((bits) & (mask)) == (bits))
28989 -#define dp_for_each_set_bit(bit, mask) \
28990 -       for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
28992  #define dp_wait_cond(__cond, __tries, __min_range, __max_range)        \
28993  ({                                                     \
28994         int num_tries = __tries;                        \
28995 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
28996 index 719a79728e24..06c595378dda 100644
28997 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
28998 +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
28999 @@ -46,8 +46,9 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
29001         struct komeda_component *c;
29002         int i;
29003 +       unsigned long avail_comps = pipe->avail_comps;
29005 -       dp_for_each_set_bit(i, pipe->avail_comps) {
29006 +       for_each_set_bit(i, &avail_comps, 32) {
29007                 c = komeda_pipeline_get_component(pipe, i);
29008                 komeda_component_destroy(mdev, c);
29009         }
29010 @@ -247,6 +248,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
29012         struct komeda_component *c;
29013         int id;
29014 +       unsigned long avail_comps = pipe->avail_comps;
29016         DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",
29017                  pipe->id, pipe->n_layers, pipe->n_scalers,
29018 @@ -258,7 +260,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
29019                  pipe->of_output_links[1] ?
29020                  pipe->of_output_links[1]->full_name : "none");
29022 -       dp_for_each_set_bit(id, pipe->avail_comps) {
29023 +       for_each_set_bit(id, &avail_comps, 32) {
29024                 c = komeda_pipeline_get_component(pipe, id);
29026                 komeda_component_dump(c);
29027 @@ -270,8 +272,9 @@ static void komeda_component_verify_inputs(struct komeda_component *c)
29028         struct komeda_pipeline *pipe = c->pipeline;
29029         struct komeda_component *input;
29030         int id;
29031 +       unsigned long supported_inputs = c->supported_inputs;
29033 -       dp_for_each_set_bit(id, c->supported_inputs) {
29034 +       for_each_set_bit(id, &supported_inputs, 32) {
29035                 input = komeda_pipeline_get_component(pipe, id);
29036                 if (!input) {
29037                         c->supported_inputs &= ~(BIT(id));
29038 @@ -302,8 +305,9 @@ static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
29039         struct komeda_component *c;
29040         struct komeda_layer *layer;
29041         int i, id;
29042 +       unsigned long avail_comps = pipe->avail_comps;
29044 -       dp_for_each_set_bit(id, pipe->avail_comps) {
29045 +       for_each_set_bit(id, &avail_comps, 32) {
29046                 c = komeda_pipeline_get_component(pipe, id);
29047                 komeda_component_verify_inputs(c);
29048         }
29049 @@ -355,13 +359,15 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
29051         struct komeda_component *c;
29052         u32 id;
29053 +       unsigned long avail_comps;
29055         seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
29057         if (pipe->funcs && pipe->funcs->dump_register)
29058                 pipe->funcs->dump_register(pipe, sf);
29060 -       dp_for_each_set_bit(id, pipe->avail_comps) {
29061 +       avail_comps = pipe->avail_comps;
29062 +       for_each_set_bit(id, &avail_comps, 32) {
29063                 c = komeda_pipeline_get_component(pipe, id);
29065                 seq_printf(sf, "\n------%s------\n", c->name);
29066 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
29067 index 5c085116de3f..e672b9cffee3 100644
29068 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
29069 +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
29070 @@ -1231,14 +1231,15 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
29071         struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
29072         struct komeda_component_state *c_st;
29073         struct komeda_component *c;
29074 -       u32 disabling_comps, id;
29075 +       u32 id;
29076 +       unsigned long disabling_comps;
29078         WARN_ON(!old);
29080         disabling_comps = (~new->active_comps) & old->active_comps;
29082         /* unbound all disabling component */
29083 -       dp_for_each_set_bit(id, disabling_comps) {
29084 +       for_each_set_bit(id, &disabling_comps, 32) {
29085                 c = komeda_pipeline_get_component(pipe, id);
29086                 c_st = komeda_component_get_state_and_set_user(c,
29087                                 drm_st, NULL, new->crtc);
29088 @@ -1286,7 +1287,8 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
29089         struct komeda_pipeline_state *old;
29090         struct komeda_component *c;
29091         struct komeda_component_state *c_st;
29092 -       u32 id, disabling_comps = 0;
29093 +       u32 id;
29094 +       unsigned long disabling_comps;
29096         old = komeda_pipeline_get_old_state(pipe, old_state);
29098 @@ -1296,10 +1298,10 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
29099                 disabling_comps = old->active_comps &
29100                                   pipe->standalone_disabled_comps;
29102 -       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
29103 +       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%lx.\n",
29104                          pipe->id, old->active_comps, disabling_comps);
29106 -       dp_for_each_set_bit(id, disabling_comps) {
29107 +       for_each_set_bit(id, &disabling_comps, 32) {
29108                 c = komeda_pipeline_get_component(pipe, id);
29109                 c_st = priv_to_comp_st(c->obj.state);
29111 @@ -1330,16 +1332,17 @@ void komeda_pipeline_update(struct komeda_pipeline *pipe,
29112         struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
29113         struct komeda_pipeline_state *old;
29114         struct komeda_component *c;
29115 -       u32 id, changed_comps = 0;
29116 +       u32 id;
29117 +       unsigned long changed_comps;
29119         old = komeda_pipeline_get_old_state(pipe, old_state);
29121         changed_comps = new->active_comps | old->active_comps;
29123 -       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
29124 +       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
29125                          pipe->id, new->active_comps, changed_comps);
29127 -       dp_for_each_set_bit(id, changed_comps) {
29128 +       for_each_set_bit(id, &changed_comps, 32) {
29129                 c = komeda_pipeline_get_component(pipe, id);
29131                 if (new->active_comps & BIT(c->id))
29132 diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
29133 index ea8164e7a6dc..01837bea18c2 100644
29134 --- a/drivers/gpu/drm/ast/ast_drv.c
29135 +++ b/drivers/gpu/drm/ast/ast_drv.c
29136 @@ -30,6 +30,7 @@
29137  #include <linux/module.h>
29138  #include <linux/pci.h>
29140 +#include <drm/drm_atomic_helper.h>
29141  #include <drm/drm_crtc_helper.h>
29142  #include <drm/drm_drv.h>
29143  #include <drm/drm_fb_helper.h>
29144 @@ -138,6 +139,7 @@ static void ast_pci_remove(struct pci_dev *pdev)
29145         struct drm_device *dev = pci_get_drvdata(pdev);
29147         drm_dev_unregister(dev);
29148 +       drm_atomic_helper_shutdown(dev);
29151  static int ast_drm_freeze(struct drm_device *dev)
29152 diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
29153 index 988b270fea5e..758c69aa7232 100644
29154 --- a/drivers/gpu/drm/ast/ast_mode.c
29155 +++ b/drivers/gpu/drm/ast/ast_mode.c
29156 @@ -688,7 +688,7 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
29157         unsigned int offset_x, offset_y;
29159         offset_x = AST_MAX_HWC_WIDTH - fb->width;
29160 -       offset_y = AST_MAX_HWC_WIDTH - fb->height;
29161 +       offset_y = AST_MAX_HWC_HEIGHT - fb->height;
29163         if (state->fb != old_state->fb) {
29164                 /* A new cursor image was installed. */
29165 diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
29166 index e4110d6ca7b3..bc60fc4728d7 100644
29167 --- a/drivers/gpu/drm/bridge/Kconfig
29168 +++ b/drivers/gpu/drm/bridge/Kconfig
29169 @@ -67,6 +67,7 @@ config DRM_LONTIUM_LT9611UXC
29170         depends on OF
29171         select DRM_PANEL_BRIDGE
29172         select DRM_KMS_HELPER
29173 +       select DRM_MIPI_DSI
29174         select REGMAP_I2C
29175         help
29176           Driver for Lontium LT9611UXC DSI to HDMI bridge
29177 @@ -151,6 +152,7 @@ config DRM_SII902X
29178         tristate "Silicon Image sii902x RGB/HDMI bridge"
29179         depends on OF
29180         select DRM_KMS_HELPER
29181 +       select DRM_MIPI_DSI
29182         select REGMAP_I2C
29183         select I2C_MUX
29184         select SND_SOC_HDMI_CODEC if SND_SOC
29185 @@ -200,6 +202,7 @@ config DRM_TOSHIBA_TC358767
29186         tristate "Toshiba TC358767 eDP bridge"
29187         depends on OF
29188         select DRM_KMS_HELPER
29189 +       select DRM_MIPI_DSI
29190         select REGMAP_I2C
29191         select DRM_PANEL
29192         help
29193 diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
29194 index 024ea2a570e7..9160fd80dd70 100644
29195 --- a/drivers/gpu/drm/bridge/analogix/Kconfig
29196 +++ b/drivers/gpu/drm/bridge/analogix/Kconfig
29197 @@ -30,6 +30,7 @@ config DRM_ANALOGIX_ANX7625
29198         tristate "Analogix Anx7625 MIPI to DP interface support"
29199         depends on DRM
29200         depends on OF
29201 +       select DRM_MIPI_DSI
29202         help
29203           ANX7625 is an ultra-low power 4K mobile HD transmitter
29204           designed for portable devices. It converts MIPI/DPI to
29205 diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
29206 index 0ddc37551194..c916f4b8907e 100644
29207 --- a/drivers/gpu/drm/bridge/panel.c
29208 +++ b/drivers/gpu/drm/bridge/panel.c
29209 @@ -87,6 +87,18 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
29211  static void panel_bridge_detach(struct drm_bridge *bridge)
29213 +       struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
29214 +       struct drm_connector *connector = &panel_bridge->connector;
29216 +       /*
29217 +        * Cleanup the connector if we know it was initialized.
29218 +        *
29219 +        * FIXME: This wouldn't be needed if the panel_bridge structure was
29220 +        * allocated with drmm_kzalloc(). This might be tricky since the
29221 +        * drm_device pointer can only be retrieved when the bridge is attached.
29222 +        */
29223 +       if (connector->dev)
29224 +               drm_connector_cleanup(connector);
29227  static void panel_bridge_pre_enable(struct drm_bridge *bridge)
29228 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
29229 index 309afe61afdd..9c75c8815056 100644
29230 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
29231 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
29232 @@ -1154,6 +1154,7 @@ static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
29234         req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
29235         drm_dp_encode_sideband_req(&req, msg);
29236 +       msg->path_msg = true;
29239  static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
29240 @@ -2824,15 +2825,21 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
29242         req_type = txmsg->msg[0] & 0x7f;
29243         if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
29244 -               req_type == DP_RESOURCE_STATUS_NOTIFY)
29245 +               req_type == DP_RESOURCE_STATUS_NOTIFY ||
29246 +               req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
29247                 hdr->broadcast = 1;
29248         else
29249                 hdr->broadcast = 0;
29250         hdr->path_msg = txmsg->path_msg;
29251 -       hdr->lct = mstb->lct;
29252 -       hdr->lcr = mstb->lct - 1;
29253 -       if (mstb->lct > 1)
29254 -               memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
29255 +       if (hdr->broadcast) {
29256 +               hdr->lct = 1;
29257 +               hdr->lcr = 6;
29258 +       } else {
29259 +               hdr->lct = mstb->lct;
29260 +               hdr->lcr = mstb->lct - 1;
29261 +       }
29263 +       memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
29265         return 0;
29267 diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
29268 index 58f5dc2f6dd5..f6bdec7fa925 100644
29269 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
29270 +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
29271 @@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
29272         .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
29273  };
29275 +static const struct drm_dmi_panel_orientation_data onegx1_pro = {
29276 +       .width = 1200,
29277 +       .height = 1920,
29278 +       .bios_dates = (const char * const []){ "12/17/2020", NULL },
29279 +       .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
29282  static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
29283         .width = 720,
29284         .height = 1280,
29285 @@ -211,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = {
29286                   DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
29287                 },
29288                 .driver_data = (void *)&lcd1200x1920_rightside_up,
29289 +       }, {    /* OneGX1 Pro */
29290 +               .matches = {
29291 +                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
29292 +                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
29293 +                 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
29294 +               },
29295 +               .driver_data = (void *)&onegx1_pro,
29296         }, {    /* VIOS LTH17 */
29297                 .matches = {
29298                   DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
29299 diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
29300 index ad59a51eab6d..e7e1ee2aa352 100644
29301 --- a/drivers/gpu/drm/drm_probe_helper.c
29302 +++ b/drivers/gpu/drm/drm_probe_helper.c
29303 @@ -624,6 +624,7 @@ static void output_poll_execute(struct work_struct *work)
29304         struct drm_connector_list_iter conn_iter;
29305         enum drm_connector_status old_status;
29306         bool repoll = false, changed;
29307 +       u64 old_epoch_counter;
29309         if (!dev->mode_config.poll_enabled)
29310                 return;
29311 @@ -660,8 +661,9 @@ static void output_poll_execute(struct work_struct *work)
29313                 repoll = true;
29315 +               old_epoch_counter = connector->epoch_counter;
29316                 connector->status = drm_helper_probe_detect(connector, NULL, false);
29317 -               if (old_status != connector->status) {
29318 +               if (old_epoch_counter != connector->epoch_counter) {
29319                         const char *old, *new;
29321                         /*
29322 @@ -690,6 +692,9 @@ static void output_poll_execute(struct work_struct *work)
29323                                       connector->base.id,
29324                                       connector->name,
29325                                       old, new);
29326 +                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
29327 +                                     connector->base.id, connector->name,
29328 +                                     old_epoch_counter, connector->epoch_counter);
29330                         changed = true;
29331                 }
29332 diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
29333 index 775d89b6c3fc..5a5103632564 100644
29334 --- a/drivers/gpu/drm/i915/display/intel_dp.c
29335 +++ b/drivers/gpu/drm/i915/display/intel_dp.c
29336 @@ -1174,44 +1174,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
29337         return -EINVAL;
29340 -/* Optimize link config in order: max bpp, min lanes, min clock */
29341 -static int
29342 -intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
29343 -                                 struct intel_crtc_state *pipe_config,
29344 -                                 const struct link_config_limits *limits)
29346 -       const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
29347 -       int bpp, clock, lane_count;
29348 -       int mode_rate, link_clock, link_avail;
29350 -       for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
29351 -               int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
29353 -               mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
29354 -                                                  output_bpp);
29356 -               for (lane_count = limits->min_lane_count;
29357 -                    lane_count <= limits->max_lane_count;
29358 -                    lane_count <<= 1) {
29359 -                       for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
29360 -                               link_clock = intel_dp->common_rates[clock];
29361 -                               link_avail = intel_dp_max_data_rate(link_clock,
29362 -                                                                   lane_count);
29364 -                               if (mode_rate <= link_avail) {
29365 -                                       pipe_config->lane_count = lane_count;
29366 -                                       pipe_config->pipe_bpp = bpp;
29367 -                                       pipe_config->port_clock = link_clock;
29369 -                                       return 0;
29370 -                               }
29371 -                       }
29372 -               }
29373 -       }
29375 -       return -EINVAL;
29378  static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
29380         int i, num_bpc;
29381 @@ -1461,22 +1423,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
29382             intel_dp_can_bigjoiner(intel_dp))
29383                 pipe_config->bigjoiner = true;
29385 -       if (intel_dp_is_edp(intel_dp))
29386 -               /*
29387 -                * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
29388 -                * section A.1: "It is recommended that the minimum number of
29389 -                * lanes be used, using the minimum link rate allowed for that
29390 -                * lane configuration."
29391 -                *
29392 -                * Note that we fall back to the max clock and lane count for eDP
29393 -                * panels that fail with the fast optimal settings (see
29394 -                * intel_dp->use_max_params), in which case the fast vs. wide
29395 -                * choice doesn't matter.
29396 -                */
29397 -               ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
29398 -       else
29399 -               /* Optimize for slow and wide. */
29400 -               ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
29401 +       /*
29402 +        * Optimize for slow and wide for everything, because there are some
29403 +        * eDP 1.3 and 1.4 panels don't work well with fast and narrow.
29404 +        */
29405 +       ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
29407         /* enable compression if the mode doesn't fit available BW */
29408         drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
29409 diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
29410 index f455040fa989..7cbc81da80b7 100644
29411 --- a/drivers/gpu/drm/i915/display/intel_overlay.c
29412 +++ b/drivers/gpu/drm/i915/display/intel_overlay.c
29413 @@ -383,7 +383,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
29414                 i830_overlay_clock_gating(dev_priv, true);
29417 -static void
29418 +__i915_active_call static void
29419  intel_overlay_last_flip_retire(struct i915_active *active)
29421         struct intel_overlay *overlay =
29422 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
29423 index ec28a6cde49b..0b2434e29d00 100644
29424 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
29425 +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
29426 @@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
29427         struct i915_ggtt_view view;
29429         if (i915_gem_object_is_tiled(obj))
29430 -               chunk = roundup(chunk, tile_row_pages(obj));
29431 +               chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
29433         view.type = I915_GGTT_VIEW_PARTIAL;
29434         view.partial.offset = rounddown(page_offset, chunk);
29435 diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
29436 index 755522ced60d..3ae16945bd43 100644
29437 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
29438 +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
29439 @@ -630,7 +630,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
29441                 err = pin_pt_dma(vm, pde->pt.base);
29442                 if (err) {
29443 -                       i915_gem_object_put(pde->pt.base);
29444                         free_pd(vm, pde);
29445                         return err;
29446                 }
29447 diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
29448 index 67de2b189598..4b09490c20c0 100644
29449 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
29450 +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
29451 @@ -670,8 +670,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
29452                  * banks of memory are paired and unswizzled on the
29453                  * uneven portion, so leave that as unknown.
29454                  */
29455 -               if (intel_uncore_read(uncore, C0DRB3) ==
29456 -                   intel_uncore_read(uncore, C1DRB3)) {
29457 +               if (intel_uncore_read16(uncore, C0DRB3) ==
29458 +                   intel_uncore_read16(uncore, C1DRB3)) {
29459                         swizzle_x = I915_BIT_6_SWIZZLE_9_10;
29460                         swizzle_y = I915_BIT_6_SWIZZLE_9;
29461                 }
29462 diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
29463 index d1d8ee4a5f16..57578bf28d77 100644
29464 --- a/drivers/gpu/drm/i915/gvt/gvt.c
29465 +++ b/drivers/gpu/drm/i915/gvt/gvt.c
29466 @@ -126,7 +126,7 @@ static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups
29467         return true;
29470 -static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
29471 +static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
29473         int i, j;
29474         struct intel_vgpu_type *type;
29475 @@ -144,7 +144,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
29476                 gvt_vgpu_type_groups[i] = group;
29477         }
29479 -       return true;
29480 +       return 0;
29482  unwind:
29483         for (j = 0; j < i; j++) {
29484 @@ -152,7 +152,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
29485                 kfree(group);
29486         }
29488 -       return false;
29489 +       return -ENOMEM;
29492  static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
29493 @@ -360,7 +360,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
29494                 goto out_clean_thread;
29496         ret = intel_gvt_init_vgpu_type_groups(gvt);
29497 -       if (ret == false) {
29498 +       if (ret) {
29499                 gvt_err("failed to init vgpu type groups: %d\n", ret);
29500                 goto out_clean_types;
29501         }
29502 diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
29503 index 3bc616cc1ad2..ea660e541c90 100644
29504 --- a/drivers/gpu/drm/i915/i915_active.c
29505 +++ b/drivers/gpu/drm/i915/i915_active.c
29506 @@ -1156,7 +1156,8 @@ static int auto_active(struct i915_active *ref)
29507         return 0;
29510 -static void auto_retire(struct i915_active *ref)
29511 +__i915_active_call static void
29512 +auto_retire(struct i915_active *ref)
29514         i915_active_put(ref);
29516 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
29517 index 8e9cb44e66e5..4ecb813c9bc7 100644
29518 --- a/drivers/gpu/drm/i915/i915_drv.c
29519 +++ b/drivers/gpu/drm/i915/i915_drv.c
29520 @@ -1049,6 +1049,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
29521  void i915_driver_shutdown(struct drm_i915_private *i915)
29523         disable_rpm_wakeref_asserts(&i915->runtime_pm);
29524 +       intel_runtime_pm_disable(&i915->runtime_pm);
29525 +       intel_power_domains_disable(i915);
29527         i915_gem_suspend(i915);
29529 @@ -1064,7 +1066,15 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
29530         intel_suspend_encoders(i915);
29531         intel_shutdown_encoders(i915);
29533 +       /*
29534 +        * The only requirement is to reboot with display DC states disabled,
29535 +        * for now leaving all display power wells in the INIT power domain
29536 +        * enabled matching the driver reload sequence.
29537 +        */
29538 +       intel_power_domains_driver_remove(i915);
29539         enable_rpm_wakeref_asserts(&i915->runtime_pm);
29541 +       intel_runtime_pm_driver_release(&i915->runtime_pm);
29544  static bool suspend_to_idle(struct drm_i915_private *dev_priv)
29545 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
29546 index 4b4d8d034782..4ba20f959a71 100644
29547 --- a/drivers/gpu/drm/i915/intel_pm.c
29548 +++ b/drivers/gpu/drm/i915/intel_pm.c
29549 @@ -2993,7 +2993,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
29551  static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
29552                                    const char *name,
29553 -                                  const u16 wm[8])
29554 +                                  const u16 wm[])
29556         int level, max_level = ilk_wm_max_level(dev_priv);
29558 diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
29559 index 7bb31fbee29d..fd8870edde0e 100644
29560 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
29561 +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
29562 @@ -554,7 +554,7 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
29563                 height = state->src_h >> 16;
29564                 cpp = state->fb->format->cpp[0];
29566 -               if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY)
29567 +               if (!priv->soc_info->has_osd || plane->type == DRM_PLANE_TYPE_OVERLAY)
29568                         hwdesc = &priv->dma_hwdescs->hwdesc_f0;
29569                 else
29570                         hwdesc = &priv->dma_hwdescs->hwdesc_f1;
29571 @@ -826,6 +826,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
29572         const struct jz_soc_info *soc_info;
29573         struct ingenic_drm *priv;
29574         struct clk *parent_clk;
29575 +       struct drm_plane *primary;
29576         struct drm_bridge *bridge;
29577         struct drm_panel *panel;
29578         struct drm_encoder *encoder;
29579 @@ -940,9 +941,11 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
29580         if (soc_info->has_osd)
29581                 priv->ipu_plane = drm_plane_from_index(drm, 0);
29583 -       drm_plane_helper_add(&priv->f1, &ingenic_drm_plane_helper_funcs);
29584 +       primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0;
29586 -       ret = drm_universal_plane_init(drm, &priv->f1, 1,
29587 +       drm_plane_helper_add(primary, &ingenic_drm_plane_helper_funcs);
29589 +       ret = drm_universal_plane_init(drm, primary, 1,
29590                                        &ingenic_drm_primary_plane_funcs,
29591                                        priv->soc_info->formats_f1,
29592                                        priv->soc_info->num_formats_f1,
29593 @@ -954,7 +957,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
29595         drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs);
29597 -       ret = drm_crtc_init_with_planes(drm, &priv->crtc, &priv->f1,
29598 +       ret = drm_crtc_init_with_planes(drm, &priv->crtc, primary,
29599                                         NULL, &ingenic_drm_crtc_funcs, NULL);
29600         if (ret) {
29601                 dev_err(dev, "Failed to init CRTC: %i\n", ret);
29602 diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
29603 index 2314c8122992..b3fd3501c412 100644
29604 --- a/drivers/gpu/drm/mcde/mcde_dsi.c
29605 +++ b/drivers/gpu/drm/mcde/mcde_dsi.c
29606 @@ -760,7 +760,7 @@ static void mcde_dsi_start(struct mcde_dsi *d)
29607                 DSI_MCTL_MAIN_DATA_CTL_BTA_EN |
29608                 DSI_MCTL_MAIN_DATA_CTL_READ_EN |
29609                 DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN;
29610 -       if (d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
29611 +       if (!(d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
29612                 val |= DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN;
29613         writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
29615 diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
29616 index 8ee55f9e2954..7fb358167f8d 100644
29617 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
29618 +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
29619 @@ -153,7 +153,7 @@ struct mtk_hdmi_conf {
29620  struct mtk_hdmi {
29621         struct drm_bridge bridge;
29622         struct drm_bridge *next_bridge;
29623 -       struct drm_connector conn;
29624 +       struct drm_connector *curr_conn;/* current connector (only valid when 'enabled') */
29625         struct device *dev;
29626         const struct mtk_hdmi_conf *conf;
29627         struct phy *phy;
29628 @@ -186,11 +186,6 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
29629         return container_of(b, struct mtk_hdmi, bridge);
29632 -static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c)
29634 -       return container_of(c, struct mtk_hdmi, conn);
29637  static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
29639         return readl(hdmi->regs + offset);
29640 @@ -974,7 +969,7 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
29641         ssize_t err;
29643         err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
29644 -                                                      &hdmi->conn, mode);
29645 +                                                      hdmi->curr_conn, mode);
29646         if (err < 0) {
29647                 dev_err(hdmi->dev,
29648                         "Failed to get AVI infoframe from mode: %zd\n", err);
29649 @@ -1054,7 +1049,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
29650         ssize_t err;
29652         err = drm_hdmi_vendor_infoframe_from_display_mode(&frame,
29653 -                                                         &hdmi->conn, mode);
29654 +                                                         hdmi->curr_conn, mode);
29655         if (err) {
29656                 dev_err(hdmi->dev,
29657                         "Failed to get vendor infoframe from mode: %zd\n", err);
29658 @@ -1201,48 +1196,16 @@ mtk_hdmi_update_plugged_status(struct mtk_hdmi *hdmi)
29659                connector_status_connected : connector_status_disconnected;
29662 -static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
29663 -                                                 bool force)
29664 +static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi)
29666 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
29667         return mtk_hdmi_update_plugged_status(hdmi);
29670 -static void hdmi_conn_destroy(struct drm_connector *conn)
29672 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
29674 -       mtk_cec_set_hpd_event(hdmi->cec_dev, NULL, NULL);
29676 -       drm_connector_cleanup(conn);
29679 -static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
29681 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
29682 -       struct edid *edid;
29683 -       int ret;
29685 -       if (!hdmi->ddc_adpt)
29686 -               return -ENODEV;
29688 -       edid = drm_get_edid(conn, hdmi->ddc_adpt);
29689 -       if (!edid)
29690 -               return -ENODEV;
29692 -       hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
29694 -       drm_connector_update_edid_property(conn, edid);
29696 -       ret = drm_add_edid_modes(conn, edid);
29697 -       kfree(edid);
29698 -       return ret;
29701 -static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
29702 -                                   struct drm_display_mode *mode)
29703 +static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
29704 +                                     const struct drm_display_info *info,
29705 +                                     const struct drm_display_mode *mode)
29707 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
29708 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
29709         struct drm_bridge *next_bridge;
29711         dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
29712 @@ -1267,74 +1230,57 @@ static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
29713         return drm_mode_validate_size(mode, 0x1fff, 0x1fff);
29716 -static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn)
29718 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
29720 -       return hdmi->bridge.encoder;
29723 -static const struct drm_connector_funcs mtk_hdmi_connector_funcs = {
29724 -       .detect = hdmi_conn_detect,
29725 -       .fill_modes = drm_helper_probe_single_connector_modes,
29726 -       .destroy = hdmi_conn_destroy,
29727 -       .reset = drm_atomic_helper_connector_reset,
29728 -       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
29729 -       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
29732 -static const struct drm_connector_helper_funcs
29733 -               mtk_hdmi_connector_helper_funcs = {
29734 -       .get_modes = mtk_hdmi_conn_get_modes,
29735 -       .mode_valid = mtk_hdmi_conn_mode_valid,
29736 -       .best_encoder = mtk_hdmi_conn_best_enc,
29739  static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
29741         struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
29743 -       if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev)
29744 +       if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev) {
29745 +               static enum drm_connector_status status;
29747 +               status = mtk_hdmi_detect(hdmi);
29748                 drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
29749 +               drm_bridge_hpd_notify(&hdmi->bridge, status);
29750 +       }
29753  /*
29754   * Bridge callbacks
29755   */
29757 +static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridge)
29759 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
29761 +       return mtk_hdmi_detect(hdmi);
29764 +static struct edid *mtk_hdmi_bridge_get_edid(struct drm_bridge *bridge,
29765 +                                            struct drm_connector *connector)
29767 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
29768 +       struct edid *edid;
29770 +       if (!hdmi->ddc_adpt)
29771 +               return NULL;
29772 +       edid = drm_get_edid(connector, hdmi->ddc_adpt);
29773 +       if (!edid)
29774 +               return NULL;
29775 +       hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
29776 +       return edid;
29779  static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
29780                                   enum drm_bridge_attach_flags flags)
29782         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
29783         int ret;
29785 -       if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
29786 -               DRM_ERROR("Fix bridge driver to make connector optional!");
29787 +       if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
29788 +               DRM_ERROR("%s: The flag DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied\n",
29789 +                         __func__);
29790                 return -EINVAL;
29791         }
29793 -       ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
29794 -                                         &mtk_hdmi_connector_funcs,
29795 -                                         DRM_MODE_CONNECTOR_HDMIA,
29796 -                                         hdmi->ddc_adpt);
29797 -       if (ret) {
29798 -               dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
29799 -               return ret;
29800 -       }
29801 -       drm_connector_helper_add(&hdmi->conn, &mtk_hdmi_connector_helper_funcs);
29803 -       hdmi->conn.polled = DRM_CONNECTOR_POLL_HPD;
29804 -       hdmi->conn.interlace_allowed = true;
29805 -       hdmi->conn.doublescan_allowed = false;
29807 -       ret = drm_connector_attach_encoder(&hdmi->conn,
29808 -                                               bridge->encoder);
29809 -       if (ret) {
29810 -               dev_err(hdmi->dev,
29811 -                       "Failed to attach connector to encoder: %d\n", ret);
29812 -               return ret;
29813 -       }
29815         if (hdmi->next_bridge) {
29816                 ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
29817                                         bridge, flags);
29818 @@ -1357,7 +1303,8 @@ static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
29819         return true;
29822 -static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
29823 +static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
29824 +                                          struct drm_bridge_state *old_bridge_state)
29826         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
29828 @@ -1368,10 +1315,13 @@ static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
29829         clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
29830         clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
29832 +       hdmi->curr_conn = NULL;
29834         hdmi->enabled = false;
29837 -static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
29838 +static void mtk_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
29839 +                                               struct drm_bridge_state *old_state)
29841         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
29843 @@ -1406,7 +1356,8 @@ static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
29844         drm_mode_copy(&hdmi->mode, adjusted_mode);
29847 -static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
29848 +static void mtk_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
29849 +                                             struct drm_bridge_state *old_state)
29851         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
29853 @@ -1426,10 +1377,16 @@ static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
29854                 mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
29857 -static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
29858 +static void mtk_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
29859 +                                         struct drm_bridge_state *old_state)
29861 +       struct drm_atomic_state *state = old_state->base.state;
29862         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
29864 +       /* Retrieve the connector through the atomic state. */
29865 +       hdmi->curr_conn = drm_atomic_get_new_connector_for_encoder(state,
29866 +                                                                  bridge->encoder);
29868         mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode);
29869         clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
29870         clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
29871 @@ -1440,13 +1397,19 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
29874  static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
29875 +       .mode_valid = mtk_hdmi_bridge_mode_valid,
29876 +       .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
29877 +       .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
29878 +       .atomic_reset = drm_atomic_helper_bridge_reset,
29879         .attach = mtk_hdmi_bridge_attach,
29880         .mode_fixup = mtk_hdmi_bridge_mode_fixup,
29881 -       .disable = mtk_hdmi_bridge_disable,
29882 -       .post_disable = mtk_hdmi_bridge_post_disable,
29883 +       .atomic_disable = mtk_hdmi_bridge_atomic_disable,
29884 +       .atomic_post_disable = mtk_hdmi_bridge_atomic_post_disable,
29885         .mode_set = mtk_hdmi_bridge_mode_set,
29886 -       .pre_enable = mtk_hdmi_bridge_pre_enable,
29887 -       .enable = mtk_hdmi_bridge_enable,
29888 +       .atomic_pre_enable = mtk_hdmi_bridge_atomic_pre_enable,
29889 +       .atomic_enable = mtk_hdmi_bridge_atomic_enable,
29890 +       .detect = mtk_hdmi_bridge_detect,
29891 +       .get_edid = mtk_hdmi_bridge_get_edid,
29892  };
29894  static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
29895 @@ -1662,8 +1625,10 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
29897         struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
29899 -       memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
29901 +       if (hdmi->enabled)
29902 +               memcpy(buf, hdmi->curr_conn->eld, min(sizeof(hdmi->curr_conn->eld), len));
29903 +       else
29904 +               memset(buf, 0, len);
29905         return 0;
29908 @@ -1755,6 +1720,9 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
29910         hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
29911         hdmi->bridge.of_node = pdev->dev.of_node;
29912 +       hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
29913 +                        | DRM_BRIDGE_OP_HPD;
29914 +       hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
29915         drm_bridge_add(&hdmi->bridge);
29917         ret = mtk_hdmi_clk_enable_audio(hdmi);
29918 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
29919 index 91cf46f84025..3d55e153fa9c 100644
29920 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
29921 +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
29922 @@ -246,7 +246,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
29925  struct a6xx_gmu_oob_bits {
29926 -       int set, ack, set_new, ack_new;
29927 +       int set, ack, set_new, ack_new, clear, clear_new;
29928         const char *name;
29929  };
29931 @@ -260,6 +260,8 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
29932                 .ack = 24,
29933                 .set_new = 30,
29934                 .ack_new = 31,
29935 +               .clear = 24,
29936 +               .clear_new = 31,
29937         },
29939         [GMU_OOB_PERFCOUNTER_SET] = {
29940 @@ -268,18 +270,22 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
29941                 .ack = 25,
29942                 .set_new = 28,
29943                 .ack_new = 30,
29944 +               .clear = 25,
29945 +               .clear_new = 29,
29946         },
29948         [GMU_OOB_BOOT_SLUMBER] = {
29949                 .name = "BOOT_SLUMBER",
29950                 .set = 22,
29951                 .ack = 30,
29952 +               .clear = 30,
29953         },
29955         [GMU_OOB_DCVS_SET] = {
29956                 .name = "GPU_DCVS",
29957                 .set = 23,
29958                 .ack = 31,
29959 +               .clear = 31,
29960         },
29961  };
29963 @@ -335,9 +341,9 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
29964                 return;
29966         if (gmu->legacy)
29967 -               bit = a6xx_gmu_oob_bits[state].ack;
29968 +               bit = a6xx_gmu_oob_bits[state].clear;
29969         else
29970 -               bit = a6xx_gmu_oob_bits[state].ack_new;
29971 +               bit = a6xx_gmu_oob_bits[state].clear_new;
29973         gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
29975 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
29976 index d553f62f4eeb..b4d8e1b01ee4 100644
29977 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
29978 +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
29979 @@ -1153,10 +1153,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
29981         struct device_node *phandle;
29983 -       a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
29984 -       if (IS_ERR(a6xx_gpu->llc_mmio))
29985 -               return;
29987         /*
29988          * There is a different programming path for targets with an mmu500
29989          * attached, so detect if that is the case
29990 @@ -1166,6 +1162,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
29991                 of_device_is_compatible(phandle, "arm,mmu-500"));
29992         of_node_put(phandle);
29994 +       if (a6xx_gpu->have_mmu500)
29995 +               a6xx_gpu->llc_mmio = NULL;
29996 +       else
29997 +               a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
29999         a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
30000         a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
30002 diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
30003 index 189f3533525c..e4444452759c 100644
30004 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
30005 +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
30006 @@ -22,7 +22,7 @@
30007         (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
30009  #define VIG_SM8250_MASK \
30010 -       (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE))
30011 +       (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
30013  #define DMA_SDM845_MASK \
30014         (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
30015 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
30016 index ff2c1d583c79..0392d4dfe270 100644
30017 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
30018 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
30019 @@ -20,7 +20,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
30021         struct mdp5_kms *mdp5_kms = get_kms(encoder);
30022         struct device *dev = encoder->dev->dev;
30023 -       u32 total_lines_x100, vclks_line, cfg;
30024 +       u32 total_lines, vclks_line, cfg;
30025         long vsync_clk_speed;
30026         struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
30027         int pp_id = mixer->pp;
30028 @@ -30,8 +30,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
30029                 return -EINVAL;
30030         }
30032 -       total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
30033 -       if (!total_lines_x100) {
30034 +       total_lines = mode->vtotal * drm_mode_vrefresh(mode);
30035 +       if (!total_lines) {
30036                 DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
30037                               __func__, mode->vtotal, drm_mode_vrefresh(mode));
30038                 return -EINVAL;
30039 @@ -43,15 +43,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
30040                                                         vsync_clk_speed);
30041                 return -EINVAL;
30042         }
30043 -       vclks_line = vsync_clk_speed * 100 / total_lines_x100;
30044 +       vclks_line = vsync_clk_speed / total_lines;
30046         cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
30047                 | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
30048         cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
30050 +       /*
30051 +        * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
30052 +        * the vsync_clk equating to roughly half the desired panel refresh rate.
30053 +        * This is only necessary as stability fallback if interrupts from the
30054 +        * panel arrive too late or not at all, but is currently used by default
30055 +        * because these panel interrupts are not wired up yet.
30056 +        */
30057         mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
30058         mdp5_write(mdp5_kms,
30059 -               REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
30060 +               REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
30062         mdp5_write(mdp5_kms,
30063                 REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
30064         mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
30065 diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
30066 index 82a8673ab8da..d7e4a39a904e 100644
30067 --- a/drivers/gpu/drm/msm/dp/dp_audio.c
30068 +++ b/drivers/gpu/drm/msm/dp/dp_audio.c
30069 @@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
30070         dp_audio_setup_acr(audio);
30071         dp_audio_safe_to_exit_level(audio);
30072         dp_audio_enable(audio, true);
30073 +       dp_display_signal_audio_start(dp_display);
30074         dp_display->audio_enabled = true;
30076  end:
30077 diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
30078 index 5a39da6e1eaf..1784e119269b 100644
30079 --- a/drivers/gpu/drm/msm/dp/dp_display.c
30080 +++ b/drivers/gpu/drm/msm/dp/dp_display.c
30081 @@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
30082         return 0;
30085 +void dp_display_signal_audio_start(struct msm_dp *dp_display)
30087 +       struct dp_display_private *dp;
30089 +       dp = container_of(dp_display, struct dp_display_private, dp_display);
30091 +       reinit_completion(&dp->audio_comp);
30094  void dp_display_signal_audio_complete(struct msm_dp *dp_display)
30096         struct dp_display_private *dp;
30097 @@ -586,10 +595,8 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
30098         mutex_lock(&dp->event_mutex);
30100         state = dp->hpd_state;
30101 -       if (state == ST_CONNECT_PENDING) {
30102 -               dp_display_enable(dp, 0);
30103 +       if (state == ST_CONNECT_PENDING)
30104                 dp->hpd_state = ST_CONNECTED;
30105 -       }
30107         mutex_unlock(&dp->event_mutex);
30109 @@ -651,7 +658,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
30110         dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
30112         /* signal the disconnect event early to ensure proper teardown */
30113 -       reinit_completion(&dp->audio_comp);
30114         dp_display_handle_plugged_change(g_dp_display, false);
30116         dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
30117 @@ -669,10 +675,8 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
30118         mutex_lock(&dp->event_mutex);
30120         state =  dp->hpd_state;
30121 -       if (state == ST_DISCONNECT_PENDING) {
30122 -               dp_display_disable(dp, 0);
30123 +       if (state == ST_DISCONNECT_PENDING)
30124                 dp->hpd_state = ST_DISCONNECTED;
30125 -       }
30127         mutex_unlock(&dp->event_mutex);
30129 @@ -898,7 +902,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
30130         /* wait only if audio was enabled */
30131         if (dp_display->audio_enabled) {
30132                 /* signal the disconnect event */
30133 -               reinit_completion(&dp->audio_comp);
30134                 dp_display_handle_plugged_change(dp_display, false);
30135                 if (!wait_for_completion_timeout(&dp->audio_comp,
30136                                 HZ * 5))
30137 @@ -1272,7 +1275,12 @@ static int dp_pm_resume(struct device *dev)
30139         status = dp_catalog_link_is_connected(dp->catalog);
30141 -       if (status)
30142 +       /*
30143 +        * can not declared display is connected unless
30144 +        * HDMI cable is plugged in and sink_count of
30145 +        * dongle become 1
30146 +        */
30147 +       if (status && dp->link->sink_count)
30148                 dp->dp_display.is_connected = true;
30149         else
30150                 dp->dp_display.is_connected = false;
30151 diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
30152 index 6092ba1ed85e..5173c89eedf7 100644
30153 --- a/drivers/gpu/drm/msm/dp/dp_display.h
30154 +++ b/drivers/gpu/drm/msm/dp/dp_display.h
30155 @@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
30156  int dp_display_request_irq(struct msm_dp *dp_display);
30157  bool dp_display_check_video_test(struct msm_dp *dp_display);
30158  int dp_display_get_test_bpp(struct msm_dp *dp_display);
30159 +void dp_display_signal_audio_start(struct msm_dp *dp_display);
30160  void dp_display_signal_audio_complete(struct msm_dp *dp_display);
30162  #endif /* _DP_DISPLAY_H_ */
30163 diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
30164 index 5b8fe32022b5..e1c90fa47411 100644
30165 --- a/drivers/gpu/drm/msm/dp/dp_hpd.c
30166 +++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
30167 @@ -34,8 +34,8 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
30169         dp_usbpd->hpd_high = hpd;
30171 -       if (!hpd_priv->dp_cb && !hpd_priv->dp_cb->configure
30172 -                               && !hpd_priv->dp_cb->disconnect) {
30173 +       if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
30174 +                               || !hpd_priv->dp_cb->disconnect) {
30175                 pr_err("hpd dp_cb not initialized\n");
30176                 return -EINVAL;
30177         }
30178 diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
30179 index 85ad0babc326..d611cc8e54a4 100644
30180 --- a/drivers/gpu/drm/msm/msm_debugfs.c
30181 +++ b/drivers/gpu/drm/msm/msm_debugfs.c
30182 @@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = {
30183  static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
30185         struct msm_drm_private *priv = dev->dev_private;
30186 -       struct msm_gpu *gpu = priv->gpu;
30187         int ret;
30189 -       ret = mutex_lock_interruptible(&priv->mm_lock);
30190 +       ret = mutex_lock_interruptible(&priv->obj_lock);
30191         if (ret)
30192                 return ret;
30194 -       if (gpu) {
30195 -               seq_printf(m, "Active Objects (%s):\n", gpu->name);
30196 -               msm_gem_describe_objects(&gpu->active_list, m);
30197 -       }
30199 -       seq_printf(m, "Inactive Objects:\n");
30200 -       msm_gem_describe_objects(&priv->inactive_dontneed, m);
30201 -       msm_gem_describe_objects(&priv->inactive_willneed, m);
30202 +       msm_gem_describe_objects(&priv->objects, m);
30204 -       mutex_unlock(&priv->mm_lock);
30205 +       mutex_unlock(&priv->obj_lock);
30207         return 0;
30209 diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
30210 index 196907689c82..18ea1c66de71 100644
30211 --- a/drivers/gpu/drm/msm/msm_drv.c
30212 +++ b/drivers/gpu/drm/msm/msm_drv.c
30213 @@ -446,6 +446,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
30215         priv->wq = alloc_ordered_workqueue("msm", 0);
30217 +       INIT_LIST_HEAD(&priv->objects);
30218 +       mutex_init(&priv->obj_lock);
30220         INIT_LIST_HEAD(&priv->inactive_willneed);
30221         INIT_LIST_HEAD(&priv->inactive_dontneed);
30222         mutex_init(&priv->mm_lock);
30223 diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
30224 index 591c47a654e8..6b58e49754cb 100644
30225 --- a/drivers/gpu/drm/msm/msm_drv.h
30226 +++ b/drivers/gpu/drm/msm/msm_drv.h
30227 @@ -174,7 +174,14 @@ struct msm_drm_private {
30228         struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
30229         struct msm_perf_state *perf;
30231 -       /*
30232 +       /**
30233 +        * List of all GEM objects (mainly for debugfs, protected by obj_lock
30234 +        * (acquire before per GEM object lock)
30235 +        */
30236 +       struct list_head objects;
30237 +       struct mutex obj_lock;
30239 +       /**
30240          * Lists of inactive GEM objects.  Every bo is either in one of the
30241          * inactive lists (depending on whether or not it is shrinkable) or
30242          * gpu->active_list (for the gpu it is active on[1])
30243 diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
30244 index f091c1e164fa..aeba3eb8ce46 100644
30245 --- a/drivers/gpu/drm/msm/msm_gem.c
30246 +++ b/drivers/gpu/drm/msm/msm_gem.c
30247 @@ -951,7 +951,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
30248         size_t size = 0;
30250         seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
30251 -       list_for_each_entry(msm_obj, list, mm_list) {
30252 +       list_for_each_entry(msm_obj, list, node) {
30253                 struct drm_gem_object *obj = &msm_obj->base;
30254                 seq_puts(m, "   ");
30255                 msm_gem_describe(obj, m);
30256 @@ -970,6 +970,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
30257         struct drm_device *dev = obj->dev;
30258         struct msm_drm_private *priv = dev->dev_private;
30260 +       mutex_lock(&priv->obj_lock);
30261 +       list_del(&msm_obj->node);
30262 +       mutex_unlock(&priv->obj_lock);
30264         mutex_lock(&priv->mm_lock);
30265         list_del(&msm_obj->mm_list);
30266         mutex_unlock(&priv->mm_lock);
30267 @@ -1157,6 +1161,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
30268         list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
30269         mutex_unlock(&priv->mm_lock);
30271 +       mutex_lock(&priv->obj_lock);
30272 +       list_add_tail(&msm_obj->node, &priv->objects);
30273 +       mutex_unlock(&priv->obj_lock);
30275         return obj;
30277  fail:
30278 @@ -1227,6 +1235,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
30279         list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
30280         mutex_unlock(&priv->mm_lock);
30282 +       mutex_lock(&priv->obj_lock);
30283 +       list_add_tail(&msm_obj->node, &priv->objects);
30284 +       mutex_unlock(&priv->obj_lock);
30286         return obj;
30288  fail:
30289 diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
30290 index b3a0a880cbab..99d4c0e9465e 100644
30291 --- a/drivers/gpu/drm/msm/msm_gem.h
30292 +++ b/drivers/gpu/drm/msm/msm_gem.h
30293 @@ -55,8 +55,16 @@ struct msm_gem_object {
30294          */
30295         uint8_t vmap_count;
30297 -       /* And object is either:
30298 -        *  inactive - on priv->inactive_list
30299 +       /**
30300 +        * Node in list of all objects (mainly for debugfs, protected by
30301 +        * priv->obj_lock
30302 +        */
30303 +       struct list_head node;
30305 +       /**
30306 +        * An object is either:
30307 +        *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
30308 +        *     (depending on purgability status)
30309          *  active   - on one one of the gpu's active_list..  well, at
30310          *     least for now we don't have (I don't think) hw sync between
30311          *     2d and 3d one devices which have both, meaning we need to
30312 diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
30313 index b31d750c425a..5f1722b040f4 100644
30314 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c
30315 +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
30316 @@ -4327,7 +4327,8 @@ static int omap_dsi_register_te_irq(struct dsi_data *dsi,
30317         irq_set_status_flags(te_irq, IRQ_NOAUTOEN);
30319         err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
30320 -                                  IRQF_TRIGGER_RISING, "TE", dsi);
30321 +                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
30322 +                                  "TE", dsi);
30323         if (err) {
30324                 dev_err(dsi->dev, "request irq failed with %d\n", err);
30325                 gpiod_put(dsi->te_gpio);
30326 diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
30327 index b9a0e56f33e2..ef70140c5b09 100644
30328 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
30329 +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
30330 @@ -898,8 +898,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
30331          */
30332         dsi->hs_rate = 349440000;
30333         dsi->lp_rate = 9600000;
30334 -       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
30335 -               MIPI_DSI_MODE_EOT_PACKET;
30336 +       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
30338         /*
30339          * Every new incarnation of this display must have a unique
30340 diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
30341 index 4aac0d1573dd..70560cac53a9 100644
30342 --- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
30343 +++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
30344 @@ -184,9 +184,7 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
30345          * As we only send commands we do not need to be continuously
30346          * clocked.
30347          */
30348 -       dsi->mode_flags =
30349 -               MIPI_DSI_CLOCK_NON_CONTINUOUS |
30350 -               MIPI_DSI_MODE_EOT_PACKET;
30351 +       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
30353         s6->supply = devm_regulator_get(dev, "vdd1");
30354         if (IS_ERR(s6->supply))
30355 diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
30356 index eec74c10ddda..9c3563c61e8c 100644
30357 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
30358 +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
30359 @@ -97,7 +97,6 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
30360         dsi->hs_rate = 349440000;
30361         dsi->lp_rate = 9600000;
30362         dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
30363 -               MIPI_DSI_MODE_EOT_PACKET |
30364                 MIPI_DSI_MODE_VIDEO_BURST;
30366         ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write,
30367 diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
30368 index 4e2dad314c79..e8b1a0e873ea 100644
30369 --- a/drivers/gpu/drm/panel/panel-simple.c
30370 +++ b/drivers/gpu/drm/panel/panel-simple.c
30371 @@ -406,7 +406,7 @@ static int panel_simple_prepare(struct drm_panel *panel)
30372                 if (IS_ERR(p->hpd_gpio)) {
30373                         err = panel_simple_get_hpd_gpio(panel->dev, p, false);
30374                         if (err)
30375 -                               return err;
30376 +                               goto error;
30377                 }
30379                 err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
30380 @@ -418,13 +418,20 @@ static int panel_simple_prepare(struct drm_panel *panel)
30381                 if (err) {
30382                         dev_err(panel->dev,
30383                                 "error waiting for hpd GPIO: %d\n", err);
30384 -                       return err;
30385 +                       goto error;
30386                 }
30387         }
30389         p->prepared_time = ktime_get();
30391         return 0;
30393 +error:
30394 +       gpiod_set_value_cansleep(p->enable_gpio, 0);
30395 +       regulator_disable(p->supply);
30396 +       p->unprepared_time = ktime_get();
30398 +       return err;
30401  static int panel_simple_enable(struct drm_panel *panel)
30402 diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
30403 index 065efae213f5..95659a4d15e9 100644
30404 --- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
30405 +++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
30406 @@ -449,8 +449,7 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi)
30407                         MIPI_DSI_MODE_VIDEO_BURST;
30408         else
30409                 dsi->mode_flags =
30410 -                       MIPI_DSI_CLOCK_NON_CONTINUOUS |
30411 -                       MIPI_DSI_MODE_EOT_PACKET;
30412 +                       MIPI_DSI_CLOCK_NON_CONTINUOUS;
30414         acx->supply = devm_regulator_get(dev, "vddi");
30415         if (IS_ERR(acx->supply))
30416 diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
30417 index 7c1b3481b785..21e552d1ac71 100644
30418 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
30419 +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
30420 @@ -488,8 +488,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
30421                 }
30422                 bo->base.pages = pages;
30423                 bo->base.pages_use_count = 1;
30424 -       } else
30425 +       } else {
30426                 pages = bo->base.pages;
30427 +               if (pages[page_offset]) {
30428 +                       /* Pages are already mapped, bail out. */
30429 +                       mutex_unlock(&bo->base.pages_lock);
30430 +                       goto out;
30431 +               }
30432 +       }
30434         mapping = bo->base.base.filp->f_mapping;
30435         mapping_set_unevictable(mapping);
30436 @@ -522,6 +528,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
30438         dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
30440 +out:
30441         panfrost_gem_mapping_put(bomapping);
30443         return 0;
30444 @@ -593,6 +600,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
30445                 access_type = (fault_status >> 8) & 0x3;
30446                 source_id = (fault_status >> 16);
30448 +               mmu_write(pfdev, MMU_INT_CLEAR, mask);
30450                 /* Page fault only */
30451                 ret = -1;
30452                 if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
30453 @@ -616,8 +625,6 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
30454                                 access_type, access_type_name(pfdev, fault_status),
30455                                 source_id);
30457 -               mmu_write(pfdev, MMU_INT_CLEAR, mask);
30459                 status &= ~mask;
30460         }
30462 diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
30463 index 54e3c3a97440..741cc983daf1 100644
30464 --- a/drivers/gpu/drm/qxl/qxl_cmd.c
30465 +++ b/drivers/gpu/drm/qxl/qxl_cmd.c
30466 @@ -268,7 +268,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev,
30467         int ret;
30469         ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
30470 -                           false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
30471 +                           false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
30472         if (ret) {
30473                 DRM_ERROR("failed to allocate VRAM BO\n");
30474                 return ret;
30475 diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
30476 index 10738e04c09b..3f432ec8e771 100644
30477 --- a/drivers/gpu/drm/qxl/qxl_display.c
30478 +++ b/drivers/gpu/drm/qxl/qxl_display.c
30479 @@ -798,8 +798,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
30480                                 qdev->dumb_shadow_bo = NULL;
30481                         }
30482                         qxl_bo_create(qdev, surf.height * surf.stride,
30483 -                                     true, true, QXL_GEM_DOMAIN_SURFACE, &surf,
30484 -                                     &qdev->dumb_shadow_bo);
30485 +                                     true, true, QXL_GEM_DOMAIN_SURFACE, 0,
30486 +                                     &surf, &qdev->dumb_shadow_bo);
30487                 }
30488                 if (user_bo->shadow != qdev->dumb_shadow_bo) {
30489                         if (user_bo->shadow) {
30490 @@ -1228,6 +1228,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
30492  void qxl_modeset_fini(struct qxl_device *qdev)
30494 +       if (qdev->dumb_shadow_bo) {
30495 +               drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
30496 +               qdev->dumb_shadow_bo = NULL;
30497 +       }
30498         qxl_destroy_monitors_object(qdev);
30499         drm_mode_config_cleanup(&qdev->ddev);
30501 diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
30502 index 48e096285b4c..a08da0bd9098 100644
30503 --- a/drivers/gpu/drm/qxl/qxl_gem.c
30504 +++ b/drivers/gpu/drm/qxl/qxl_gem.c
30505 @@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
30506         /* At least align on page size */
30507         if (alignment < PAGE_SIZE)
30508                 alignment = PAGE_SIZE;
30509 -       r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
30510 +       r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);
30511         if (r) {
30512                 if (r != -ERESTARTSYS)
30513                         DRM_ERROR(
30514 diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
30515 index ceebc5881f68..a5806667697a 100644
30516 --- a/drivers/gpu/drm/qxl/qxl_object.c
30517 +++ b/drivers/gpu/drm/qxl/qxl_object.c
30518 @@ -103,8 +103,8 @@ static const struct drm_gem_object_funcs qxl_object_funcs = {
30519         .print_info = drm_gem_ttm_print_info,
30520  };
30522 -int qxl_bo_create(struct qxl_device *qdev,
30523 -                 unsigned long size, bool kernel, bool pinned, u32 domain,
30524 +int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
30525 +                 bool kernel, bool pinned, u32 domain, u32 priority,
30526                   struct qxl_surface *surf,
30527                   struct qxl_bo **bo_ptr)
30529 @@ -137,6 +137,7 @@ int qxl_bo_create(struct qxl_device *qdev,
30531         qxl_ttm_placement_from_domain(bo, domain);
30533 +       bo->tbo.priority = priority;
30534         r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
30535                                  &bo->placement, 0, &ctx, size,
30536                                  NULL, NULL, &qxl_ttm_bo_destroy);
30537 diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
30538 index e60a8f88e226..dc1659e717f1 100644
30539 --- a/drivers/gpu/drm/qxl/qxl_object.h
30540 +++ b/drivers/gpu/drm/qxl/qxl_object.h
30541 @@ -61,6 +61,7 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
30542  extern int qxl_bo_create(struct qxl_device *qdev,
30543                          unsigned long size,
30544                          bool kernel, bool pinned, u32 domain,
30545 +                        u32 priority,
30546                          struct qxl_surface *surf,
30547                          struct qxl_bo **bo_ptr);
30548  extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map);
30549 diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
30550 index b372455e2729..801ce77b1dac 100644
30551 --- a/drivers/gpu/drm/qxl/qxl_release.c
30552 +++ b/drivers/gpu/drm/qxl/qxl_release.c
30553 @@ -199,11 +199,12 @@ qxl_release_free(struct qxl_device *qdev,
30556  static int qxl_release_bo_alloc(struct qxl_device *qdev,
30557 -                               struct qxl_bo **bo)
30558 +                               struct qxl_bo **bo,
30559 +                               u32 priority)
30561         /* pin releases bo's they are too messy to evict */
30562         return qxl_bo_create(qdev, PAGE_SIZE, false, true,
30563 -                            QXL_GEM_DOMAIN_VRAM, NULL, bo);
30564 +                            QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
30567  int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
30568 @@ -326,13 +327,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
30569         int ret = 0;
30570         union qxl_release_info *info;
30571         int cur_idx;
30572 +       u32 priority;
30574 -       if (type == QXL_RELEASE_DRAWABLE)
30575 +       if (type == QXL_RELEASE_DRAWABLE) {
30576                 cur_idx = 0;
30577 -       else if (type == QXL_RELEASE_SURFACE_CMD)
30578 +               priority = 0;
30579 +       } else if (type == QXL_RELEASE_SURFACE_CMD) {
30580                 cur_idx = 1;
30581 -       else if (type == QXL_RELEASE_CURSOR_CMD)
30582 +               priority = 1;
30583 +       } else if (type == QXL_RELEASE_CURSOR_CMD) {
30584                 cur_idx = 2;
30585 +               priority = 1;
30586 +       }
30587         else {
30588                 DRM_ERROR("got illegal type: %d\n", type);
30589                 return -EINVAL;
30590 @@ -352,7 +358,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
30591                 qdev->current_release_bo[cur_idx] = NULL;
30592         }
30593         if (!qdev->current_release_bo[cur_idx]) {
30594 -               ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
30595 +               ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
30596                 if (ret) {
30597                         mutex_unlock(&qdev->release_mutex);
30598                         if (free_bo) {
30599 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30600 index 3effc8c71494..ea44423376c4 100644
30601 --- a/drivers/gpu/drm/radeon/radeon.h
30602 +++ b/drivers/gpu/drm/radeon/radeon.h
30603 @@ -1558,6 +1558,7 @@ struct radeon_dpm {
30604         void                    *priv;
30605         u32                     new_active_crtcs;
30606         int                     new_active_crtc_count;
30607 +       int                     high_pixelclock_count;
30608         u32                     current_active_crtcs;
30609         int                     current_active_crtc_count;
30610         bool single_display;
30611 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
30612 index 42301b4e56f5..28c4413f4dc8 100644
30613 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
30614 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
30615 @@ -2120,11 +2120,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
30616                 return state_index;
30617         /* last mode is usually default, array is low to high */
30618         for (i = 0; i < num_modes; i++) {
30619 -               rdev->pm.power_state[state_index].clock_info =
30620 -                       kcalloc(1, sizeof(struct radeon_pm_clock_info),
30621 -                               GFP_KERNEL);
30622 +               /* avoid memory leaks from invalid modes or unknown frev. */
30623 +               if (!rdev->pm.power_state[state_index].clock_info) {
30624 +                       rdev->pm.power_state[state_index].clock_info =
30625 +                               kzalloc(sizeof(struct radeon_pm_clock_info),
30626 +                                       GFP_KERNEL);
30627 +               }
30628                 if (!rdev->pm.power_state[state_index].clock_info)
30629 -                       return state_index;
30630 +                       goto out;
30631                 rdev->pm.power_state[state_index].num_clock_modes = 1;
30632                 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
30633                 switch (frev) {
30634 @@ -2243,17 +2246,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
30635                         break;
30636                 }
30637         }
30638 +out:
30639 +       /* free any unused clock_info allocation. */
30640 +       if (state_index && state_index < num_modes) {
30641 +               kfree(rdev->pm.power_state[state_index].clock_info);
30642 +               rdev->pm.power_state[state_index].clock_info = NULL;
30643 +       }
30645         /* last mode is usually default */
30646 -       if (rdev->pm.default_power_state_index == -1) {
30647 +       if (state_index && rdev->pm.default_power_state_index == -1) {
30648                 rdev->pm.power_state[state_index - 1].type =
30649                         POWER_STATE_TYPE_DEFAULT;
30650                 rdev->pm.default_power_state_index = state_index - 1;
30651                 rdev->pm.power_state[state_index - 1].default_clock_mode =
30652                         &rdev->pm.power_state[state_index - 1].clock_info[0];
30653 -               rdev->pm.power_state[state_index].flags &=
30654 +               rdev->pm.power_state[state_index - 1].flags &=
30655                         ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
30656 -               rdev->pm.power_state[state_index].misc = 0;
30657 -               rdev->pm.power_state[state_index].misc2 = 0;
30658 +               rdev->pm.power_state[state_index - 1].misc = 0;
30659 +               rdev->pm.power_state[state_index - 1].misc2 = 0;
30660         }
30661         return state_index;
30663 diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
30664 index 2c32186c4acd..4e4c937c36c6 100644
30665 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
30666 +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
30667 @@ -242,6 +242,9 @@ radeon_dp_mst_detect(struct drm_connector *connector,
30668                 to_radeon_connector(connector);
30669         struct radeon_connector *master = radeon_connector->mst_port;
30671 +       if (drm_connector_is_unregistered(connector))
30672 +               return connector_status_disconnected;
30674         return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
30675                                       radeon_connector->port);
30677 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
30678 index 2479d6ab7a36..58876bb4ef2a 100644
30679 --- a/drivers/gpu/drm/radeon/radeon_kms.c
30680 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
30681 @@ -518,6 +518,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
30682                         *value = rdev->config.si.backend_enable_mask;
30683                 } else {
30684                         DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
30685 +                       return -EINVAL;
30686                 }
30687                 break;
30688         case RADEON_INFO_MAX_SCLK:
30689 diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
30690 index 9b81786782de..499ce55e34cc 100644
30691 --- a/drivers/gpu/drm/radeon/radeon_object.c
30692 +++ b/drivers/gpu/drm/radeon/radeon_object.c
30693 @@ -384,6 +384,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
30694         }
30695  #endif
30696         man = ttm_manager_type(bdev, TTM_PL_VRAM);
30697 +       if (!man)
30698 +               return 0;
30699         return ttm_resource_manager_evict_all(bdev, man);
30702 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
30703 index 1995dad59dd0..2db4a8b1542d 100644
30704 --- a/drivers/gpu/drm/radeon/radeon_pm.c
30705 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
30706 @@ -1775,6 +1775,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
30707         struct drm_device *ddev = rdev->ddev;
30708         struct drm_crtc *crtc;
30709         struct radeon_crtc *radeon_crtc;
30710 +       struct radeon_connector *radeon_connector;
30712         if (!rdev->pm.dpm_enabled)
30713                 return;
30714 @@ -1784,6 +1785,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
30715         /* update active crtc counts */
30716         rdev->pm.dpm.new_active_crtcs = 0;
30717         rdev->pm.dpm.new_active_crtc_count = 0;
30718 +       rdev->pm.dpm.high_pixelclock_count = 0;
30719         if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
30720                 list_for_each_entry(crtc,
30721                                     &ddev->mode_config.crtc_list, head) {
30722 @@ -1791,6 +1793,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
30723                         if (crtc->enabled) {
30724                                 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
30725                                 rdev->pm.dpm.new_active_crtc_count++;
30726 +                               if (!radeon_crtc->connector)
30727 +                                       continue;
30729 +                               radeon_connector = to_radeon_connector(radeon_crtc->connector);
30730 +                               if (radeon_connector->pixelclock_for_modeset > 297000)
30731 +                                       rdev->pm.dpm.high_pixelclock_count++;
30732                         }
30733                 }
30734         }
30735 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30736 index 78893bea85ae..c0258d213a72 100644
30737 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
30738 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30739 @@ -485,13 +485,14 @@ static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt
30740         struct radeon_ttm_tt *gtt = (void *)ttm;
30741         struct radeon_device *rdev = radeon_get_rdev(bdev);
30743 +       if (gtt->userptr)
30744 +               radeon_ttm_tt_unpin_userptr(bdev, ttm);
30746         if (!gtt->bound)
30747                 return;
30749         radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
30751 -       if (gtt->userptr)
30752 -               radeon_ttm_tt_unpin_userptr(bdev, ttm);
30753         gtt->bound = false;
30756 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
30757 index 91bfc4762767..43b63705d073 100644
30758 --- a/drivers/gpu/drm/radeon/si_dpm.c
30759 +++ b/drivers/gpu/drm/radeon/si_dpm.c
30760 @@ -2979,6 +2979,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
30761                     (rdev->pdev->device == 0x6605)) {
30762                         max_sclk = 75000;
30763                 }
30765 +               if (rdev->pm.dpm.high_pixelclock_count > 1)
30766 +                       disable_sclk_switching = true;
30767         }
30769         if (rps->vce_active) {
30770 diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
30771 index 7812094f93d6..6f3b523e16e8 100644
30772 --- a/drivers/gpu/drm/stm/ltdc.c
30773 +++ b/drivers/gpu/drm/stm/ltdc.c
30774 @@ -525,13 +525,42 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
30776         struct ltdc_device *ldev = crtc_to_ltdc(crtc);
30777         struct drm_device *ddev = crtc->dev;
30778 +       struct drm_connector_list_iter iter;
30779 +       struct drm_connector *connector = NULL;
30780 +       struct drm_encoder *encoder = NULL;
30781 +       struct drm_bridge *bridge = NULL;
30782         struct drm_display_mode *mode = &crtc->state->adjusted_mode;
30783         struct videomode vm;
30784         u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
30785         u32 total_width, total_height;
30786 +       u32 bus_flags = 0;
30787         u32 val;
30788         int ret;
30790 +       /* get encoder from crtc */
30791 +       drm_for_each_encoder(encoder, ddev)
30792 +               if (encoder->crtc == crtc)
30793 +                       break;
30795 +       if (encoder) {
30796 +               /* get bridge from encoder */
30797 +               list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
30798 +                       if (bridge->encoder == encoder)
30799 +                               break;
30801 +               /* Get the connector from encoder */
30802 +               drm_connector_list_iter_begin(ddev, &iter);
30803 +               drm_for_each_connector_iter(connector, &iter)
30804 +                       if (connector->encoder == encoder)
30805 +                               break;
30806 +               drm_connector_list_iter_end(&iter);
30807 +       }
30809 +       if (bridge && bridge->timings)
30810 +               bus_flags = bridge->timings->input_bus_flags;
30811 +       else if (connector)
30812 +               bus_flags = connector->display_info.bus_flags;
30814         if (!pm_runtime_active(ddev->dev)) {
30815                 ret = pm_runtime_get_sync(ddev->dev);
30816                 if (ret) {
30817 @@ -567,10 +596,10 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
30818         if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
30819                 val |= GCR_VSPOL;
30821 -       if (vm.flags & DISPLAY_FLAGS_DE_LOW)
30822 +       if (bus_flags & DRM_BUS_FLAG_DE_LOW)
30823                 val |= GCR_DEPOL;
30825 -       if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
30826 +       if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
30827                 val |= GCR_PCPOL;
30829         reg_update_bits(ldev->regs, LTDC_GCR,
30830 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
30831 index 30213708fc99..d99afd19ca08 100644
30832 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
30833 +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
30834 @@ -515,6 +515,15 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
30836         drm_crtc_vblank_off(crtc);
30838 +       spin_lock_irq(&crtc->dev->event_lock);
30840 +       if (crtc->state->event) {
30841 +               drm_crtc_send_vblank_event(crtc, crtc->state->event);
30842 +               crtc->state->event = NULL;
30843 +       }
30845 +       spin_unlock_irq(&crtc->dev->event_lock);
30847         tilcdc_crtc_disable_irqs(dev);
30849         pm_runtime_put_sync(dev->dev);
30850 diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
30851 index 23eb6d772e40..669f2ee39515 100644
30852 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
30853 +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
30854 @@ -174,7 +174,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
30855                 if (!sync_file) {
30856                         dma_fence_put(&out_fence->f);
30857                         ret = -ENOMEM;
30858 -                       goto out_memdup;
30859 +                       goto out_unresv;
30860                 }
30862                 exbuf->fence_fd = out_fence_fd;
30863 diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
30864 index d69a5b6da553..4ff1ec28e630 100644
30865 --- a/drivers/gpu/drm/virtio/virtgpu_object.c
30866 +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
30867 @@ -248,6 +248,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
30869         ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
30870         if (ret != 0) {
30871 +               virtio_gpu_array_put_free(objs);
30872                 virtio_gpu_free_object(&shmem_obj->base);
30873                 return ret;
30874         }
30875 diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
30876 index 0443b7deeaef..758d8a98d96b 100644
30877 --- a/drivers/gpu/drm/vkms/vkms_crtc.c
30878 +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
30879 @@ -18,7 +18,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
30881         ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
30882                                           output->period_ns);
30883 -       WARN_ON(ret_overrun != 1);
30884 +       if (ret_overrun != 1)
30885 +               pr_warn("%s: vblank timer overrun\n", __func__);
30887         spin_lock(&output->lock);
30888         ret = drm_crtc_handle_vblank(crtc);
30889 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30890 index 6c2a569f1fcb..8d7feeb0d7ab 100644
30891 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30892 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30893 @@ -201,7 +201,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
30894                         break;
30895                 }
30896                 if (lazy)
30897 -                       schedule_timeout(1);
30898 +                       schedule_min_hrtimeout();
30899                 else if ((++count & 0x0F) == 0) {
30900                         /**
30901                          * FIXME: Use schedule_hr_timeout here for
30902 diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
30903 index 99158ee67d02..59d1fb017da0 100644
30904 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
30905 +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
30906 @@ -866,7 +866,7 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp)
30907                 return ret;
30909         zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1);
30910 -       memset(dp->train_set, 0, 4);
30911 +       memset(dp->train_set, 0, sizeof(dp->train_set));
30912         ret = zynqmp_dp_link_train_cr(dp);
30913         if (ret)
30914                 return ret;
30915 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
30916 index 67fd8a2f5aba..ba338973e968 100644
30917 --- a/drivers/hid/hid-ids.h
30918 +++ b/drivers/hid/hid-ids.h
30919 @@ -946,6 +946,7 @@
30920  #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S   0x8003
30922  #define USB_VENDOR_ID_PLANTRONICS      0x047f
30923 +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES        0xc056
30925  #define USB_VENDOR_ID_PANASONIC                0x04da
30926  #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
30927 diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
30928 index c6c8e20f3e8d..0ff03fed9770 100644
30929 --- a/drivers/hid/hid-lenovo.c
30930 +++ b/drivers/hid/hid-lenovo.c
30931 @@ -33,6 +33,9 @@
30933  #include "hid-ids.h"
30935 +/* Userspace expects F20 for mic-mute KEY_MICMUTE does not work */
30936 +#define LENOVO_KEY_MICMUTE KEY_F20
30938  struct lenovo_drvdata {
30939         u8 led_report[3]; /* Must be first for proper alignment */
30940         int led_state;
30941 @@ -62,8 +65,8 @@ struct lenovo_drvdata {
30942  #define TP10UBKBD_LED_OFF              1
30943  #define TP10UBKBD_LED_ON               2
30945 -static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
30946 -                                    enum led_brightness value)
30947 +static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
30948 +                                   enum led_brightness value)
30950         struct lenovo_drvdata *data = hid_get_drvdata(hdev);
30951         int ret;
30952 @@ -75,10 +78,18 @@ static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
30953         data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF;
30954         ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3,
30955                                  HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
30956 -       if (ret)
30957 -               hid_err(hdev, "Set LED output report error: %d\n", ret);
30958 +       if (ret != 3) {
30959 +               if (ret != -ENODEV)
30960 +                       hid_err(hdev, "Set LED output report error: %d\n", ret);
30962 +               ret = ret < 0 ? ret : -EIO;
30963 +       } else {
30964 +               ret = 0;
30965 +       }
30967         mutex_unlock(&data->led_report_mutex);
30969 +       return ret;
30972  static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work)
30973 @@ -126,7 +137,7 @@ static int lenovo_input_mapping_tpkbd(struct hid_device *hdev,
30974         if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
30975                 /* This sub-device contains trackpoint, mark it */
30976                 hid_set_drvdata(hdev, (void *)1);
30977 -               map_key_clear(KEY_MICMUTE);
30978 +               map_key_clear(LENOVO_KEY_MICMUTE);
30979                 return 1;
30980         }
30981         return 0;
30982 @@ -141,7 +152,7 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
30983             (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
30984                 switch (usage->hid & HID_USAGE) {
30985                 case 0x00f1: /* Fn-F4: Mic mute */
30986 -                       map_key_clear(KEY_MICMUTE);
30987 +                       map_key_clear(LENOVO_KEY_MICMUTE);
30988                         return 1;
30989                 case 0x00f2: /* Fn-F5: Brightness down */
30990                         map_key_clear(KEY_BRIGHTNESSDOWN);
30991 @@ -231,7 +242,7 @@ static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
30992                         map_key_clear(KEY_FN_ESC);
30993                         return 1;
30994                 case 9: /* Fn-F4: Mic mute */
30995 -                       map_key_clear(KEY_MICMUTE);
30996 +                       map_key_clear(LENOVO_KEY_MICMUTE);
30997                         return 1;
30998                 case 10: /* Fn-F7: Control panel */
30999                         map_key_clear(KEY_CONFIG);
31000 @@ -349,7 +360,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
31002         struct hid_device *hdev = to_hid_device(dev);
31003         struct lenovo_drvdata *data = hid_get_drvdata(hdev);
31004 -       int value;
31005 +       int value, ret;
31007         if (kstrtoint(buf, 10, &value))
31008                 return -EINVAL;
31009 @@ -364,7 +375,9 @@ static ssize_t attr_fn_lock_store(struct device *dev,
31010                 lenovo_features_set_cptkbd(hdev);
31011                 break;
31012         case USB_DEVICE_ID_LENOVO_TP10UBKBD:
31013 -               lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
31014 +               ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
31015 +               if (ret)
31016 +                       return ret;
31017                 break;
31018         }
31020 @@ -498,6 +511,9 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
31021  static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
31022                 struct hid_usage *usage, __s32 value)
31024 +       if (!hid_get_drvdata(hdev))
31025 +               return 0;
31027         switch (hdev->product) {
31028         case USB_DEVICE_ID_LENOVO_CUSBKBD:
31029         case USB_DEVICE_ID_LENOVO_CBTKBD:
31030 @@ -777,7 +793,7 @@ static enum led_brightness lenovo_led_brightness_get(
31031                                 : LED_OFF;
31034 -static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
31035 +static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
31036                         enum led_brightness value)
31038         struct device *dev = led_cdev->dev->parent;
31039 @@ -785,6 +801,7 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
31040         struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
31041         u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
31042         int led_nr = 0;
31043 +       int ret = 0;
31045         if (led_cdev == &data_pointer->led_micmute)
31046                 led_nr = 1;
31047 @@ -799,9 +816,11 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
31048                 lenovo_led_set_tpkbd(hdev);
31049                 break;
31050         case USB_DEVICE_ID_LENOVO_TP10UBKBD:
31051 -               lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
31052 +               ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
31053                 break;
31054         }
31056 +       return ret;
31059  static int lenovo_register_leds(struct hid_device *hdev)
31060 @@ -822,7 +841,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
31062         data->led_mute.name = name_mute;
31063         data->led_mute.brightness_get = lenovo_led_brightness_get;
31064 -       data->led_mute.brightness_set = lenovo_led_brightness_set;
31065 +       data->led_mute.brightness_set_blocking = lenovo_led_brightness_set;
31066 +       data->led_mute.flags = LED_HW_PLUGGABLE;
31067         data->led_mute.dev = &hdev->dev;
31068         ret = led_classdev_register(&hdev->dev, &data->led_mute);
31069         if (ret < 0)
31070 @@ -830,7 +850,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
31072         data->led_micmute.name = name_micm;
31073         data->led_micmute.brightness_get = lenovo_led_brightness_get;
31074 -       data->led_micmute.brightness_set = lenovo_led_brightness_set;
31075 +       data->led_micmute.brightness_set_blocking = lenovo_led_brightness_set;
31076 +       data->led_micmute.flags = LED_HW_PLUGGABLE;
31077         data->led_micmute.dev = &hdev->dev;
31078         ret = led_classdev_register(&hdev->dev, &data->led_micmute);
31079         if (ret < 0) {
31080 diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
31081 index 85b685efc12f..e81b7cec2d12 100644
31082 --- a/drivers/hid/hid-plantronics.c
31083 +++ b/drivers/hid/hid-plantronics.c
31084 @@ -13,6 +13,7 @@
31086  #include <linux/hid.h>
31087  #include <linux/module.h>
31088 +#include <linux/jiffies.h>
31090  #define PLT_HID_1_0_PAGE       0xffa00000
31091  #define PLT_HID_2_0_PAGE       0xffa20000
31092 @@ -36,6 +37,16 @@
31093  #define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
31094                             (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
31096 +#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
31098 +#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
31100 +struct plt_drv_data {
31101 +       unsigned long device_type;
31102 +       unsigned long last_volume_key_ts;
31103 +       u32 quirks;
31106  static int plantronics_input_mapping(struct hid_device *hdev,
31107                                      struct hid_input *hi,
31108                                      struct hid_field *field,
31109 @@ -43,7 +54,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
31110                                      unsigned long **bit, int *max)
31112         unsigned short mapped_key;
31113 -       unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
31114 +       struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
31115 +       unsigned long plt_type = drv_data->device_type;
31117         /* special case for PTT products */
31118         if (field->application == HID_GD_JOYSTICK)
31119 @@ -105,6 +117,30 @@ static int plantronics_input_mapping(struct hid_device *hdev,
31120         return 1;
31123 +static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
31124 +                            struct hid_usage *usage, __s32 value)
31126 +       struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
31128 +       if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
31129 +               unsigned long prev_ts, cur_ts;
31131 +               /* Usages are filtered in plantronics_usages. */
31133 +               if (!value) /* Handle key presses only. */
31134 +                       return 0;
31136 +               prev_ts = drv_data->last_volume_key_ts;
31137 +               cur_ts = jiffies;
31138 +               if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
31139 +                       return 1; /* Ignore the repeated key. */
31141 +               drv_data->last_volume_key_ts = cur_ts;
31142 +       }
31144 +       return 0;
31147  static unsigned long plantronics_device_type(struct hid_device *hdev)
31149         unsigned i, col_page;
31150 @@ -133,15 +169,24 @@ static unsigned long plantronics_device_type(struct hid_device *hdev)
31151  static int plantronics_probe(struct hid_device *hdev,
31152                              const struct hid_device_id *id)
31154 +       struct plt_drv_data *drv_data;
31155         int ret;
31157 +       drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
31158 +       if (!drv_data)
31159 +               return -ENOMEM;
31161         ret = hid_parse(hdev);
31162         if (ret) {
31163                 hid_err(hdev, "parse failed\n");
31164                 goto err;
31165         }
31167 -       hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev));
31168 +       drv_data->device_type = plantronics_device_type(hdev);
31169 +       drv_data->quirks = id->driver_data;
31170 +       drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
31172 +       hid_set_drvdata(hdev, drv_data);
31174         ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
31175                 HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE);
31176 @@ -153,15 +198,26 @@ static int plantronics_probe(struct hid_device *hdev,
31179  static const struct hid_device_id plantronics_devices[] = {
31180 +       { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
31181 +                                        USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
31182 +               .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
31183         { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
31184         { }
31185  };
31186  MODULE_DEVICE_TABLE(hid, plantronics_devices);
31188 +static const struct hid_usage_id plantronics_usages[] = {
31189 +       { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
31190 +       { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
31191 +       { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
31194  static struct hid_driver plantronics_driver = {
31195         .name = "plantronics",
31196         .id_table = plantronics_devices,
31197 +       .usage_table = plantronics_usages,
31198         .input_mapping = plantronics_input_mapping,
31199 +       .event = plantronics_event,
31200         .probe = plantronics_probe,
31201  };
31202  module_hid_driver(plantronics_driver);
31203 diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
31204 index c3fb5beb846e..ec90713564e3 100644
31205 --- a/drivers/hsi/hsi_core.c
31206 +++ b/drivers/hsi/hsi_core.c
31207 @@ -210,8 +210,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
31208         if (err)
31209                 goto err;
31211 -       dev_set_name(&cl->device, "%s", name);
31213         err = hsi_of_property_parse_mode(client, "hsi-mode", &mode);
31214         if (err) {
31215                 err = hsi_of_property_parse_mode(client, "hsi-rx-mode",
31216 @@ -293,6 +291,7 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
31217         cl->device.release = hsi_client_release;
31218         cl->device.of_node = client;
31220 +       dev_set_name(&cl->device, "%s", name);
31221         if (device_register(&cl->device) < 0) {
31222                 pr_err("hsi: failed to register client: %s\n", name);
31223                 put_device(&cl->device);
31224 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31225 index 0bd202de7960..945e41f5e3a8 100644
31226 --- a/drivers/hv/channel.c
31227 +++ b/drivers/hv/channel.c
31228 @@ -653,7 +653,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
31230         if (newchannel->rescind) {
31231                 err = -ENODEV;
31232 -               goto error_free_info;
31233 +               goto error_clean_msglist;
31234         }
31236         err = vmbus_post_msg(open_msg,
31237 diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
31238 index f0ed730e2e4e..ecebf1235fd5 100644
31239 --- a/drivers/hv/channel_mgmt.c
31240 +++ b/drivers/hv/channel_mgmt.c
31241 @@ -756,6 +756,12 @@ static void init_vp_index(struct vmbus_channel *channel)
31242         free_cpumask_var(available_mask);
31245 +#define UNLOAD_DELAY_UNIT_MS   10              /* 10 milliseconds */
31246 +#define UNLOAD_WAIT_MS         (100*1000)      /* 100 seconds */
31247 +#define UNLOAD_WAIT_LOOPS      (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
31248 +#define UNLOAD_MSG_MS          (5*1000)        /* Every 5 seconds */
31249 +#define UNLOAD_MSG_LOOPS       (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
31251  static void vmbus_wait_for_unload(void)
31253         int cpu;
31254 @@ -773,12 +779,17 @@ static void vmbus_wait_for_unload(void)
31255          * vmbus_connection.unload_event. If not, the last thing we can do is
31256          * read message pages for all CPUs directly.
31257          *
31258 -        * Wait no more than 10 seconds so that the panic path can't get
31259 -        * hung forever in case the response message isn't seen.
31260 +        * Wait up to 100 seconds since an Azure host must writeback any dirty
31261 +        * data in its disk cache before the VMbus UNLOAD request will
31262 +        * complete. This flushing has been empirically observed to take up
31263 +        * to 50 seconds in cases with a lot of dirty data, so allow additional
31264 +        * leeway and for inaccuracies in mdelay(). But eventually time out so
31265 +        * that the panic path can't get hung forever in case the response
31266 +        * message isn't seen.
31267          */
31268 -       for (i = 0; i < 1000; i++) {
31269 +       for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
31270                 if (completion_done(&vmbus_connection.unload_event))
31271 -                       break;
31272 +                       goto completed;
31274                 for_each_online_cpu(cpu) {
31275                         struct hv_per_cpu_context *hv_cpu
31276 @@ -801,9 +812,18 @@ static void vmbus_wait_for_unload(void)
31277                         vmbus_signal_eom(msg, message_type);
31278                 }
31280 -               mdelay(10);
31281 +               /*
31282 +                * Give a notice periodically so someone watching the
31283 +                * serial output won't think it is completely hung.
31284 +                */
31285 +               if (!(i % UNLOAD_MSG_LOOPS))
31286 +                       pr_notice("Waiting for VMBus UNLOAD to complete\n");
31288 +               mdelay(UNLOAD_DELAY_UNIT_MS);
31289         }
31290 +       pr_err("Continuing even though VMBus UNLOAD did not complete\n");
31292 +completed:
31293         /*
31294          * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
31295          * maybe-pending messages on all CPUs to be able to receive new
31296 diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
31297 index 35833d4d1a1d..ecd82ebfd5bc 100644
31298 --- a/drivers/hv/ring_buffer.c
31299 +++ b/drivers/hv/ring_buffer.c
31300 @@ -313,7 +313,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
31301                 rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
31302                 if (rqst_id == VMBUS_RQST_ERROR) {
31303                         spin_unlock_irqrestore(&outring_info->ring_lock, flags);
31304 -                       pr_err("No request id available\n");
31305                         return -EAGAIN;
31306                 }
31307         }
31308 diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
31309 index 29f5fed28c2a..974cb08c7aa7 100644
31310 --- a/drivers/hwmon/fam15h_power.c
31311 +++ b/drivers/hwmon/fam15h_power.c
31312 @@ -221,7 +221,7 @@ static ssize_t power1_average_show(struct device *dev,
31313                 prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
31314         }
31316 -       leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
31317 +       leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
31318         if (leftover)
31319                 return 0;
31321 diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
31322 index 4382105bf142..2a4bed0ab226 100644
31323 --- a/drivers/hwmon/ltc2992.c
31324 +++ b/drivers/hwmon/ltc2992.c
31325 @@ -900,11 +900,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
31327         fwnode_for_each_available_child_node(fwnode, child) {
31328                 ret = fwnode_property_read_u32(child, "reg", &addr);
31329 -               if (ret < 0)
31330 +               if (ret < 0) {
31331 +                       fwnode_handle_put(child);
31332                         return ret;
31333 +               }
31335 -               if (addr > 1)
31336 +               if (addr > 1) {
31337 +                       fwnode_handle_put(child);
31338                         return -EINVAL;
31339 +               }
31341                 ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
31342                 if (!ret)
31343 diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
31344 index 7a5e539b567b..580e63d7daa0 100644
31345 --- a/drivers/hwmon/occ/common.c
31346 +++ b/drivers/hwmon/occ/common.c
31347 @@ -217,9 +217,9 @@ int occ_update_response(struct occ *occ)
31348                 return rc;
31350         /* limit the maximum rate of polling the OCC */
31351 -       if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
31352 +       if (time_after(jiffies, occ->next_update)) {
31353                 rc = occ_poll(occ);
31354 -               occ->last_update = jiffies;
31355 +               occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
31356         } else {
31357                 rc = occ->last_error;
31358         }
31359 @@ -1164,6 +1164,7 @@ int occ_setup(struct occ *occ, const char *name)
31360                 return rc;
31361         }
31363 +       occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
31364         occ_parse_poll_response(occ);
31366         rc = occ_setup_sensor_attrs(occ);
31367 diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
31368 index 67e6968b8978..e6df719770e8 100644
31369 --- a/drivers/hwmon/occ/common.h
31370 +++ b/drivers/hwmon/occ/common.h
31371 @@ -99,7 +99,7 @@ struct occ {
31372         u8 poll_cmd_data;               /* to perform OCC poll command */
31373         int (*send_cmd)(struct occ *occ, u8 *cmd);
31375 -       unsigned long last_update;
31376 +       unsigned long next_update;
31377         struct mutex lock;              /* lock OCC access */
31379         struct device *hwmon;
31380 diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
31381 index da27ce34ee3f..eb4a06003b7f 100644
31382 --- a/drivers/hwmon/pmbus/pxe1610.c
31383 +++ b/drivers/hwmon/pmbus/pxe1610.c
31384 @@ -41,6 +41,15 @@ static int pxe1610_identify(struct i2c_client *client,
31385                                 info->vrm_version[i] = vr13;
31386                                 break;
31387                         default:
31388 +                               /*
31389 +                                * If prior pages are available limit operation
31390 +                                * to them
31391 +                                */
31392 +                               if (i != 0) {
31393 +                                       info->pages = i;
31394 +                                       return 0;
31395 +                               }
31397                                 return -ENODEV;
31398                         }
31399                 }
31400 diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
31401 index 0f603b4094f2..a706ba11b93e 100644
31402 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c
31403 +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
31404 @@ -52,7 +52,7 @@ static ssize_t format_attr_contextid_show(struct device *dev,
31406         int pid_fmt = ETM_OPT_CTXTID;
31408 -#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X)
31409 +#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
31410         pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
31411  #endif
31412         return sprintf(page, "config:%d\n", pid_fmt);
31413 diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
31414 index 3629b7885aca..c594f45319fc 100644
31415 --- a/drivers/hwtracing/coresight/coresight-platform.c
31416 +++ b/drivers/hwtracing/coresight/coresight-platform.c
31417 @@ -90,6 +90,12 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
31418         struct of_endpoint endpoint;
31419         int in = 0, out = 0;
31421 +       /*
31422 +        * Avoid warnings in of_graph_get_next_endpoint()
31423 +        * if the device doesn't have any graph connections
31424 +        */
31425 +       if (!of_graph_is_present(node))
31426 +               return;
31427         do {
31428                 ep = of_graph_get_next_endpoint(node, ep);
31429                 if (!ep)
31430 diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
31431 index f72803a02391..28509b02a0b5 100644
31432 --- a/drivers/hwtracing/intel_th/gth.c
31433 +++ b/drivers/hwtracing/intel_th/gth.c
31434 @@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
31435         output->active = false;
31437         for_each_set_bit(master, gth->output[output->port].master,
31438 -                        TH_CONFIGURABLE_MASTERS) {
31439 +                        TH_CONFIGURABLE_MASTERS + 1) {
31440                 gth_master_set(gth, master, -1);
31441         }
31442         spin_unlock(&gth->gth_lock);
31443 @@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
31444         othdev->output.port = -1;
31445         othdev->output.active = false;
31446         gth->output[port].output = NULL;
31447 -       for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
31448 +       for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
31449                 if (gth->master[master] == port)
31450                         gth->master[master] = -1;
31451         spin_unlock(&gth->gth_lock);
31452 diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
31453 index 251e75c9ba9d..817cdb29bbd8 100644
31454 --- a/drivers/hwtracing/intel_th/pci.c
31455 +++ b/drivers/hwtracing/intel_th/pci.c
31456 @@ -273,11 +273,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
31457                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
31458                 .driver_data = (kernel_ulong_t)&intel_th_2x,
31459         },
31460 +       {
31461 +               /* Alder Lake-M */
31462 +               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
31463 +               .driver_data = (kernel_ulong_t)&intel_th_2x,
31464 +       },
31465         {
31466                 /* Alder Lake CPU */
31467                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
31468                 .driver_data = (kernel_ulong_t)&intel_th_2x,
31469         },
31470 +       {
31471 +               /* Rocket Lake CPU */
31472 +               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
31473 +               .driver_data = (kernel_ulong_t)&intel_th_2x,
31474 +       },
31475         { 0 },
31476  };
31478 diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
31479 index e4b7f2a951ad..c1bbc4caeb5c 100644
31480 --- a/drivers/i2c/busses/i2c-cadence.c
31481 +++ b/drivers/i2c/busses/i2c-cadence.c
31482 @@ -789,7 +789,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
31483         bool change_role = false;
31484  #endif
31486 -       ret = pm_runtime_get_sync(id->dev);
31487 +       ret = pm_runtime_resume_and_get(id->dev);
31488         if (ret < 0)
31489                 return ret;
31491 @@ -911,7 +911,7 @@ static int cdns_reg_slave(struct i2c_client *slave)
31492         if (slave->flags & I2C_CLIENT_TEN)
31493                 return -EAFNOSUPPORT;
31495 -       ret = pm_runtime_get_sync(id->dev);
31496 +       ret = pm_runtime_resume_and_get(id->dev);
31497         if (ret < 0)
31498                 return ret;
31500 @@ -1200,7 +1200,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
31501         if (IS_ERR(id->membase))
31502                 return PTR_ERR(id->membase);
31504 -       id->irq = platform_get_irq(pdev, 0);
31505 +       ret = platform_get_irq(pdev, 0);
31506 +       if (ret < 0)
31507 +               return ret;
31508 +       id->irq = ret;
31510         id->adap.owner = THIS_MODULE;
31511         id->adap.dev.of_node = pdev->dev.of_node;
31512 diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
31513 index a08554c1a570..bdff0e6345d9 100644
31514 --- a/drivers/i2c/busses/i2c-emev2.c
31515 +++ b/drivers/i2c/busses/i2c-emev2.c
31516 @@ -395,7 +395,10 @@ static int em_i2c_probe(struct platform_device *pdev)
31518         em_i2c_reset(&priv->adap);
31520 -       priv->irq = platform_get_irq(pdev, 0);
31521 +       ret = platform_get_irq(pdev, 0);
31522 +       if (ret < 0)
31523 +               goto err_clk;
31524 +       priv->irq = ret;
31525         ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
31526                                 "em_i2c", priv);
31527         if (ret)
31528 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
31529 index 4acee6f9e5a3..99d446763530 100644
31530 --- a/drivers/i2c/busses/i2c-i801.c
31531 +++ b/drivers/i2c/busses/i2c-i801.c
31532 @@ -73,6 +73,7 @@
31533   * Comet Lake-V (PCH)          0xa3a3  32      hard    yes     yes     yes
31534   * Alder Lake-S (PCH)          0x7aa3  32      hard    yes     yes     yes
31535   * Alder Lake-P (PCH)          0x51a3  32      hard    yes     yes     yes
31536 + * Alder Lake-M (PCH)          0x54a3  32      hard    yes     yes     yes
31537   *
31538   * Features supported by this driver:
31539   * Software PEC                                no
31540 @@ -230,6 +231,7 @@
31541  #define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS         0x4b23
31542  #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS          0x4da3
31543  #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS         0x51a3
31544 +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS         0x54a3
31545  #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS              0x5ad4
31546  #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS         0x7aa3
31547  #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS            0x8c22
31548 @@ -1087,6 +1089,7 @@ static const struct pci_device_id i801_ids[] = {
31549         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
31550         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
31551         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) },
31552 +       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS) },
31553         { 0, }
31554  };
31556 @@ -1771,6 +1774,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
31557         case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
31558         case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
31559         case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS:
31560 +       case PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS:
31561                 priv->features |= FEATURE_BLOCK_PROC;
31562                 priv->features |= FEATURE_I2C_BLOCK_READ;
31563                 priv->features |= FEATURE_IRQ;
31564 diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
31565 index 98a89301ed2a..8e987945ed45 100644
31566 --- a/drivers/i2c/busses/i2c-img-scb.c
31567 +++ b/drivers/i2c/busses/i2c-img-scb.c
31568 @@ -1057,7 +1057,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
31569                         atomic = true;
31570         }
31572 -       ret = pm_runtime_get_sync(adap->dev.parent);
31573 +       ret = pm_runtime_resume_and_get(adap->dev.parent);
31574         if (ret < 0)
31575                 return ret;
31577 @@ -1158,7 +1158,7 @@ static int img_i2c_init(struct img_i2c *i2c)
31578         u32 rev;
31579         int ret;
31581 -       ret = pm_runtime_get_sync(i2c->adap.dev.parent);
31582 +       ret = pm_runtime_resume_and_get(i2c->adap.dev.parent);
31583         if (ret < 0)
31584                 return ret;
31586 diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
31587 index 9db6ccded5e9..8b9ba055c418 100644
31588 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c
31589 +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
31590 @@ -259,7 +259,7 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
31591         unsigned int temp;
31592         int ret;
31594 -       ret = pm_runtime_get_sync(lpi2c_imx->adapter.dev.parent);
31595 +       ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
31596         if (ret < 0)
31597                 return ret;
31599 diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
31600 index b80fdc1f0092..dc5ca71906db 100644
31601 --- a/drivers/i2c/busses/i2c-imx.c
31602 +++ b/drivers/i2c/busses/i2c-imx.c
31603 @@ -801,7 +801,7 @@ static int i2c_imx_reg_slave(struct i2c_client *client)
31604         i2c_imx->last_slave_event = I2C_SLAVE_STOP;
31606         /* Resume */
31607 -       ret = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
31608 +       ret = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
31609         if (ret < 0) {
31610                 dev_err(&i2c_imx->adapter.dev, "failed to resume i2c controller");
31611                 return ret;
31612 @@ -1253,7 +1253,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
31613         struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
31614         int result;
31616 -       result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
31617 +       result = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
31618         if (result < 0)
31619                 return result;
31621 @@ -1496,7 +1496,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
31622         struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
31623         int irq, ret;
31625 -       ret = pm_runtime_get_sync(&pdev->dev);
31626 +       ret = pm_runtime_resume_and_get(&pdev->dev);
31627         if (ret < 0)
31628                 return ret;
31630 diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
31631 index 55177eb21d7b..baa7319eee53 100644
31632 --- a/drivers/i2c/busses/i2c-jz4780.c
31633 +++ b/drivers/i2c/busses/i2c-jz4780.c
31634 @@ -825,7 +825,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
31636         jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
31638 -       i2c->irq = platform_get_irq(pdev, 0);
31639 +       ret = platform_get_irq(pdev, 0);
31640 +       if (ret < 0)
31641 +               goto err;
31642 +       i2c->irq = ret;
31643         ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
31644                                dev_name(&pdev->dev), i2c);
31645         if (ret)
31646 diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
31647 index 2fb0532d8a16..ab261d762dea 100644
31648 --- a/drivers/i2c/busses/i2c-mlxbf.c
31649 +++ b/drivers/i2c/busses/i2c-mlxbf.c
31650 @@ -2376,6 +2376,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
31651         mlxbf_i2c_init_slave(pdev, priv);
31653         irq = platform_get_irq(pdev, 0);
31654 +       if (irq < 0)
31655 +               return irq;
31656         ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
31657                                IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
31658                                dev_name(dev), priv);
31659 diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
31660 index 2ffd2f354d0a..bf25acba2ed5 100644
31661 --- a/drivers/i2c/busses/i2c-mt65xx.c
31662 +++ b/drivers/i2c/busses/i2c-mt65xx.c
31663 @@ -479,7 +479,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
31665         u16 control_reg;
31667 -       if (i2c->dev_comp->dma_sync) {
31668 +       if (i2c->dev_comp->apdma_sync) {
31669                 writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
31670                 udelay(10);
31671                 writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
31672 @@ -564,7 +564,7 @@ static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
31674  static int mtk_i2c_max_step_cnt(unsigned int target_speed)
31676 -       if (target_speed > I2C_MAX_FAST_MODE_FREQ)
31677 +       if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
31678                 return MAX_HS_STEP_CNT_DIV;
31679         else
31680                 return MAX_STEP_CNT_DIV;
31681 @@ -635,7 +635,7 @@ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c,
31682         if (sda_min > sda_max)
31683                 return -3;
31685 -       if (check_speed > I2C_MAX_FAST_MODE_FREQ) {
31686 +       if (check_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
31687                 if (i2c->dev_comp->ltiming_adjust) {
31688                         i2c->ac_timing.hs = I2C_TIME_DEFAULT_VALUE |
31689                                 (sample_cnt << 12) | (high_cnt << 8);
31690 @@ -850,7 +850,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
31692         control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
31693                         ~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
31694 -       if ((i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ) || (left_num >= 1))
31695 +       if ((i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) || (left_num >= 1))
31696                 control_reg |= I2C_CONTROL_RS;
31698         if (i2c->op == I2C_MASTER_WRRD)
31699 @@ -1067,7 +1067,8 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
31700                 }
31701         }
31703 -       if (i2c->auto_restart && num >= 2 && i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ)
31704 +       if (i2c->auto_restart && num >= 2 &&
31705 +               i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)
31706                 /* ignore the first restart irq after the master code,
31707                  * otherwise the first transfer will be discarded.
31708                  */
31709 diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
31710 index 12ac4212aded..d4f6c6d60683 100644
31711 --- a/drivers/i2c/busses/i2c-omap.c
31712 +++ b/drivers/i2c/busses/i2c-omap.c
31713 @@ -1404,9 +1404,9 @@ omap_i2c_probe(struct platform_device *pdev)
31714         pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT);
31715         pm_runtime_use_autosuspend(omap->dev);
31717 -       r = pm_runtime_get_sync(omap->dev);
31718 +       r = pm_runtime_resume_and_get(omap->dev);
31719         if (r < 0)
31720 -               goto err_free_mem;
31721 +               goto err_disable_pm;
31723         /*
31724          * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
31725 @@ -1513,8 +1513,8 @@ omap_i2c_probe(struct platform_device *pdev)
31726         omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
31727         pm_runtime_dont_use_autosuspend(omap->dev);
31728         pm_runtime_put_sync(omap->dev);
31729 +err_disable_pm:
31730         pm_runtime_disable(&pdev->dev);
31731 -err_free_mem:
31733         return r;
31735 @@ -1525,7 +1525,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
31736         int ret;
31738         i2c_del_adapter(&omap->adapter);
31739 -       ret = pm_runtime_get_sync(&pdev->dev);
31740 +       ret = pm_runtime_resume_and_get(&pdev->dev);
31741         if (ret < 0)
31742                 return ret;
31744 diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
31745 index 12f6d452c0f7..8722ca23f889 100644
31746 --- a/drivers/i2c/busses/i2c-rcar.c
31747 +++ b/drivers/i2c/busses/i2c-rcar.c
31748 @@ -1027,7 +1027,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
31749         if (of_property_read_bool(dev->of_node, "smbus"))
31750                 priv->flags |= ID_P_HOST_NOTIFY;
31752 -       priv->irq = platform_get_irq(pdev, 0);
31753 +       ret = platform_get_irq(pdev, 0);
31754 +       if (ret < 0)
31755 +               goto out_pm_disable;
31756 +       priv->irq = ret;
31757         ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
31758         if (ret < 0) {
31759                 dev_err(dev, "cannot get irq %d\n", priv->irq);
31760 diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
31761 index c2005c789d2b..319d1fa617c8 100644
31762 --- a/drivers/i2c/busses/i2c-sh7760.c
31763 +++ b/drivers/i2c/busses/i2c-sh7760.c
31764 @@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
31765                 goto out2;
31766         }
31768 -       id->irq = platform_get_irq(pdev, 0);
31769 +       ret = platform_get_irq(pdev, 0);
31770 +       if (ret < 0)
31771 +               goto out3;
31772 +       id->irq = ret;
31774         id->adap.nr = pdev->id;
31775         id->adap.algo = &sh7760_i2c_algo;
31776 diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
31777 index 2917fecf6c80..8ead7e021008 100644
31778 --- a/drivers/i2c/busses/i2c-sprd.c
31779 +++ b/drivers/i2c/busses/i2c-sprd.c
31780 @@ -290,7 +290,7 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
31781         struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
31782         int im, ret;
31784 -       ret = pm_runtime_get_sync(i2c_dev->dev);
31785 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
31786         if (ret < 0)
31787                 return ret;
31789 @@ -576,7 +576,7 @@ static int sprd_i2c_remove(struct platform_device *pdev)
31790         struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
31791         int ret;
31793 -       ret = pm_runtime_get_sync(i2c_dev->dev);
31794 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
31795         if (ret < 0)
31796                 return ret;
31798 diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
31799 index c62c815b88eb..318abfa7926b 100644
31800 --- a/drivers/i2c/busses/i2c-stm32f7.c
31801 +++ b/drivers/i2c/busses/i2c-stm32f7.c
31802 @@ -1652,7 +1652,7 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
31803         i2c_dev->msg_id = 0;
31804         f7_msg->smbus = false;
31806 -       ret = pm_runtime_get_sync(i2c_dev->dev);
31807 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
31808         if (ret < 0)
31809                 return ret;
31811 @@ -1698,7 +1698,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
31812         f7_msg->read_write = read_write;
31813         f7_msg->smbus = true;
31815 -       ret = pm_runtime_get_sync(dev);
31816 +       ret = pm_runtime_resume_and_get(dev);
31817         if (ret < 0)
31818                 return ret;
31820 @@ -1799,7 +1799,7 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
31821         if (ret)
31822                 return ret;
31824 -       ret = pm_runtime_get_sync(dev);
31825 +       ret = pm_runtime_resume_and_get(dev);
31826         if (ret < 0)
31827                 return ret;
31829 @@ -1880,7 +1880,7 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
31831         WARN_ON(!i2c_dev->slave[id]);
31833 -       ret = pm_runtime_get_sync(i2c_dev->dev);
31834 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
31835         if (ret < 0)
31836                 return ret;
31838 @@ -2273,7 +2273,7 @@ static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
31839         int ret;
31840         struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
31842 -       ret = pm_runtime_get_sync(i2c_dev->dev);
31843 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
31844         if (ret < 0)
31845                 return ret;
31847 @@ -2295,7 +2295,7 @@ static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
31848         int ret;
31849         struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
31851 -       ret = pm_runtime_get_sync(i2c_dev->dev);
31852 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
31853         if (ret < 0)
31854                 return ret;
31856 diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
31857 index 087b2951942e..2a8568b97c14 100644
31858 --- a/drivers/i2c/busses/i2c-xiic.c
31859 +++ b/drivers/i2c/busses/i2c-xiic.c
31860 @@ -706,7 +706,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
31861         dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
31862                 xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
31864 -       err = pm_runtime_get_sync(i2c->dev);
31865 +       err = pm_runtime_resume_and_get(i2c->dev);
31866         if (err < 0)
31867                 return err;
31869 @@ -873,7 +873,7 @@ static int xiic_i2c_remove(struct platform_device *pdev)
31870         /* remove adapter & data */
31871         i2c_del_adapter(&i2c->adap);
31873 -       ret = pm_runtime_get_sync(i2c->dev);
31874 +       ret = pm_runtime_resume_and_get(i2c->dev);
31875         if (ret < 0)
31876                 return ret;
31878 diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
31879 index 6ceb11cc4be1..6ef38a8ee95c 100644
31880 --- a/drivers/i2c/i2c-dev.c
31881 +++ b/drivers/i2c/i2c-dev.c
31882 @@ -440,8 +440,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31883                                    sizeof(rdwr_arg)))
31884                         return -EFAULT;
31886 -               /* Put an arbitrary limit on the number of messages that can
31887 -                * be sent at once */
31888 +               if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
31889 +                       return -EINVAL;
31891 +               /*
31892 +                * Put an arbitrary limit on the number of messages that can
31893 +                * be sent at once
31894 +                */
31895                 if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
31896                         return -EINVAL;
31898 diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
31899 index f8e9b7305c13..e2e12a5585e5 100644
31900 --- a/drivers/i3c/master.c
31901 +++ b/drivers/i3c/master.c
31902 @@ -2535,7 +2535,7 @@ int i3c_master_register(struct i3c_master_controller *master,
31904         ret = i3c_master_bus_init(master);
31905         if (ret)
31906 -               goto err_destroy_wq;
31907 +               goto err_put_dev;
31909         ret = device_add(&master->dev);
31910         if (ret)
31911 @@ -2566,9 +2566,6 @@ int i3c_master_register(struct i3c_master_controller *master,
31912  err_cleanup_bus:
31913         i3c_master_bus_cleanup(master);
31915 -err_destroy_wq:
31916 -       destroy_workqueue(master->wq);
31918  err_put_dev:
31919         put_device(&master->dev);
31921 diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
31922 index 2e0c62c39155..8acf277b8b25 100644
31923 --- a/drivers/iio/accel/Kconfig
31924 +++ b/drivers/iio/accel/Kconfig
31925 @@ -211,7 +211,6 @@ config DMARD10
31926  config HID_SENSOR_ACCEL_3D
31927         depends on HID_SENSOR_HUB
31928         select IIO_BUFFER
31929 -       select IIO_TRIGGERED_BUFFER
31930         select HID_SENSOR_IIO_COMMON
31931         select HID_SENSOR_IIO_TRIGGER
31932         tristate "HID Accelerometers 3D"
31933 diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
31934 index 3633a4e302c6..fe225990de24 100644
31935 --- a/drivers/iio/accel/adis16201.c
31936 +++ b/drivers/iio/accel/adis16201.c
31937 @@ -215,7 +215,7 @@ static const struct iio_chan_spec adis16201_channels[] = {
31938         ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
31939         ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
31940                         BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
31941 -       ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
31942 +       ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
31943                         BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
31944         IIO_CHAN_SOFT_TIMESTAMP(7)
31945  };
31946 diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
31947 index e0667c4b3c08..91958da22dcf 100644
31948 --- a/drivers/iio/adc/Kconfig
31949 +++ b/drivers/iio/adc/Kconfig
31950 @@ -249,7 +249,7 @@ config AD799X
31951  config AD9467
31952         tristate "Analog Devices AD9467 High Speed ADC driver"
31953         depends on SPI
31954 -       select ADI_AXI_ADC
31955 +       depends on ADI_AXI_ADC
31956         help
31957           Say yes here to build support for Analog Devices:
31958           * AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
31959 @@ -266,8 +266,6 @@ config ADI_AXI_ADC
31960         select IIO_BUFFER
31961         select IIO_BUFFER_HW_CONSUMER
31962         select IIO_BUFFER_DMAENGINE
31963 -       depends on HAS_IOMEM
31964 -       depends on OF
31965         help
31966           Say yes here to build support for Analog Devices Generic
31967           AXI ADC IP core. The IP core is used for interfacing with
31968 diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
31969 index 17402714b387..9e9ff07cf972 100644
31970 --- a/drivers/iio/adc/ad7476.c
31971 +++ b/drivers/iio/adc/ad7476.c
31972 @@ -321,25 +321,15 @@ static int ad7476_probe(struct spi_device *spi)
31973         spi_message_init(&st->msg);
31974         spi_message_add_tail(&st->xfer, &st->msg);
31976 -       ret = iio_triggered_buffer_setup(indio_dev, NULL,
31977 -                       &ad7476_trigger_handler, NULL);
31978 +       ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
31979 +                                             &ad7476_trigger_handler, NULL);
31980         if (ret)
31981 -               goto error_disable_reg;
31982 +               return ret;
31984         if (st->chip_info->reset)
31985                 st->chip_info->reset(st);
31987 -       ret = iio_device_register(indio_dev);
31988 -       if (ret)
31989 -               goto error_ring_unregister;
31990 -       return 0;
31992 -error_ring_unregister:
31993 -       iio_triggered_buffer_cleanup(indio_dev);
31994 -error_disable_reg:
31995 -       regulator_disable(st->reg);
31997 -       return ret;
31998 +       return devm_iio_device_register(&spi->dev, indio_dev);
32001  static const struct spi_device_id ad7476_id[] = {
32002 diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
32003 index 24d492567336..2a3dd3b907be 100644
32004 --- a/drivers/iio/common/hid-sensors/Kconfig
32005 +++ b/drivers/iio/common/hid-sensors/Kconfig
32006 @@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
32007         tristate "Common module (trigger) for all HID Sensor IIO drivers"
32008         depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
32009         select IIO_TRIGGER
32010 +       select IIO_TRIGGERED_BUFFER
32011         help
32012           Say yes here to build trigger support for HID sensors.
32013           Triggers will be send if all requested attributes were read.
32014 diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
32015 index 5824f2edf975..20b5ac7ab66a 100644
32016 --- a/drivers/iio/gyro/Kconfig
32017 +++ b/drivers/iio/gyro/Kconfig
32018 @@ -111,7 +111,6 @@ config FXAS21002C_SPI
32019  config HID_SENSOR_GYRO_3D
32020         depends on HID_SENSOR_HUB
32021         select IIO_BUFFER
32022 -       select IIO_TRIGGERED_BUFFER
32023         select HID_SENSOR_IIO_COMMON
32024         select HID_SENSOR_IIO_TRIGGER
32025         tristate "HID Gyroscope 3D"
32026 diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
32027 index ac90be03332a..f17a93519535 100644
32028 --- a/drivers/iio/gyro/mpu3050-core.c
32029 +++ b/drivers/iio/gyro/mpu3050-core.c
32030 @@ -272,7 +272,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
32031         case IIO_CHAN_INFO_OFFSET:
32032                 switch (chan->type) {
32033                 case IIO_TEMP:
32034 -                       /* The temperature scaling is (x+23000)/280 Celsius */
32035 +                       /*
32036 +                        * The temperature scaling is (x+23000)/280 Celsius
32037 +                        * for the "best fit straight line" temperature range
32038 +                        * of -30C..85C.  The 23000 includes room temperature
32039 +                        * offset of +35C, 280 is the precision scale and x is
32040 +                        * the 16-bit signed integer reported by hardware.
32041 +                        *
32042 +                        * Temperature value itself represents temperature of
32043 +                        * the sensor die.
32044 +                        */
32045                         *val = 23000;
32046                         return IIO_VAL_INT;
32047                 default:
32048 @@ -329,7 +338,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
32049                                 goto out_read_raw_unlock;
32050                         }
32052 -                       *val = be16_to_cpu(raw_val);
32053 +                       *val = (s16)be16_to_cpu(raw_val);
32054                         ret = IIO_VAL_INT;
32056                         goto out_read_raw_unlock;
32057 diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
32058 index 6549fcf6db69..2de5494e7c22 100644
32059 --- a/drivers/iio/humidity/Kconfig
32060 +++ b/drivers/iio/humidity/Kconfig
32061 @@ -52,7 +52,6 @@ config HID_SENSOR_HUMIDITY
32062         tristate "HID Environmental humidity sensor"
32063         depends on HID_SENSOR_HUB
32064         select IIO_BUFFER
32065 -       select IIO_TRIGGERED_BUFFER
32066         select HID_SENSOR_IIO_COMMON
32067         select HID_SENSOR_IIO_TRIGGER
32068         help
32069 diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
32070 index dfe86c589325..c41b8ef1e250 100644
32071 --- a/drivers/iio/imu/adis16480.c
32072 +++ b/drivers/iio/imu/adis16480.c
32073 @@ -10,6 +10,7 @@
32074  #include <linux/of_irq.h>
32075  #include <linux/interrupt.h>
32076  #include <linux/delay.h>
32077 +#include <linux/math.h>
32078  #include <linux/mutex.h>
32079  #include <linux/device.h>
32080  #include <linux/kernel.h>
32081 @@ -17,6 +18,7 @@
32082  #include <linux/slab.h>
32083  #include <linux/sysfs.h>
32084  #include <linux/module.h>
32085 +#include <linux/lcm.h>
32087  #include <linux/iio/iio.h>
32088  #include <linux/iio/sysfs.h>
32089 @@ -170,6 +172,11 @@ static const char * const adis16480_int_pin_names[4] = {
32090         [ADIS16480_PIN_DIO4] = "DIO4",
32091  };
32093 +static bool low_rate_allow;
32094 +module_param(low_rate_allow, bool, 0444);
32095 +MODULE_PARM_DESC(low_rate_allow,
32096 +                "Allow IMU rates below the minimum advisable when external clk is used in PPS mode (default: N)");
32098  #ifdef CONFIG_DEBUG_FS
32100  static ssize_t adis16480_show_firmware_revision(struct file *file,
32101 @@ -312,7 +319,8 @@ static int adis16480_debugfs_init(struct iio_dev *indio_dev)
32102  static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
32104         struct adis16480 *st = iio_priv(indio_dev);
32105 -       unsigned int t, reg;
32106 +       unsigned int t, sample_rate = st->clk_freq;
32107 +       int ret;
32109         if (val < 0 || val2 < 0)
32110                 return -EINVAL;
32111 @@ -321,28 +329,65 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
32112         if (t == 0)
32113                 return -EINVAL;
32115 +       mutex_lock(&st->adis.state_lock);
32116         /*
32117 -        * When using PPS mode, the rate of data collection is equal to the
32118 -        * product of the external clock frequency and the scale factor in the
32119 -        * SYNC_SCALE register.
32120 -        * When using sync mode, or internal clock, the output data rate is
32121 -        * equal with  the clock frequency divided by DEC_RATE + 1.
32122 +        * When using PPS mode, the input clock needs to be scaled so that we have an IMU
32123 +        * sample rate between (optimally) 4000 and 4250. After this, we can use the
32124 +        * decimation filter to lower the sampling rate in order to get what the user wants.
32125 +        * Optimally, the user sample rate is a multiple of both the IMU sample rate and
32126 +        * the input clock. Hence, calculating the sync_scale dynamically gives us better
32127 +        * chances of achieving a perfect/integer value for DEC_RATE. The math here is:
32128 +        *      1. lcm of the input clock and the desired output rate.
32129 +        *      2. get the highest multiple of the previous result lower than the adis max rate.
32130 +        *      3. The last result becomes the IMU sample rate. Use that to calculate SYNC_SCALE
32131 +        *         and DEC_RATE (to get the user output rate)
32132          */
32133         if (st->clk_mode == ADIS16480_CLK_PPS) {
32134 -               t = t / st->clk_freq;
32135 -               reg = ADIS16495_REG_SYNC_SCALE;
32136 -       } else {
32137 -               t = st->clk_freq / t;
32138 -               reg = ADIS16480_REG_DEC_RATE;
32139 +               unsigned long scaled_rate = lcm(st->clk_freq, t);
32140 +               int sync_scale;
32142 +               /*
32143 +                * If lcm is bigger than the IMU maximum sampling rate there's no perfect
32144 +                * solution. In this case, we get the highest multiple of the input clock
32145 +                * lower than the IMU max sample rate.
32146 +                */
32147 +               if (scaled_rate > st->chip_info->int_clk)
32148 +                       scaled_rate = st->chip_info->int_clk / st->clk_freq * st->clk_freq;
32149 +               else
32150 +                       scaled_rate = st->chip_info->int_clk / scaled_rate * scaled_rate;
32152 +               /*
32153 +                * This is not an hard requirement but it's not advised to run the IMU
32154 +                * with a sample rate lower than 4000Hz due to possible undersampling
32155 +                * issues. However, there are users that might really want to take the risk.
32156 +                * Hence, we provide a module parameter for them. If set, we allow sample
32157 +                * rates lower than 4KHz. By default, we won't allow this and we just roundup
32158 +                * the rate to the next multiple of the input clock bigger than 4KHz. This
32159 +                * is done like this as in some cases (when DEC_RATE is 0) might give
32160 +                * us the closest value to the one desired by the user...
32161 +                */
32162 +               if (scaled_rate < 4000000 && !low_rate_allow)
32163 +                       scaled_rate = roundup(4000000, st->clk_freq);
32165 +               sync_scale = scaled_rate / st->clk_freq;
32166 +               ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
32167 +               if (ret)
32168 +                       goto error;
32170 +               sample_rate = scaled_rate;
32171         }
32173 +       t = DIV_ROUND_CLOSEST(sample_rate, t);
32174 +       if (t)
32175 +               t--;
32177         if (t > st->chip_info->max_dec_rate)
32178                 t = st->chip_info->max_dec_rate;
32180 -       if ((t != 0) && (st->clk_mode != ADIS16480_CLK_PPS))
32181 -               t--;
32183 -       return adis_write_reg_16(&st->adis, reg, t);
32184 +       ret = __adis_write_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, t);
32185 +error:
32186 +       mutex_unlock(&st->adis.state_lock);
32187 +       return ret;
32190  static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
32191 @@ -350,34 +395,35 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
32192         struct adis16480 *st = iio_priv(indio_dev);
32193         uint16_t t;
32194         int ret;
32195 -       unsigned int freq;
32196 -       unsigned int reg;
32197 +       unsigned int freq, sample_rate = st->clk_freq;
32199 -       if (st->clk_mode == ADIS16480_CLK_PPS)
32200 -               reg = ADIS16495_REG_SYNC_SCALE;
32201 -       else
32202 -               reg = ADIS16480_REG_DEC_RATE;
32203 +       mutex_lock(&st->adis.state_lock);
32205 +       if (st->clk_mode == ADIS16480_CLK_PPS) {
32206 +               u16 sync_scale;
32208 +               ret = __adis_read_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, &sync_scale);
32209 +               if (ret)
32210 +                       goto error;
32212 -       ret = adis_read_reg_16(&st->adis, reg, &t);
32213 +               sample_rate = st->clk_freq * sync_scale;
32214 +       }
32216 +       ret = __adis_read_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, &t);
32217         if (ret)
32218 -               return ret;
32219 +               goto error;
32221 -       /*
32222 -        * When using PPS mode, the rate of data collection is equal to the
32223 -        * product of the external clock frequency and the scale factor in the
32224 -        * SYNC_SCALE register.
32225 -        * When using sync mode, or internal clock, the output data rate is
32226 -        * equal with  the clock frequency divided by DEC_RATE + 1.
32227 -        */
32228 -       if (st->clk_mode == ADIS16480_CLK_PPS)
32229 -               freq = st->clk_freq * t;
32230 -       else
32231 -               freq = st->clk_freq / (t + 1);
32232 +       mutex_unlock(&st->adis.state_lock);
32234 +       freq = DIV_ROUND_CLOSEST(sample_rate, (t + 1));
32236         *val = freq / 1000;
32237         *val2 = (freq % 1000) * 1000;
32239         return IIO_VAL_INT_PLUS_MICRO;
32240 +error:
32241 +       mutex_unlock(&st->adis.state_lock);
32242 +       return ret;
32245  enum {
32246 @@ -1278,6 +1324,20 @@ static int adis16480_probe(struct spi_device *spi)
32248                 st->clk_freq = clk_get_rate(st->ext_clk);
32249                 st->clk_freq *= 1000; /* micro */
32250 +               if (st->clk_mode == ADIS16480_CLK_PPS) {
32251 +                       u16 sync_scale;
32253 +                       /*
32254 +                        * In PPS mode, the IMU sample rate is the clk_freq * sync_scale. Hence,
32255 +                        * default the IMU sample rate to the highest multiple of the input clock
32256 +                        * lower than the IMU max sample rate. The internal sample rate is the
32257 +                        * max...
32258 +                        */
32259 +                       sync_scale = st->chip_info->int_clk / st->clk_freq;
32260 +                       ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
32261 +                       if (ret)
32262 +                               return ret;
32263 +               }
32264         } else {
32265                 st->clk_freq = st->chip_info->int_clk;
32266         }
32267 diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
32268 index 453c51c79655..69ab94ab7297 100644
32269 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
32270 +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
32271 @@ -731,12 +731,16 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
32272         }
32275 -static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
32276 +static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val,
32277 +                                       int val2)
32279         int result, i;
32281 +       if (val != 0)
32282 +               return -EINVAL;
32284         for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
32285 -               if (gyro_scale_6050[i] == val) {
32286 +               if (gyro_scale_6050[i] == val2) {
32287                         result = inv_mpu6050_set_gyro_fsr(st, i);
32288                         if (result)
32289                                 return result;
32290 @@ -767,13 +771,17 @@ static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
32291         return -EINVAL;
32294 -static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
32295 +static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val,
32296 +                                        int val2)
32298         int result, i;
32299         u8 d;
32301 +       if (val != 0)
32302 +               return -EINVAL;
32304         for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
32305 -               if (accel_scale[i] == val) {
32306 +               if (accel_scale[i] == val2) {
32307                         d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
32308                         result = regmap_write(st->map, st->reg->accl_config, d);
32309                         if (result)
32310 @@ -814,10 +822,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
32311         case IIO_CHAN_INFO_SCALE:
32312                 switch (chan->type) {
32313                 case IIO_ANGL_VEL:
32314 -                       result = inv_mpu6050_write_gyro_scale(st, val2);
32315 +                       result = inv_mpu6050_write_gyro_scale(st, val, val2);
32316                         break;
32317                 case IIO_ACCEL:
32318 -                       result = inv_mpu6050_write_accel_scale(st, val2);
32319 +                       result = inv_mpu6050_write_accel_scale(st, val, val2);
32320                         break;
32321                 default:
32322                         result = -EINVAL;
32323 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
32324 index 7db761afa578..36f3a900878d 100644
32325 --- a/drivers/iio/industrialio-core.c
32326 +++ b/drivers/iio/industrialio-core.c
32327 @@ -1734,7 +1734,6 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
32328         if (!indio_dev->info)
32329                 goto out_unlock;
32331 -       ret = -EINVAL;
32332         list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
32333                 ret = h->ioctl(indio_dev, filp, cmd, arg);
32334                 if (ret != IIO_IOCTL_UNHANDLED)
32335 @@ -1742,7 +1741,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
32336         }
32338         if (ret == IIO_IOCTL_UNHANDLED)
32339 -               ret = -EINVAL;
32340 +               ret = -ENODEV;
32342  out_unlock:
32343         mutex_unlock(&indio_dev->info_exist_lock);
32344 @@ -1864,9 +1863,6 @@ EXPORT_SYMBOL(__iio_device_register);
32345   **/
32346  void iio_device_unregister(struct iio_dev *indio_dev)
32348 -       struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
32349 -       struct iio_ioctl_handler *h, *t;
32351         cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
32353         mutex_lock(&indio_dev->info_exist_lock);
32354 @@ -1877,9 +1873,6 @@ void iio_device_unregister(struct iio_dev *indio_dev)
32356         indio_dev->info = NULL;
32358 -       list_for_each_entry_safe(h, t, &iio_dev_opaque->ioctl_handlers, entry)
32359 -               list_del(&h->entry);
32361         iio_device_wakeup_eventset(indio_dev);
32362         iio_buffer_wakeup_poll(indio_dev);
32364 diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
32365 index 33ad4dd0b5c7..917f9becf9c7 100644
32366 --- a/drivers/iio/light/Kconfig
32367 +++ b/drivers/iio/light/Kconfig
32368 @@ -256,7 +256,6 @@ config ISL29125
32369  config HID_SENSOR_ALS
32370         depends on HID_SENSOR_HUB
32371         select IIO_BUFFER
32372 -       select IIO_TRIGGERED_BUFFER
32373         select HID_SENSOR_IIO_COMMON
32374         select HID_SENSOR_IIO_TRIGGER
32375         tristate "HID ALS"
32376 @@ -270,7 +269,6 @@ config HID_SENSOR_ALS
32377  config HID_SENSOR_PROX
32378         depends on HID_SENSOR_HUB
32379         select IIO_BUFFER
32380 -       select IIO_TRIGGERED_BUFFER
32381         select HID_SENSOR_IIO_COMMON
32382         select HID_SENSOR_IIO_TRIGGER
32383         tristate "HID PROX"
32384 diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
32385 index 7ba7aa59437c..040d8429a6e0 100644
32386 --- a/drivers/iio/light/gp2ap002.c
32387 +++ b/drivers/iio/light/gp2ap002.c
32388 @@ -583,7 +583,7 @@ static int gp2ap002_probe(struct i2c_client *client,
32389                                         "gp2ap002", indio_dev);
32390         if (ret) {
32391                 dev_err(dev, "unable to request IRQ\n");
32392 -               goto out_disable_vio;
32393 +               goto out_put_pm;
32394         }
32395         gp2ap002->irq = client->irq;
32397 @@ -613,8 +613,9 @@ static int gp2ap002_probe(struct i2c_client *client,
32399         return 0;
32401 -out_disable_pm:
32402 +out_put_pm:
32403         pm_runtime_put_noidle(dev);
32404 +out_disable_pm:
32405         pm_runtime_disable(dev);
32406  out_disable_vio:
32407         regulator_disable(gp2ap002->vio);
32408 diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
32409 index 5bf2bfbc5379..6ce37819fb73 100644
32410 --- a/drivers/iio/light/tsl2563.c
32411 +++ b/drivers/iio/light/tsl2563.c
32412 @@ -271,11 +271,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
32413         default:
32414                 delay = 402;
32415         }
32416 -       /*
32417 -        * TODO: Make sure that we wait at least required delay but why we
32418 -        * have to extend it one tick more?
32419 -        */
32420 -       schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
32421 +       schedule_msec_hrtimeout_interruptible(delay + 1);
32424  static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
32425 diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
32426 index 0f787bfc88fc..c9d8f07a6fcd 100644
32427 --- a/drivers/iio/light/tsl2583.c
32428 +++ b/drivers/iio/light/tsl2583.c
32429 @@ -341,6 +341,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
32430                 return lux_val;
32431         }
32433 +       /* Avoid division by zero of lux_value later on */
32434 +       if (lux_val == 0) {
32435 +               dev_err(&chip->client->dev,
32436 +                       "%s: lux_val of 0 will produce out of range trim_value\n",
32437 +                       __func__);
32438 +               return -ENODATA;
32439 +       }
32441         gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
32442                         * chip->als_settings.als_gain_trim) / lux_val);
32443         if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
32444 diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
32445 index 5d4ffd66032e..74ad5701c6c2 100644
32446 --- a/drivers/iio/magnetometer/Kconfig
32447 +++ b/drivers/iio/magnetometer/Kconfig
32448 @@ -95,7 +95,6 @@ config MAG3110
32449  config HID_SENSOR_MAGNETOMETER_3D
32450         depends on HID_SENSOR_HUB
32451         select IIO_BUFFER
32452 -       select IIO_TRIGGERED_BUFFER
32453         select HID_SENSOR_IIO_COMMON
32454         select HID_SENSOR_IIO_TRIGGER
32455         tristate "HID Magenetometer 3D"
32456 diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
32457 index d46f23d82b3d..2f2f8cb3c26c 100644
32458 --- a/drivers/iio/magnetometer/yamaha-yas530.c
32459 +++ b/drivers/iio/magnetometer/yamaha-yas530.c
32460 @@ -32,13 +32,14 @@
32461  #include <linux/regmap.h>
32462  #include <linux/regulator/consumer.h>
32463  #include <linux/random.h>
32464 -#include <linux/unaligned/be_byteshift.h>
32466  #include <linux/iio/buffer.h>
32467  #include <linux/iio/iio.h>
32468  #include <linux/iio/trigger_consumer.h>
32469  #include <linux/iio/triggered_buffer.h>
32471 +#include <asm/unaligned.h>
32473  /* This register map covers YAS530 and YAS532 but differs in YAS 537 and YAS539 */
32474  #define YAS5XX_DEVICE_ID               0x80
32475  #define YAS5XX_ACTUATE_INIT_COIL       0x81
32476 @@ -887,6 +888,7 @@ static int yas5xx_probe(struct i2c_client *i2c,
32477                 strncpy(yas5xx->name, "yas532", sizeof(yas5xx->name));
32478                 break;
32479         default:
32480 +               ret = -ENODEV;
32481                 dev_err(dev, "unhandled device ID %02x\n", yas5xx->devid);
32482                 goto assert_reset;
32483         }
32484 diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
32485 index a505583cc2fd..396cbbb867f4 100644
32486 --- a/drivers/iio/orientation/Kconfig
32487 +++ b/drivers/iio/orientation/Kconfig
32488 @@ -9,7 +9,6 @@ menu "Inclinometer sensors"
32489  config HID_SENSOR_INCLINOMETER_3D
32490         depends on HID_SENSOR_HUB
32491         select IIO_BUFFER
32492 -       select IIO_TRIGGERED_BUFFER
32493         select HID_SENSOR_IIO_COMMON
32494         select HID_SENSOR_IIO_TRIGGER
32495         tristate "HID Inclinometer 3D"
32496 @@ -20,7 +19,6 @@ config HID_SENSOR_INCLINOMETER_3D
32497  config HID_SENSOR_DEVICE_ROTATION
32498         depends on HID_SENSOR_HUB
32499         select IIO_BUFFER
32500 -       select IIO_TRIGGERED_BUFFER
32501         select HID_SENSOR_IIO_COMMON
32502         select HID_SENSOR_IIO_TRIGGER
32503         tristate "HID Device Rotation"
32504 diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
32505 index 18e4ef060096..c087d8f72a54 100644
32506 --- a/drivers/iio/orientation/hid-sensor-rotation.c
32507 +++ b/drivers/iio/orientation/hid-sensor-rotation.c
32508 @@ -21,7 +21,7 @@ struct dev_rot_state {
32509         struct hid_sensor_common common_attributes;
32510         struct hid_sensor_hub_attribute_info quaternion;
32511         struct {
32512 -               u32 sampled_vals[4] __aligned(16);
32513 +               s32 sampled_vals[4] __aligned(16);
32514                 u64 timestamp __aligned(8);
32515         } scan;
32516         int scale_pre_decml;
32517 @@ -170,8 +170,15 @@ static int dev_rot_capture_sample(struct hid_sensor_hub_device *hsdev,
32518         struct dev_rot_state *rot_state = iio_priv(indio_dev);
32520         if (usage_id == HID_USAGE_SENSOR_ORIENT_QUATERNION) {
32521 -               memcpy(&rot_state->scan.sampled_vals, raw_data,
32522 -                      sizeof(rot_state->scan.sampled_vals));
32523 +               if (raw_len / 4 == sizeof(s16)) {
32524 +                       rot_state->scan.sampled_vals[0] = ((s16 *)raw_data)[0];
32525 +                       rot_state->scan.sampled_vals[1] = ((s16 *)raw_data)[1];
32526 +                       rot_state->scan.sampled_vals[2] = ((s16 *)raw_data)[2];
32527 +                       rot_state->scan.sampled_vals[3] = ((s16 *)raw_data)[3];
32528 +               } else {
32529 +                       memcpy(&rot_state->scan.sampled_vals, raw_data,
32530 +                              sizeof(rot_state->scan.sampled_vals));
32531 +               }
32533                 dev_dbg(&indio_dev->dev, "Recd Quat len:%zu::%zu\n", raw_len,
32534                         sizeof(rot_state->scan.sampled_vals));
32535 diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
32536 index 689b978db4f9..fc0d3cfca418 100644
32537 --- a/drivers/iio/pressure/Kconfig
32538 +++ b/drivers/iio/pressure/Kconfig
32539 @@ -79,7 +79,6 @@ config DPS310
32540  config HID_SENSOR_PRESS
32541         depends on HID_SENSOR_HUB
32542         select IIO_BUFFER
32543 -       select IIO_TRIGGERED_BUFFER
32544         select HID_SENSOR_IIO_COMMON
32545         select HID_SENSOR_IIO_TRIGGER
32546         tristate "HID PRESS"
32547 diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
32548 index c685f10b5ae4..cc206bfa09c7 100644
32549 --- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
32550 +++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
32551 @@ -160,6 +160,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
32552         ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
32553         if (ret < 0) {
32554                 dev_err(&client->dev, "cannot send start measurement command");
32555 +               pm_runtime_put_noidle(&client->dev);
32556                 return ret;
32557         }
32559 diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
32560 index 37fd0b65a014..ea82cfaf7f42 100644
32561 --- a/drivers/iio/proximity/sx9310.c
32562 +++ b/drivers/iio/proximity/sx9310.c
32563 @@ -763,7 +763,11 @@ static int sx9310_write_far_debounce(struct sx9310_data *data, int val)
32564         int ret;
32565         unsigned int regval;
32567 -       val = ilog2(val);
32568 +       if (val > 0)
32569 +               val = ilog2(val);
32570 +       if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val))
32571 +               return -EINVAL;
32573         regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val);
32575         mutex_lock(&data->mutex);
32576 @@ -780,7 +784,11 @@ static int sx9310_write_close_debounce(struct sx9310_data *data, int val)
32577         int ret;
32578         unsigned int regval;
32580 -       val = ilog2(val);
32581 +       if (val > 0)
32582 +               val = ilog2(val);
32583 +       if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val))
32584 +               return -EINVAL;
32586         regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val);
32588         mutex_lock(&data->mutex);
32589 @@ -1213,17 +1221,17 @@ static int sx9310_init_compensation(struct iio_dev *indio_dev)
32592  static const struct sx9310_reg_default *
32593 -sx9310_get_default_reg(struct sx9310_data *data, int i,
32594 +sx9310_get_default_reg(struct sx9310_data *data, int idx,
32595                        struct sx9310_reg_default *reg_def)
32597 -       int ret;
32598         const struct device_node *np = data->client->dev.of_node;
32599 -       u32 combined[SX9310_NUM_CHANNELS] = { 4, 4, 4, 4 };
32600 +       u32 combined[SX9310_NUM_CHANNELS];
32601 +       u32 start = 0, raw = 0, pos = 0;
32602         unsigned long comb_mask = 0;
32603 +       int ret, i, count;
32604         const char *res;
32605 -       u32 start = 0, raw = 0, pos = 0;
32607 -       memcpy(reg_def, &sx9310_default_regs[i], sizeof(*reg_def));
32608 +       memcpy(reg_def, &sx9310_default_regs[idx], sizeof(*reg_def));
32609         if (!np)
32610                 return reg_def;
32612 @@ -1234,15 +1242,31 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
32613                         reg_def->def |= SX9310_REG_PROX_CTRL2_SHIELDEN_GROUND;
32614                 }
32616 -               reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
32617 -               of_property_read_u32_array(np, "semtech,combined-sensors",
32618 -                                          combined, ARRAY_SIZE(combined));
32619 -               for (i = 0; i < ARRAY_SIZE(combined); i++) {
32620 -                       if (combined[i] <= SX9310_NUM_CHANNELS)
32621 -                               comb_mask |= BIT(combined[i]);
32622 +               count = of_property_count_elems_of_size(np, "semtech,combined-sensors",
32623 +                                                       sizeof(u32));
32624 +               if (count > 0 && count <= ARRAY_SIZE(combined)) {
32625 +                       ret = of_property_read_u32_array(np, "semtech,combined-sensors",
32626 +                                                        combined, count);
32627 +                       if (ret)
32628 +                               break;
32629 +               } else {
32630 +                       /*
32631 +                        * Either the property does not exist in the DT or the
32632 +                        * number of entries is incorrect.
32633 +                        */
32634 +                       break;
32635                 }
32636 +               for (i = 0; i < count; i++) {
32637 +                       if (combined[i] >= SX9310_NUM_CHANNELS) {
32638 +                               /* Invalid sensor (invalid DT). */
32639 +                               break;
32640 +                       }
32641 +                       comb_mask |= BIT(combined[i]);
32642 +               }
32643 +               if (i < count)
32644 +                       break;
32646 -               comb_mask &= 0xf;
32647 +               reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
32648                 if (comb_mask == (BIT(3) | BIT(2) | BIT(1) | BIT(0)))
32649                         reg_def->def |= SX9310_REG_PROX_CTRL2_COMBMODE_CS0_CS1_CS2_CS3;
32650                 else if (comb_mask == (BIT(1) | BIT(2)))
32651 diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
32652 index f1f2a1499c9e..4df60082c1fa 100644
32653 --- a/drivers/iio/temperature/Kconfig
32654 +++ b/drivers/iio/temperature/Kconfig
32655 @@ -45,7 +45,6 @@ config HID_SENSOR_TEMP
32656         tristate "HID Environmental temperature sensor"
32657         depends on HID_SENSOR_HUB
32658         select IIO_BUFFER
32659 -       select IIO_TRIGGERED_BUFFER
32660         select HID_SENSOR_IIO_COMMON
32661         select HID_SENSOR_IIO_TRIGGER
32662         help
32663 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
32664 index 3d194bb60840..6adbaea358ae 100644
32665 --- a/drivers/infiniband/core/cm.c
32666 +++ b/drivers/infiniband/core/cm.c
32667 @@ -2138,7 +2138,8 @@ static int cm_req_handler(struct cm_work *work)
32668                 goto destroy;
32669         }
32671 -       cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
32672 +       if (cm_id_priv->av.ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE)
32673 +               cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
32675         memset(&work->path[0], 0, sizeof(work->path[0]));
32676         if (cm_req_has_alt_path(req_msg))
32677 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
32678 index 94096511599f..6ac07911a17b 100644
32679 --- a/drivers/infiniband/core/cma.c
32680 +++ b/drivers/infiniband/core/cma.c
32681 @@ -463,7 +463,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
32682         id_priv->id.route.addr.dev_addr.transport =
32683                 rdma_node_get_transport(cma_dev->device->node_type);
32684         list_add_tail(&id_priv->list, &cma_dev->id_list);
32685 -       rdma_restrack_add(&id_priv->res);
32687         trace_cm_id_attach(id_priv, cma_dev->device);
32689 @@ -700,6 +699,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
32690         mutex_lock(&lock);
32691         cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
32692         mutex_unlock(&lock);
32693 +       rdma_restrack_add(&id_priv->res);
32694         return 0;
32697 @@ -754,8 +754,10 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
32698         }
32700  out:
32701 -       if (!ret)
32702 +       if (!ret) {
32703                 cma_attach_to_dev(id_priv, cma_dev);
32704 +               rdma_restrack_add(&id_priv->res);
32705 +       }
32707         mutex_unlock(&lock);
32708         return ret;
32709 @@ -816,6 +818,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
32711  found:
32712         cma_attach_to_dev(id_priv, cma_dev);
32713 +       rdma_restrack_add(&id_priv->res);
32714         mutex_unlock(&lock);
32715         addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
32716         memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
32717 @@ -2529,6 +2532,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
32718                rdma_addr_size(cma_src_addr(id_priv)));
32720         _cma_attach_to_dev(dev_id_priv, cma_dev);
32721 +       rdma_restrack_add(&dev_id_priv->res);
32722         cma_id_get(id_priv);
32723         dev_id_priv->internal_id = 1;
32724         dev_id_priv->afonly = id_priv->afonly;
32725 @@ -3169,6 +3173,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
32726         ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
32727         id_priv->id.port_num = p;
32728         cma_attach_to_dev(id_priv, cma_dev);
32729 +       rdma_restrack_add(&id_priv->res);
32730         cma_set_loopback(cma_src_addr(id_priv));
32731  out:
32732         mutex_unlock(&lock);
32733 @@ -3201,6 +3206,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
32734                 if (status)
32735                         pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
32736                                              status);
32737 +               rdma_restrack_add(&id_priv->res);
32738         } else if (status) {
32739                 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
32740         }
32741 @@ -3812,6 +3818,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
32742         if (ret)
32743                 goto err2;
32745 +       if (!cma_any_addr(addr))
32746 +               rdma_restrack_add(&id_priv->res);
32747         return 0;
32748  err2:
32749         if (id_priv->cma_dev)
32750 diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
32751 index 995d4633b0a1..d4d4959c2434 100644
32752 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
32753 +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
32754 @@ -2784,6 +2784,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
32755                 dev_err(&cq->hwq.pdev->dev,
32756                         "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
32757                         cqe_cons, rq->max_wqe);
32758 +               rc = -EINVAL;
32759                 goto done;
32760         }
32762 diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
32763 index fa7878336100..3ca47004b752 100644
32764 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
32765 +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
32766 @@ -854,6 +854,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
32768  unmap_io:
32769         pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
32770 +       dpit->dbr_bar_reg_iomem = NULL;
32771         return -ENOMEM;
32774 diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
32775 index 5c95c789f302..e800e8e8bed5 100644
32776 --- a/drivers/infiniband/hw/cxgb4/resource.c
32777 +++ b/drivers/infiniband/hw/cxgb4/resource.c
32778 @@ -216,7 +216,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
32779                         goto out;
32780                 entry->qid = qid;
32781                 list_add_tail(&entry->entry, &uctx->cqids);
32782 -               for (i = qid; i & rdev->qpmask; i++) {
32783 +               for (i = qid + 1; i & rdev->qpmask; i++) {
32784                         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
32785                         if (!entry)
32786                                 goto out;
32787 diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
32788 index 0e83d4b61e46..2cf102b5abd4 100644
32789 --- a/drivers/infiniband/hw/hfi1/firmware.c
32790 +++ b/drivers/infiniband/hw/hfi1/firmware.c
32791 @@ -1916,6 +1916,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
32792                         dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
32793                                    __func__, (ptr -
32794                                    (u32 *)dd->platform_config.data));
32795 +                       ret = -EINVAL;
32796                         goto bail;
32797                 }
32798                 /* Jump the CRC DWORD */
32799 diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
32800 index f650cac9d424..d30c23b6527a 100644
32801 --- a/drivers/infiniband/hw/hfi1/ipoib.h
32802 +++ b/drivers/infiniband/hw/hfi1/ipoib.h
32803 @@ -52,8 +52,9 @@ union hfi1_ipoib_flow {
32804   * @producer_lock: producer sync lock
32805   * @consumer_lock: consumer sync lock
32806   */
32807 +struct ipoib_txreq;
32808  struct hfi1_ipoib_circ_buf {
32809 -       void **items;
32810 +       struct ipoib_txreq **items;
32811         unsigned long head;
32812         unsigned long tail;
32813         unsigned long max_items;
32814 diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
32815 index edd4eeac8dd1..cdc26ee3cf52 100644
32816 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
32817 +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
32818 @@ -702,14 +702,14 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
32820         priv->tx_napis = kcalloc_node(dev->num_tx_queues,
32821                                       sizeof(struct napi_struct),
32822 -                                     GFP_ATOMIC,
32823 +                                     GFP_KERNEL,
32824                                       priv->dd->node);
32825         if (!priv->tx_napis)
32826                 goto free_txreq_cache;
32828         priv->txqs = kcalloc_node(dev->num_tx_queues,
32829                                   sizeof(struct hfi1_ipoib_txq),
32830 -                                 GFP_ATOMIC,
32831 +                                 GFP_KERNEL,
32832                                   priv->dd->node);
32833         if (!priv->txqs)
32834                 goto free_tx_napis;
32835 @@ -741,9 +741,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
32836                                              priv->dd->node);
32838                 txq->tx_ring.items =
32839 -                       vzalloc_node(array_size(tx_ring_size,
32840 -                                               sizeof(struct ipoib_txreq)),
32841 -                                    priv->dd->node);
32842 +                       kcalloc_node(tx_ring_size,
32843 +                                    sizeof(struct ipoib_txreq *),
32844 +                                    GFP_KERNEL, priv->dd->node);
32845                 if (!txq->tx_ring.items)
32846                         goto free_txqs;
32848 @@ -764,7 +764,7 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
32849                 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
32851                 netif_napi_del(txq->napi);
32852 -               vfree(txq->tx_ring.items);
32853 +               kfree(txq->tx_ring.items);
32854         }
32856         kfree(priv->txqs);
32857 @@ -817,7 +817,7 @@ void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
32858                 hfi1_ipoib_drain_tx_list(txq);
32859                 netif_napi_del(txq->napi);
32860                 (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
32861 -               vfree(txq->tx_ring.items);
32862 +               kfree(txq->tx_ring.items);
32863         }
32865         kfree(priv->txqs);
32866 diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
32867 index f3fb28e3d5d7..d213f65d4cdd 100644
32868 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c
32869 +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
32870 @@ -89,7 +89,7 @@ int hfi1_mmu_rb_register(void *ops_arg,
32871         struct mmu_rb_handler *h;
32872         int ret;
32874 -       h = kmalloc(sizeof(*h), GFP_KERNEL);
32875 +       h = kzalloc(sizeof(*h), GFP_KERNEL);
32876         if (!h)
32877                 return -ENOMEM;
32879 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
32880 index ce26f97b2ca2..ad3cee54140e 100644
32881 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
32882 +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
32883 @@ -5068,6 +5068,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
32884         qp_attr->cur_qp_state = qp_attr->qp_state;
32885         qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
32886         qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
32887 +       qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
32889         if (!ibqp->uobject) {
32890                 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
32891 diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
32892 index 53e5cd1a2bd6..146a4148219b 100644
32893 --- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
32894 +++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
32895 @@ -393,12 +393,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
32896         i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
32897                     pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
32898         pble_rsrc->unallocated_pble -= (chunk->size >> 3);
32899 -       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
32900         sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
32901                         sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
32902 -       if (sd_entry->valid)
32903 -               return 0;
32904 -       if (dev->is_pf) {
32905 +       if (dev->is_pf && !sd_entry->valid) {
32906                 ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
32907                                             sd_reg_val, idx->sd_idx,
32908                                             sd_entry->entry_type, true);
32909 @@ -409,6 +406,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
32910         }
32912         sd_entry->valid = true;
32913 +       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
32914         return 0;
32915   error:
32916         kfree(chunk);
32917 diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
32918 index 25da0b05b4e2..f0af3f1ae039 100644
32919 --- a/drivers/infiniband/hw/mlx5/fs.c
32920 +++ b/drivers/infiniband/hw/mlx5/fs.c
32921 @@ -1528,8 +1528,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
32922                 dst_num++;
32923         }
32925 -       handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
32926 -                                       flow_context, flow_act,
32927 +       handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
32928 +                                       fs_matcher, flow_context, flow_act,
32929                                         cmd_in, inlen, dst_num);
32931         if (IS_ERR(handler)) {
32932 @@ -1885,8 +1885,9 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
32933                 else
32934                         *dest_id = mqp->raw_packet_qp.rq.tirn;
32935                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
32936 -       } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
32937 -                  fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
32938 +       } else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
32939 +                   fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
32940 +                  !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
32941                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
32942         }
32944 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
32945 index 0d69a697d75f..4be7bccefaa4 100644
32946 --- a/drivers/infiniband/hw/mlx5/main.c
32947 +++ b/drivers/infiniband/hw/mlx5/main.c
32948 @@ -499,7 +499,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
32949         translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
32950                                  &props->active_width, ext);
32952 -       if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) {
32953 +       if (!dev->is_rep && dev->mdev->roce.roce_en) {
32954                 u16 qkey_viol_cntr;
32956                 props->port_cap_flags |= IB_PORT_CM_SUP;
32957 @@ -4174,7 +4174,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
32959                 /* Register only for native ports */
32960                 err = mlx5_add_netdev_notifier(dev, port_num);
32961 -               if (err || dev->is_rep || !mlx5_is_roce_enabled(mdev))
32962 +               if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev))
32963                         /*
32964                          * We don't enable ETH interface for
32965                          * 1. IB representors
32966 @@ -4711,7 +4711,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
32967         dev->mdev = mdev;
32968         dev->num_ports = num_ports;
32970 -       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
32971 +       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
32972                 profile = &raw_eth_profile;
32973         else
32974                 profile = &pf_profile;
32975 diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
32976 index 88cc26e008fc..b085c02b53d0 100644
32977 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
32978 +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
32979 @@ -547,11 +547,6 @@ static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
32980         return container_of(wr, struct mlx5_umr_wr, wr);
32983 -struct mlx5_shared_mr_info {
32984 -       int mr_id;
32985 -       struct ib_umem          *umem;
32988  enum mlx5_ib_cq_pr_flags {
32989         MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
32990  };
32991 @@ -654,47 +649,69 @@ struct mlx5_ib_dm {
32992         atomic64_add(value, &((mr)->odp_stats.counter_name))
32994  struct mlx5_ib_mr {
32995 -       struct ib_mr            ibmr;
32996 -       void                    *descs;
32997 -       dma_addr_t              desc_map;
32998 -       int                     ndescs;
32999 -       int                     data_length;
33000 -       int                     meta_ndescs;
33001 -       int                     meta_length;
33002 -       int                     max_descs;
33003 -       int                     desc_size;
33004 -       int                     access_mode;
33005 -       unsigned int            page_shift;
33006 -       struct mlx5_core_mkey   mmkey;
33007 -       struct ib_umem         *umem;
33008 -       struct mlx5_shared_mr_info      *smr_info;
33009 -       struct list_head        list;
33010 -       struct mlx5_cache_ent  *cache_ent;
33011 -       u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
33012 -       struct mlx5_core_sig_ctx    *sig;
33013 -       void                    *descs_alloc;
33014 -       int                     access_flags; /* Needed for rereg MR */
33016 -       struct mlx5_ib_mr      *parent;
33017 -       /* Needed for IB_MR_TYPE_INTEGRITY */
33018 -       struct mlx5_ib_mr      *pi_mr;
33019 -       struct mlx5_ib_mr      *klm_mr;
33020 -       struct mlx5_ib_mr      *mtt_mr;
33021 -       u64                     data_iova;
33022 -       u64                     pi_iova;
33024 -       /* For ODP and implicit */
33025 -       struct xarray           implicit_children;
33026 -       union {
33027 -               struct list_head elm;
33028 -               struct work_struct work;
33029 -       } odp_destroy;
33030 -       struct ib_odp_counters  odp_stats;
33031 -       bool                    is_odp_implicit;
33032 +       struct ib_mr ibmr;
33033 +       struct mlx5_core_mkey mmkey;
33035 -       struct mlx5_async_work  cb_work;
33036 +       /* User MR data */
33037 +       struct mlx5_cache_ent *cache_ent;
33038 +       struct ib_umem *umem;
33040 +       /* This is zero'd when the MR is allocated */
33041 +       struct {
33042 +               /* Used only while the MR is in the cache */
33043 +               struct {
33044 +                       u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
33045 +                       struct mlx5_async_work cb_work;
33046 +                       /* Cache list element */
33047 +                       struct list_head list;
33048 +               };
33050 +               /* Used only by kernel MRs (umem == NULL) */
33051 +               struct {
33052 +                       void *descs;
33053 +                       void *descs_alloc;
33054 +                       dma_addr_t desc_map;
33055 +                       int max_descs;
33056 +                       int ndescs;
33057 +                       int desc_size;
33058 +                       int access_mode;
33060 +                       /* For Kernel IB_MR_TYPE_INTEGRITY */
33061 +                       struct mlx5_core_sig_ctx *sig;
33062 +                       struct mlx5_ib_mr *pi_mr;
33063 +                       struct mlx5_ib_mr *klm_mr;
33064 +                       struct mlx5_ib_mr *mtt_mr;
33065 +                       u64 data_iova;
33066 +                       u64 pi_iova;
33067 +                       int meta_ndescs;
33068 +                       int meta_length;
33069 +                       int data_length;
33070 +               };
33072 +               /* Used only by User MRs (umem != NULL) */
33073 +               struct {
33074 +                       unsigned int page_shift;
33075 +                       /* Current access_flags */
33076 +                       int access_flags;
33078 +                       /* For User ODP */
33079 +                       struct mlx5_ib_mr *parent;
33080 +                       struct xarray implicit_children;
33081 +                       union {
33082 +                               struct work_struct work;
33083 +                       } odp_destroy;
33084 +                       struct ib_odp_counters odp_stats;
33085 +                       bool is_odp_implicit;
33086 +               };
33087 +       };
33088  };
33090 +/* Zero the fields in the mr that are variant depending on usage */
33091 +static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
33093 +       memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
33096  static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
33098         return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
33099 diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
33100 index db05b0e0a8d7..ea8f068a6da3 100644
33101 --- a/drivers/infiniband/hw/mlx5/mr.c
33102 +++ b/drivers/infiniband/hw/mlx5/mr.c
33103 @@ -590,6 +590,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
33104                 ent->available_mrs--;
33105                 queue_adjust_cache_locked(ent);
33106                 spin_unlock_irq(&ent->lock);
33108 +               mlx5_clear_mr(mr);
33109         }
33110         mr->access_flags = access_flags;
33111         return mr;
33112 @@ -615,16 +617,14 @@ static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
33113                         ent->available_mrs--;
33114                         queue_adjust_cache_locked(ent);
33115                         spin_unlock_irq(&ent->lock);
33116 -                       break;
33117 +                       mlx5_clear_mr(mr);
33118 +                       return mr;
33119                 }
33120                 queue_adjust_cache_locked(ent);
33121                 spin_unlock_irq(&ent->lock);
33122         }
33124 -       if (!mr)
33125 -               req_ent->miss++;
33127 -       return mr;
33128 +       req_ent->miss++;
33129 +       return NULL;
33132  static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
33133 @@ -993,8 +993,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
33135         mr->ibmr.pd = pd;
33136         mr->umem = umem;
33137 -       mr->access_flags = access_flags;
33138 -       mr->desc_size = sizeof(struct mlx5_mtt);
33139         mr->mmkey.iova = iova;
33140         mr->mmkey.size = umem->length;
33141         mr->mmkey.pd = to_mpd(pd)->pdn;
33142 diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
33143 index b103555b1f5d..d98755e78362 100644
33144 --- a/drivers/infiniband/hw/mlx5/odp.c
33145 +++ b/drivers/infiniband/hw/mlx5/odp.c
33146 @@ -227,7 +227,6 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
33148         dma_fence_odp_mr(mr);
33150 -       mr->parent = NULL;
33151         mlx5_mr_cache_free(mr_to_mdev(mr), mr);
33152         ib_umem_odp_release(odp);
33154 diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
33155 index f5a52a6fae43..843f9e7fe96f 100644
33156 --- a/drivers/infiniband/hw/mlx5/qp.c
33157 +++ b/drivers/infiniband/hw/mlx5/qp.c
33158 @@ -3146,6 +3146,19 @@ enum {
33159         MLX5_PATH_FLAG_COUNTER  = 1 << 2,
33160  };
33162 +static int mlx5_to_ib_rate_map(u8 rate)
33164 +       static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
33165 +                                    IB_RATE_25_GBPS,      IB_RATE_100_GBPS,
33166 +                                    IB_RATE_200_GBPS,     IB_RATE_50_GBPS,
33167 +                                    IB_RATE_400_GBPS };
33169 +       if (rate < ARRAY_SIZE(rates))
33170 +               return rates[rate];
33172 +       return rate - MLX5_STAT_RATE_OFFSET;
33175  static int ib_to_mlx5_rate_map(u8 rate)
33177         switch (rate) {
33178 @@ -4485,7 +4498,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
33179         rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
33181         static_rate = MLX5_GET(ads, path, stat_rate);
33182 -       rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
33183 +       rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
33184         if (MLX5_GET(ads, path, grh) ||
33185             ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
33186                 rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
33187 diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
33188 index c4bc58736e48..1715fbe0719d 100644
33189 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
33190 +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
33191 @@ -636,8 +636,10 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
33192         memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
33194         if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
33195 -                            &qp->iwarp_cm_flags))
33196 +                            &qp->iwarp_cm_flags)) {
33197 +               rc = -ENODEV;
33198                 goto err; /* QP already being destroyed */
33199 +       }
33201         rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
33202         if (rc) {
33203 diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
33204 index df0d173d6acb..da2e867a1ed9 100644
33205 --- a/drivers/infiniband/sw/rxe/rxe_av.c
33206 +++ b/drivers/infiniband/sw/rxe/rxe_av.c
33207 @@ -88,7 +88,7 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
33208                 type = RXE_NETWORK_TYPE_IPV4;
33209                 break;
33210         case RDMA_NETWORK_IPV6:
33211 -               type = RXE_NETWORK_TYPE_IPV4;
33212 +               type = RXE_NETWORK_TYPE_IPV6;
33213                 break;
33214         default:
33215                 /* not reached - checked in rxe_av_chk_attr */
33216 diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
33217 index 34a910cf0edb..61c17db70d65 100644
33218 --- a/drivers/infiniband/sw/siw/siw_mem.c
33219 +++ b/drivers/infiniband/sw/siw/siw_mem.c
33220 @@ -106,8 +106,6 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
33221         mem->perms = rights & IWARP_ACCESS_MASK;
33222         kref_init(&mem->ref);
33224 -       mr->mem = mem;
33226         get_random_bytes(&next, 4);
33227         next &= 0x00ffffff;
33229 @@ -116,6 +114,8 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
33230                 kfree(mem);
33231                 return -ENOMEM;
33232         }
33234 +       mr->mem = mem;
33235         /* Set the STag index part */
33236         mem->stag = id << 8;
33237         mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;
33238 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
33239 index 7305ed8976c2..18266f07c58d 100644
33240 --- a/drivers/infiniband/ulp/isert/ib_isert.c
33241 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
33242 @@ -438,23 +438,23 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
33243         isert_init_conn(isert_conn);
33244         isert_conn->cm_id = cma_id;
33246 -       ret = isert_alloc_login_buf(isert_conn, cma_id->device);
33247 -       if (ret)
33248 -               goto out;
33250         device = isert_device_get(cma_id);
33251         if (IS_ERR(device)) {
33252                 ret = PTR_ERR(device);
33253 -               goto out_rsp_dma_map;
33254 +               goto out;
33255         }
33256         isert_conn->device = device;
33258 +       ret = isert_alloc_login_buf(isert_conn, cma_id->device);
33259 +       if (ret)
33260 +               goto out_conn_dev;
33262         isert_set_nego_params(isert_conn, &event->param.conn);
33264         isert_conn->qp = isert_create_qp(isert_conn, cma_id);
33265         if (IS_ERR(isert_conn->qp)) {
33266                 ret = PTR_ERR(isert_conn->qp);
33267 -               goto out_conn_dev;
33268 +               goto out_rsp_dma_map;
33269         }
33271         ret = isert_login_post_recv(isert_conn);
33272 @@ -473,10 +473,10 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
33274  out_destroy_qp:
33275         isert_destroy_qp(isert_conn);
33276 -out_conn_dev:
33277 -       isert_device_put(device);
33278  out_rsp_dma_map:
33279         isert_free_login_buf(isert_conn);
33280 +out_conn_dev:
33281 +       isert_device_put(device);
33282  out:
33283         kfree(isert_conn);
33284         rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
33285 diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
33286 index 6734329cca33..959ba0462ef0 100644
33287 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
33288 +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
33289 @@ -2784,8 +2784,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
33290         } while (!changed && old_state != RTRS_CLT_DEAD);
33292         if (likely(changed)) {
33293 -               rtrs_clt_destroy_sess_files(sess, sysfs_self);
33294                 rtrs_clt_remove_path_from_arr(sess);
33295 +               rtrs_clt_destroy_sess_files(sess, sysfs_self);
33296                 kobject_put(&sess->kobj);
33297         }
33299 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
33300 index 6be60aa5ffe2..7f0420ad9057 100644
33301 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
33302 +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
33303 @@ -2378,6 +2378,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
33304                 pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
33305                         dev_name(&sdev->device->dev), port_num);
33306                 mutex_unlock(&sport->mutex);
33307 +               ret = -EINVAL;
33308                 goto reject;
33309         }
33311 diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
33312 index d8fccf048bf4..30576a5f2f04 100644
33313 --- a/drivers/input/touchscreen/ili210x.c
33314 +++ b/drivers/input/touchscreen/ili210x.c
33315 @@ -87,7 +87,7 @@ static bool ili210x_touchdata_to_coords(const u8 *touchdata,
33316                                         unsigned int *x, unsigned int *y,
33317                                         unsigned int *z)
33319 -       if (touchdata[0] & BIT(finger))
33320 +       if (!(touchdata[0] & BIT(finger)))
33321                 return false;
33323         *x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
33324 diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
33325 index 321f5906e6ed..df7b19ff0a9e 100644
33326 --- a/drivers/iommu/amd/init.c
33327 +++ b/drivers/iommu/amd/init.c
33328 @@ -12,7 +12,6 @@
33329  #include <linux/acpi.h>
33330  #include <linux/list.h>
33331  #include <linux/bitmap.h>
33332 -#include <linux/delay.h>
33333  #include <linux/slab.h>
33334  #include <linux/syscore_ops.h>
33335  #include <linux/interrupt.h>
33336 @@ -257,8 +256,6 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
33337  static int amd_iommu_enable_interrupts(void);
33338  static int __init iommu_go_to_state(enum iommu_init_state state);
33339  static void init_device_table_dma(void);
33340 -static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
33341 -                               u8 fxn, u64 *value, bool is_write);
33343  static bool amd_iommu_pre_enabled = true;
33345 @@ -1717,53 +1714,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
33346         return 0;
33349 -static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
33350 +static void init_iommu_perf_ctr(struct amd_iommu *iommu)
33352 -       int retry;
33353 +       u64 val;
33354         struct pci_dev *pdev = iommu->dev;
33355 -       u64 val = 0xabcd, val2 = 0, save_reg, save_src;
33357         if (!iommu_feature(iommu, FEATURE_PC))
33358                 return;
33360         amd_iommu_pc_present = true;
33362 -       /* save the value to restore, if writable */
33363 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
33364 -           iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
33365 -               goto pc_false;
33367 -       /*
33368 -        * Disable power gating by programing the performance counter
33369 -        * source to 20 (i.e. counts the reads and writes from/to IOMMU
33370 -        * Reserved Register [MMIO Offset 1FF8h] that are ignored.),
33371 -        * which never get incremented during this init phase.
33372 -        * (Note: The event is also deprecated.)
33373 -        */
33374 -       val = 20;
33375 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
33376 -               goto pc_false;
33378 -       /* Check if the performance counters can be written to */
33379 -       val = 0xabcd;
33380 -       for (retry = 5; retry; retry--) {
33381 -               if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
33382 -                   iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
33383 -                   val2)
33384 -                       break;
33386 -               /* Wait about 20 msec for power gating to disable and retry. */
33387 -               msleep(20);
33388 -       }
33390 -       /* restore */
33391 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
33392 -           iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
33393 -               goto pc_false;
33395 -       if (val != val2)
33396 -               goto pc_false;
33398         pci_info(pdev, "IOMMU performance counters supported\n");
33400         val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
33401 @@ -1771,11 +1731,6 @@ static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
33402         iommu->max_counters = (u8) ((val >> 7) & 0xf);
33404         return;
33406 -pc_false:
33407 -       pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
33408 -       amd_iommu_pc_present = false;
33409 -       return;
33412  static ssize_t amd_iommu_show_cap(struct device *dev,
33413 @@ -1837,7 +1792,7 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
33414          * IVHD and MMIO conflict.
33415          */
33416         if (features != iommu->features)
33417 -               pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx\n).",
33418 +               pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
33419                         features, iommu->features);
33422 diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
33423 index 8594b4a83043..941ba5484731 100644
33424 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
33425 +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
33426 @@ -2305,6 +2305,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
33428         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
33430 +       if (!gather->pgsize)
33431 +               return;
33433         arm_smmu_tlb_inv_range_domain(gather->start,
33434                                       gather->end - gather->start + 1,
33435                                       gather->pgsize, true, smmu_domain);
33436 diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
33437 index f985817c967a..230b6f6b3901 100644
33438 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
33439 +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
33440 @@ -115,7 +115,7 @@
33441  #define GERROR_PRIQ_ABT_ERR            (1 << 3)
33442  #define GERROR_EVTQ_ABT_ERR            (1 << 2)
33443  #define GERROR_CMDQ_ERR                        (1 << 0)
33444 -#define GERROR_ERR_MASK                        0xfd
33445 +#define GERROR_ERR_MASK                        0x1fd
33447  #define ARM_SMMU_GERRORN               0x64
33449 diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
33450 index af765c813cc8..fdd095e1fa52 100644
33451 --- a/drivers/iommu/dma-iommu.c
33452 +++ b/drivers/iommu/dma-iommu.c
33453 @@ -52,6 +52,17 @@ struct iommu_dma_cookie {
33454  };
33456  static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
33457 +bool iommu_dma_forcedac __read_mostly;
33459 +static int __init iommu_dma_forcedac_setup(char *str)
33461 +       int ret = kstrtobool(str, &iommu_dma_forcedac);
33463 +       if (!ret && iommu_dma_forcedac)
33464 +               pr_info("Forcing DAC for PCI devices\n");
33465 +       return ret;
33467 +early_param("iommu.forcedac", iommu_dma_forcedac_setup);
33469  void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
33470                 struct iommu_domain *domain)
33471 @@ -444,7 +455,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
33472                 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
33474         /* Try to get PCI devices a SAC address */
33475 -       if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
33476 +       if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
33477                 iova = alloc_iova_fast(iovad, iova_len,
33478                                        DMA_BIT_MASK(32) >> shift, false);
33480 diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
33481 index ee0932307d64..7e551da6c1fb 100644
33482 --- a/drivers/iommu/intel/iommu.c
33483 +++ b/drivers/iommu/intel/iommu.c
33484 @@ -360,7 +360,6 @@ int intel_iommu_enabled = 0;
33485  EXPORT_SYMBOL_GPL(intel_iommu_enabled);
33487  static int dmar_map_gfx = 1;
33488 -static int dmar_forcedac;
33489  static int intel_iommu_strict;
33490  static int intel_iommu_superpage = 1;
33491  static int iommu_identity_mapping;
33492 @@ -451,8 +450,8 @@ static int __init intel_iommu_setup(char *str)
33493                         dmar_map_gfx = 0;
33494                         pr_info("Disable GFX device mapping\n");
33495                 } else if (!strncmp(str, "forcedac", 8)) {
33496 -                       pr_info("Forcing DAC for PCI devices\n");
33497 -                       dmar_forcedac = 1;
33498 +                       pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
33499 +                       iommu_dma_forcedac = true;
33500                 } else if (!strncmp(str, "strict", 6)) {
33501                         pr_info("Disable batched IOTLB flush\n");
33502                         intel_iommu_strict = 1;
33503 @@ -658,7 +657,14 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
33504         rcu_read_lock();
33505         for_each_active_iommu(iommu, drhd) {
33506                 if (iommu != skip) {
33507 -                       if (!ecap_sc_support(iommu->ecap)) {
33508 +                       /*
33509 +                        * If the hardware is operating in the scalable mode,
33510 +                        * the snooping control is always supported since we
33511 +                        * always set PASID-table-entry.PGSNP bit if the domain
33512 +                        * is managed outside (UNMANAGED).
33513 +                        */
33514 +                       if (!sm_supported(iommu) &&
33515 +                           !ecap_sc_support(iommu->ecap)) {
33516                                 ret = 0;
33517                                 break;
33518                         }
33519 @@ -1340,6 +1346,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
33520                       readl, (sts & DMA_GSTS_RTPS), sts);
33522         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
33524 +       iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
33525 +       if (sm_supported(iommu))
33526 +               qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
33527 +       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
33530  void iommu_flush_write_buffer(struct intel_iommu *iommu)
33531 @@ -2289,6 +2300,41 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
33532         return level;
33536 + * Ensure that old small page tables are removed to make room for superpage(s).
33537 + * We're going to add new large pages, so make sure we don't remove their parent
33538 + * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
33539 + */
33540 +static void switch_to_super_page(struct dmar_domain *domain,
33541 +                                unsigned long start_pfn,
33542 +                                unsigned long end_pfn, int level)
33544 +       unsigned long lvl_pages = lvl_to_nr_pages(level);
33545 +       struct dma_pte *pte = NULL;
33546 +       int i;
33548 +       while (start_pfn <= end_pfn) {
33549 +               if (!pte)
33550 +                       pte = pfn_to_dma_pte(domain, start_pfn, &level);
33552 +               if (dma_pte_present(pte)) {
33553 +                       dma_pte_free_pagetable(domain, start_pfn,
33554 +                                              start_pfn + lvl_pages - 1,
33555 +                                              level + 1);
33557 +                       for_each_domain_iommu(i, domain)
33558 +                               iommu_flush_iotlb_psi(g_iommus[i], domain,
33559 +                                                     start_pfn, lvl_pages,
33560 +                                                     0, 0);
33561 +               }
33563 +               pte++;
33564 +               start_pfn += lvl_pages;
33565 +               if (first_pte_in_page(pte))
33566 +                       pte = NULL;
33567 +       }
33570  static int
33571  __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
33572                  unsigned long phys_pfn, unsigned long nr_pages, int prot)
33573 @@ -2305,8 +2351,9 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
33574                 return -EINVAL;
33576         attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
33577 +       attr |= DMA_FL_PTE_PRESENT;
33578         if (domain_use_first_level(domain)) {
33579 -               attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
33580 +               attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
33582                 if (domain->domain.type == IOMMU_DOMAIN_DMA) {
33583                         attr |= DMA_FL_PTE_ACCESS;
33584 @@ -2329,22 +2376,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
33585                                 return -ENOMEM;
33586                         /* It is large page*/
33587                         if (largepage_lvl > 1) {
33588 -                               unsigned long nr_superpages, end_pfn;
33589 +                               unsigned long end_pfn;
33591                                 pteval |= DMA_PTE_LARGE_PAGE;
33592 -                               lvl_pages = lvl_to_nr_pages(largepage_lvl);
33594 -                               nr_superpages = nr_pages / lvl_pages;
33595 -                               end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
33597 -                               /*
33598 -                                * Ensure that old small page tables are
33599 -                                * removed to make room for superpage(s).
33600 -                                * We're adding new large pages, so make sure
33601 -                                * we don't remove their parent tables.
33602 -                                */
33603 -                               dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
33604 -                                                      largepage_lvl + 1);
33605 +                               end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
33606 +                               switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
33607                         } else {
33608                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
33609                         }
33610 @@ -2422,6 +2458,10 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
33611                                    (((u16)bus) << 8) | devfn,
33612                                    DMA_CCMD_MASK_NOBIT,
33613                                    DMA_CCMD_DEVICE_INVL);
33615 +       if (sm_supported(iommu))
33616 +               qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
33618         iommu->flush.flush_iotlb(iommu,
33619                                  did_old,
33620                                  0,
33621 @@ -2505,6 +2545,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
33623         flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
33625 +       if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
33626 +               flags |= PASID_FLAG_PAGE_SNOOP;
33628         return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
33629                                              domain->iommu_did[iommu->seq_id],
33630                                              flags);
33631 @@ -3267,8 +3310,6 @@ static int __init init_dmars(void)
33632                 register_pasid_allocator(iommu);
33633  #endif
33634                 iommu_set_root_entry(iommu);
33635 -               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
33636 -               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
33637         }
33639  #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
33640 @@ -3458,12 +3499,7 @@ static int init_iommu_hw(void)
33641                 }
33643                 iommu_flush_write_buffer(iommu);
33645                 iommu_set_root_entry(iommu);
33647 -               iommu->flush.flush_context(iommu, 0, 0, 0,
33648 -                                          DMA_CCMD_GLOBAL_INVL);
33649 -               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
33650                 iommu_enable_translation(iommu);
33651                 iommu_disable_protect_mem_regions(iommu);
33652         }
33653 @@ -3846,8 +3882,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
33654                 goto disable_iommu;
33656         iommu_set_root_entry(iommu);
33657 -       iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
33658 -       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
33659         iommu_enable_translation(iommu);
33661         iommu_disable_protect_mem_regions(iommu);
33662 diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
33663 index 611ef5243cb6..5c16ebe037a1 100644
33664 --- a/drivers/iommu/intel/irq_remapping.c
33665 +++ b/drivers/iommu/intel/irq_remapping.c
33666 @@ -736,7 +736,7 @@ static int __init intel_prepare_irq_remapping(void)
33667                 return -ENODEV;
33669         if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
33670 -               goto error;
33671 +               return -ENODEV;
33673         if (!dmar_ir_support())
33674                 return -ENODEV;
33675 diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
33676 index f26cb6195b2c..5093d317ff1a 100644
33677 --- a/drivers/iommu/intel/pasid.c
33678 +++ b/drivers/iommu/intel/pasid.c
33679 @@ -411,6 +411,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
33680         pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
33684 + * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
33685 + * PASID entry.
33686 + */
33687 +static inline void
33688 +pasid_set_pgsnp(struct pasid_entry *pe)
33690 +       pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
33693  /*
33694   * Setup the First Level Page table Pointer field (Bit 140~191)
33695   * of a scalable mode PASID entry.
33696 @@ -565,6 +575,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
33697                 }
33698         }
33700 +       if (flags & PASID_FLAG_PAGE_SNOOP)
33701 +               pasid_set_pgsnp(pte);
33703         pasid_set_domain_id(pte, did);
33704         pasid_set_address_width(pte, iommu->agaw);
33705         pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
33706 @@ -643,6 +656,9 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
33707         pasid_set_fault_enable(pte);
33708         pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
33710 +       if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
33711 +               pasid_set_pgsnp(pte);
33713         /*
33714          * Since it is a second level only translation setup, we should
33715          * set SRE bit as well (addresses are expected to be GPAs).
33716 diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
33717 index 444c0bec221a..086ebd697319 100644
33718 --- a/drivers/iommu/intel/pasid.h
33719 +++ b/drivers/iommu/intel/pasid.h
33720 @@ -48,6 +48,7 @@
33721   */
33722  #define PASID_FLAG_SUPERVISOR_MODE     BIT(0)
33723  #define PASID_FLAG_NESTED              BIT(1)
33724 +#define PASID_FLAG_PAGE_SNOOP          BIT(2)
33726  /*
33727   * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
33728 diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
33729 index 574a7e657a9a..ecb6314fdd5c 100644
33730 --- a/drivers/iommu/intel/svm.c
33731 +++ b/drivers/iommu/intel/svm.c
33732 @@ -862,7 +862,7 @@ intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
33733         /* Fill in event data for device specific processing */
33734         memset(&event, 0, sizeof(struct iommu_fault_event));
33735         event.fault.type = IOMMU_FAULT_PAGE_REQ;
33736 -       event.fault.prm.addr = desc->addr;
33737 +       event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
33738         event.fault.prm.pasid = desc->pasid;
33739         event.fault.prm.grpid = desc->prg_index;
33740         event.fault.prm.perm = prq_to_iommu_prot(desc);
33741 @@ -920,7 +920,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
33742                                ((unsigned long long *)req)[1]);
33743                         goto no_pasid;
33744                 }
33746 +               /* We shall not receive page request for supervisor SVM */
33747 +               if (req->pm_req && (req->rd_req | req->wr_req)) {
33748 +                       pr_err("Unexpected page request in Privilege Mode");
33749 +                       /* No need to find the matching sdev as for bad_req */
33750 +                       goto no_pasid;
33751 +               }
33752 +               /* DMA read with exec request is not supported. */
33753 +               if (req->exe_req && req->rd_req) {
33754 +                       pr_err("Execution request not supported\n");
33755 +                       goto no_pasid;
33756 +               }
33757                 if (!svm || svm->pasid != req->pasid) {
33758                         rcu_read_lock();
33759                         svm = ioasid_find(NULL, req->pasid, NULL);
33760 @@ -1021,12 +1031,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)
33761                                 QI_PGRP_RESP_TYPE;
33762                         resp.qw1 = QI_PGRP_IDX(req->prg_index) |
33763                                 QI_PGRP_LPIG(req->lpig);
33764 +                       resp.qw2 = 0;
33765 +                       resp.qw3 = 0;
33767                         if (req->priv_data_present)
33768                                 memcpy(&resp.qw2, req->priv_data,
33769                                        sizeof(req->priv_data));
33770 -                       resp.qw2 = 0;
33771 -                       resp.qw3 = 0;
33772                         qi_submit_sync(iommu, &resp, 1, 0);
33773                 }
33774  prq_advance:
33775 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
33776 index d0b0a15dba84..e10cfa99057c 100644
33777 --- a/drivers/iommu/iommu.c
33778 +++ b/drivers/iommu/iommu.c
33779 @@ -2878,10 +2878,12 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
33780   */
33781  int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
33783 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
33784 +       if (dev->iommu && dev->iommu->iommu_dev) {
33785 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
33787 -       if (ops && ops->dev_enable_feat)
33788 -               return ops->dev_enable_feat(dev, feat);
33789 +               if (ops->dev_enable_feat)
33790 +                       return ops->dev_enable_feat(dev, feat);
33791 +       }
33793         return -ENODEV;
33795 @@ -2894,10 +2896,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
33796   */
33797  int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
33799 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
33800 +       if (dev->iommu && dev->iommu->iommu_dev) {
33801 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
33803 -       if (ops && ops->dev_disable_feat)
33804 -               return ops->dev_disable_feat(dev, feat);
33805 +               if (ops->dev_disable_feat)
33806 +                       return ops->dev_disable_feat(dev, feat);
33807 +       }
33809         return -EBUSY;
33811 @@ -2905,10 +2909,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
33813  bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
33815 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
33816 +       if (dev->iommu && dev->iommu->iommu_dev) {
33817 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
33819 -       if (ops && ops->dev_feat_enabled)
33820 -               return ops->dev_feat_enabled(dev, feat);
33821 +               if (ops->dev_feat_enabled)
33822 +                       return ops->dev_feat_enabled(dev, feat);
33823 +       }
33825         return false;
33827 diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
33828 index 6ecc007f07cd..e168a682806a 100644
33829 --- a/drivers/iommu/mtk_iommu.c
33830 +++ b/drivers/iommu/mtk_iommu.c
33831 @@ -688,13 +688,6 @@ static const struct iommu_ops mtk_iommu_ops = {
33832  static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
33834         u32 regval;
33835 -       int ret;
33837 -       ret = clk_prepare_enable(data->bclk);
33838 -       if (ret) {
33839 -               dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
33840 -               return ret;
33841 -       }
33843         if (data->plat_data->m4u_plat == M4U_MT8173) {
33844                 regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
33845 @@ -760,7 +753,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
33846         if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
33847                              dev_name(data->dev), (void *)data)) {
33848                 writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
33849 -               clk_disable_unprepare(data->bclk);
33850                 dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
33851                 return -ENODEV;
33852         }
33853 @@ -977,14 +969,19 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
33854         void __iomem *base = data->base;
33855         int ret;
33857 -       /* Avoid first resume to affect the default value of registers below. */
33858 -       if (!m4u_dom)
33859 -               return 0;
33860         ret = clk_prepare_enable(data->bclk);
33861         if (ret) {
33862                 dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
33863                 return ret;
33864         }
33866 +       /*
33867 +        * Upon first resume, only enable the clk and return, since the values of the
33868 +        * registers are not yet set.
33869 +        */
33870 +       if (!m4u_dom)
33871 +               return 0;
33873         writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
33874         writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
33875         writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
33876 diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
33877 index 563a9b366294..e81e89a81cb5 100644
33878 --- a/drivers/irqchip/irq-gic-v3-mbi.c
33879 +++ b/drivers/irqchip/irq-gic-v3-mbi.c
33880 @@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
33881         reg = of_get_property(np, "mbi-alias", NULL);
33882         if (reg) {
33883                 mbi_phys_base = of_translate_address(np, reg);
33884 -               if (mbi_phys_base == OF_BAD_ADDR) {
33885 +               if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
33886                         ret = -ENXIO;
33887                         goto err_free_mbi;
33888                 }
33889 diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
33890 index eb0ee356a629..00404024d7cd 100644
33891 --- a/drivers/irqchip/irq-gic-v3.c
33892 +++ b/drivers/irqchip/irq-gic-v3.c
33893 @@ -648,6 +648,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
33895         irqnr = gic_read_iar();
33897 +       /* Check for special IDs first */
33898 +       if ((irqnr >= 1020 && irqnr <= 1023))
33899 +               return;
33901         if (gic_supports_nmi() &&
33902             unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
33903                 gic_handle_nmi(irqnr, regs);
33904 @@ -659,10 +663,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
33905                 gic_arch_enable_irqs();
33906         }
33908 -       /* Check for special IDs first */
33909 -       if ((irqnr >= 1020 && irqnr <= 1023))
33910 -               return;
33912         if (static_branch_likely(&supports_deactivate_key))
33913                 gic_write_eoir(irqnr);
33914         else
33915 diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
33916 index 265b53476a80..6dedc58c47b3 100644
33917 --- a/drivers/leds/blink/Kconfig
33918 +++ b/drivers/leds/blink/Kconfig
33919 @@ -9,6 +9,7 @@ if LEDS_BLINK
33921  config LEDS_BLINK_LGM
33922         tristate "LED support for Intel LGM SoC series"
33923 +       depends on GPIOLIB
33924         depends on LEDS_CLASS
33925         depends on MFD_SYSCON
33926         depends on OF
33927 diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
33928 index 4c325301a2fe..94d9067dc8d0 100644
33929 --- a/drivers/mailbox/sprd-mailbox.c
33930 +++ b/drivers/mailbox/sprd-mailbox.c
33931 @@ -60,6 +60,8 @@ struct sprd_mbox_priv {
33932         struct clk              *clk;
33933         u32                     outbox_fifo_depth;
33935 +       struct mutex            lock;
33936 +       u32                     refcnt;
33937         struct mbox_chan        chan[SPRD_MBOX_CHAN_MAX];
33938  };
33940 @@ -115,7 +117,11 @@ static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
33941                 id = readl(priv->outbox_base + SPRD_MBOX_ID);
33943                 chan = &priv->chan[id];
33944 -               mbox_chan_received_data(chan, (void *)msg);
33945 +               if (chan->cl)
33946 +                       mbox_chan_received_data(chan, (void *)msg);
33947 +               else
33948 +                       dev_warn_ratelimited(priv->dev,
33949 +                                   "message's been dropped at ch[%d]\n", id);
33951                 /* Trigger to update outbox FIFO pointer */
33952                 writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
33953 @@ -215,18 +221,22 @@ static int sprd_mbox_startup(struct mbox_chan *chan)
33954         struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
33955         u32 val;
33957 -       /* Select outbox FIFO mode and reset the outbox FIFO status */
33958 -       writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
33959 +       mutex_lock(&priv->lock);
33960 +       if (priv->refcnt++ == 0) {
33961 +               /* Select outbox FIFO mode and reset the outbox FIFO status */
33962 +               writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
33964 -       /* Enable inbox FIFO overflow and delivery interrupt */
33965 -       val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
33966 -       val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
33967 -       writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
33968 +               /* Enable inbox FIFO overflow and delivery interrupt */
33969 +               val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
33970 +               val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
33971 +               writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
33973 -       /* Enable outbox FIFO not empty interrupt */
33974 -       val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
33975 -       val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
33976 -       writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
33977 +               /* Enable outbox FIFO not empty interrupt */
33978 +               val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
33979 +               val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
33980 +               writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
33981 +       }
33982 +       mutex_unlock(&priv->lock);
33984         return 0;
33986 @@ -235,9 +245,13 @@ static void sprd_mbox_shutdown(struct mbox_chan *chan)
33988         struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
33990 -       /* Disable inbox & outbox interrupt */
33991 -       writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
33992 -       writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
33993 +       mutex_lock(&priv->lock);
33994 +       if (--priv->refcnt == 0) {
33995 +               /* Disable inbox & outbox interrupt */
33996 +               writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
33997 +               writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
33998 +       }
33999 +       mutex_unlock(&priv->lock);
34002  static const struct mbox_chan_ops sprd_mbox_ops = {
34003 @@ -266,6 +280,7 @@ static int sprd_mbox_probe(struct platform_device *pdev)
34004                 return -ENOMEM;
34006         priv->dev = dev;
34007 +       mutex_init(&priv->lock);
34009         /*
34010          * The Spreadtrum mailbox uses an inbox to send messages to the target
34011 diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
34012 index 82d4e0880a99..4fb635c0baa0 100644
34013 --- a/drivers/md/bcache/writeback.c
34014 +++ b/drivers/md/bcache/writeback.c
34015 @@ -110,13 +110,13 @@ static void __update_writeback_rate(struct cached_dev *dc)
34016                 int64_t fps;
34018                 if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
34019 -                       fp_term = dc->writeback_rate_fp_term_low *
34020 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_low *
34021                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
34022                 } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
34023 -                       fp_term = dc->writeback_rate_fp_term_mid *
34024 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_mid *
34025                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
34026                 } else {
34027 -                       fp_term = dc->writeback_rate_fp_term_high *
34028 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_high *
34029                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
34030                 }
34031                 fps = div_s64(dirty, dirty_buckets) * fp_term;
34032 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
34033 index 46b5d542b8fe..362c887d33b3 100644
34034 --- a/drivers/md/dm-integrity.c
34035 +++ b/drivers/md/dm-integrity.c
34036 @@ -4039,6 +4039,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
34037                         if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
34038                                 r = -EINVAL;
34039                                 ti->error = "Invalid bitmap_flush_interval argument";
34040 +                               goto bad;
34041                         }
34042                         ic->bitmap_flush_interval = msecs_to_jiffies(val);
34043                 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
34044 diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
34045 index cab12b2251ba..91461b6904c1 100644
34046 --- a/drivers/md/dm-raid.c
34047 +++ b/drivers/md/dm-raid.c
34048 @@ -1868,6 +1868,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
34049         return rs->md.new_level != rs->md.level;
34052 +/* True if layout is set to reshape. */
34053 +static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
34055 +       return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
34056 +              rs->md.new_layout != rs->md.layout ||
34057 +              rs->md.new_chunk_sectors != rs->md.chunk_sectors;
34060  /* True if @rs is requested to reshape by ctr */
34061  static bool rs_reshape_requested(struct raid_set *rs)
34063 @@ -1880,9 +1888,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
34064         if (rs_is_raid0(rs))
34065                 return false;
34067 -       change = mddev->new_layout != mddev->layout ||
34068 -                mddev->new_chunk_sectors != mddev->chunk_sectors ||
34069 -                rs->delta_disks;
34070 +       change = rs_is_layout_change(rs, false);
34072         /* Historical case to support raid1 reshape without delta disks */
34073         if (rs_is_raid1(rs)) {
34074 @@ -2817,7 +2823,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
34077  /*
34078 - *
34079 + * Reshape:
34080   * - change raid layout
34081   * - change chunk size
34082   * - add disks
34083 @@ -2926,6 +2932,20 @@ static int rs_setup_reshape(struct raid_set *rs)
34084         return r;
34088 + * If the md resync thread has updated superblock with max reshape position
34089 + * at the end of a reshape but not (yet) reset the layout configuration
34090 + * changes -> reset the latter.
34091 + */
34092 +static void rs_reset_inconclusive_reshape(struct raid_set *rs)
34094 +       if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
34095 +               rs_set_cur(rs);
34096 +               rs->md.delta_disks = 0;
34097 +               rs->md.reshape_backwards = 0;
34098 +       }
34101  /*
34102   * Enable/disable discard support on RAID set depending on
34103   * RAID level and discard properties of underlying RAID members.
34104 @@ -3212,11 +3232,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
34105         if (r)
34106                 goto bad;
34108 +       /* Catch any inconclusive reshape superblock content. */
34109 +       rs_reset_inconclusive_reshape(rs);
34111         /* Start raid set read-only and assumed clean to change in raid_resume() */
34112         rs->md.ro = 1;
34113         rs->md.in_sync = 1;
34115 -       /* Keep array frozen */
34116 +       /* Keep array frozen until resume. */
34117         set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
34119         /* Has to be held on running the array */
34120 @@ -3230,7 +3253,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
34121         }
34123         r = md_start(&rs->md);
34125         if (r) {
34126                 ti->error = "Failed to start raid array";
34127                 mddev_unlock(&rs->md);
34128 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
34129 index 13b4385f4d5a..9c3bc3711b33 100644
34130 --- a/drivers/md/dm-rq.c
34131 +++ b/drivers/md/dm-rq.c
34132 @@ -569,6 +569,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
34133         blk_mq_free_tag_set(md->tag_set);
34134  out_kfree_tag_set:
34135         kfree(md->tag_set);
34136 +       md->tag_set = NULL;
34138         return err;
34140 @@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
34141         if (md->tag_set) {
34142                 blk_mq_free_tag_set(md->tag_set);
34143                 kfree(md->tag_set);
34144 +               md->tag_set = NULL;
34145         }
34148 diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
34149 index 200c5d0f08bf..ea3130e11680 100644
34150 --- a/drivers/md/md-bitmap.c
34151 +++ b/drivers/md/md-bitmap.c
34152 @@ -1722,6 +1722,8 @@ void md_bitmap_flush(struct mddev *mddev)
34153         md_bitmap_daemon_work(mddev);
34154         bitmap->daemon_lastrun -= sleep;
34155         md_bitmap_daemon_work(mddev);
34156 +       if (mddev->bitmap_info.external)
34157 +               md_super_wait(mddev);
34158         md_bitmap_update_sb(bitmap);
34161 diff --git a/drivers/md/md.c b/drivers/md/md.c
34162 index 21da0c48f6c2..2a9553efc2d1 100644
34163 --- a/drivers/md/md.c
34164 +++ b/drivers/md/md.c
34165 @@ -734,7 +734,34 @@ void mddev_init(struct mddev *mddev)
34167  EXPORT_SYMBOL_GPL(mddev_init);
34169 +static struct mddev *mddev_find_locked(dev_t unit)
34171 +       struct mddev *mddev;
34173 +       list_for_each_entry(mddev, &all_mddevs, all_mddevs)
34174 +               if (mddev->unit == unit)
34175 +                       return mddev;
34177 +       return NULL;
34180  static struct mddev *mddev_find(dev_t unit)
34182 +       struct mddev *mddev;
34184 +       if (MAJOR(unit) != MD_MAJOR)
34185 +               unit &= ~((1 << MdpMinorShift) - 1);
34187 +       spin_lock(&all_mddevs_lock);
34188 +       mddev = mddev_find_locked(unit);
34189 +       if (mddev)
34190 +               mddev_get(mddev);
34191 +       spin_unlock(&all_mddevs_lock);
34193 +       return mddev;
34196 +static struct mddev *mddev_find_or_alloc(dev_t unit)
34198         struct mddev *mddev, *new = NULL;
34200 @@ -745,13 +772,13 @@ static struct mddev *mddev_find(dev_t unit)
34201         spin_lock(&all_mddevs_lock);
34203         if (unit) {
34204 -               list_for_each_entry(mddev, &all_mddevs, all_mddevs)
34205 -                       if (mddev->unit == unit) {
34206 -                               mddev_get(mddev);
34207 -                               spin_unlock(&all_mddevs_lock);
34208 -                               kfree(new);
34209 -                               return mddev;
34210 -                       }
34211 +               mddev = mddev_find_locked(unit);
34212 +               if (mddev) {
34213 +                       mddev_get(mddev);
34214 +                       spin_unlock(&all_mddevs_lock);
34215 +                       kfree(new);
34216 +                       return mddev;
34217 +               }
34219                 if (new) {
34220                         list_add(&new->all_mddevs, &all_mddevs);
34221 @@ -777,12 +804,7 @@ static struct mddev *mddev_find(dev_t unit)
34222                                 return NULL;
34223                         }
34225 -                       is_free = 1;
34226 -                       list_for_each_entry(mddev, &all_mddevs, all_mddevs)
34227 -                               if (mddev->unit == dev) {
34228 -                                       is_free = 0;
34229 -                                       break;
34230 -                               }
34231 +                       is_free = !mddev_find_locked(dev);
34232                 }
34233                 new->unit = dev;
34234                 new->md_minor = MINOR(dev);
34235 @@ -5644,7 +5666,7 @@ static int md_alloc(dev_t dev, char *name)
34236          * writing to /sys/module/md_mod/parameters/new_array.
34237          */
34238         static DEFINE_MUTEX(disks_mutex);
34239 -       struct mddev *mddev = mddev_find(dev);
34240 +       struct mddev *mddev = mddev_find_or_alloc(dev);
34241         struct gendisk *disk;
34242         int partitioned;
34243         int shift;
34244 @@ -6524,11 +6546,9 @@ static void autorun_devices(int part)
34246                 md_probe(dev);
34247                 mddev = mddev_find(dev);
34248 -               if (!mddev || !mddev->gendisk) {
34249 -                       if (mddev)
34250 -                               mddev_put(mddev);
34251 +               if (!mddev)
34252                         break;
34253 -               }
34255                 if (mddev_lock(mddev))
34256                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
34257                 else if (mddev->raid_disks || mddev->major_version
34258 @@ -7821,8 +7841,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
34259                 /* Wait until bdev->bd_disk is definitely gone */
34260                 if (work_pending(&mddev->del_work))
34261                         flush_workqueue(md_misc_wq);
34262 -               /* Then retry the open from the top */
34263 -               return -ERESTARTSYS;
34264 +               return -EBUSY;
34265         }
34266         BUG_ON(mddev != bdev->bd_disk->private_data);
34268 @@ -8153,7 +8172,11 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
34269         loff_t l = *pos;
34270         struct mddev *mddev;
34272 -       if (l >= 0x10000)
34273 +       if (l == 0x10000) {
34274 +               ++*pos;
34275 +               return (void *)2;
34276 +       }
34277 +       if (l > 0x10000)
34278                 return NULL;
34279         if (!l--)
34280                 /* header */
34281 @@ -9251,11 +9274,11 @@ void md_check_recovery(struct mddev *mddev)
34282                 }
34284                 if (mddev_is_clustered(mddev)) {
34285 -                       struct md_rdev *rdev;
34286 +                       struct md_rdev *rdev, *tmp;
34287                         /* kick the device if another node issued a
34288                          * remove disk.
34289                          */
34290 -                       rdev_for_each(rdev, mddev) {
34291 +                       rdev_for_each_safe(rdev, tmp, mddev) {
34292                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
34293                                                 rdev->raid_disk < 0)
34294                                         md_kick_rdev_from_array(rdev);
34295 @@ -9569,7 +9592,7 @@ static int __init md_init(void)
34296  static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
34298         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
34299 -       struct md_rdev *rdev2;
34300 +       struct md_rdev *rdev2, *tmp;
34301         int role, ret;
34302         char b[BDEVNAME_SIZE];
34304 @@ -9586,7 +9609,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
34305         }
34307         /* Check for change of roles in the active devices */
34308 -       rdev_for_each(rdev2, mddev) {
34309 +       rdev_for_each_safe(rdev2, tmp, mddev) {
34310                 if (test_bit(Faulty, &rdev2->flags))
34311                         continue;
34313 diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
34314 index fe073d92f01e..70cfdea27efd 100644
34315 --- a/drivers/md/persistent-data/dm-btree-internal.h
34316 +++ b/drivers/md/persistent-data/dm-btree-internal.h
34317 @@ -34,12 +34,12 @@ struct node_header {
34318         __le32 max_entries;
34319         __le32 value_size;
34320         __le32 padding;
34321 -} __packed;
34322 +} __attribute__((packed, aligned(8)));
34324  struct btree_node {
34325         struct node_header header;
34326         __le64 keys[];
34327 -} __packed;
34328 +} __attribute__((packed, aligned(8)));
34331  /*
34332 diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
34333 index d8b4125e338c..a213bf11738f 100644
34334 --- a/drivers/md/persistent-data/dm-space-map-common.c
34335 +++ b/drivers/md/persistent-data/dm-space-map-common.c
34336 @@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
34337          */
34338         begin = do_div(index_begin, ll->entries_per_block);
34339         end = do_div(end, ll->entries_per_block);
34340 +       if (end == 0)
34341 +               end = ll->entries_per_block;
34343         for (i = index_begin; i < index_end; i++, begin = 0) {
34344                 struct dm_block *blk;
34345 diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
34346 index 8de63ce39bdd..87e17909ef52 100644
34347 --- a/drivers/md/persistent-data/dm-space-map-common.h
34348 +++ b/drivers/md/persistent-data/dm-space-map-common.h
34349 @@ -33,7 +33,7 @@ struct disk_index_entry {
34350         __le64 blocknr;
34351         __le32 nr_free;
34352         __le32 none_free_before;
34353 -} __packed;
34354 +} __attribute__ ((packed, aligned(8)));
34357  #define MAX_METADATA_BITMAPS 255
34358 @@ -43,7 +43,7 @@ struct disk_metadata_index {
34359         __le64 blocknr;
34361         struct disk_index_entry index[MAX_METADATA_BITMAPS];
34362 -} __packed;
34363 +} __attribute__ ((packed, aligned(8)));
34365  struct ll_disk;
34367 @@ -86,7 +86,7 @@ struct disk_sm_root {
34368         __le64 nr_allocated;
34369         __le64 bitmap_root;
34370         __le64 ref_count_root;
34371 -} __packed;
34372 +} __attribute__ ((packed, aligned(8)));
34374  #define ENTRIES_PER_BYTE 4
34376 @@ -94,7 +94,7 @@ struct disk_bitmap_header {
34377         __le32 csum;
34378         __le32 not_used;
34379         __le64 blocknr;
34380 -} __packed;
34381 +} __attribute__ ((packed, aligned(8)));
34383  enum allocation_event {
34384         SM_NONE,
34385 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
34386 index d2378765dc15..ced076ba560e 100644
34387 --- a/drivers/md/raid1.c
34388 +++ b/drivers/md/raid1.c
34389 @@ -478,6 +478,8 @@ static void raid1_end_write_request(struct bio *bio)
34390                 if (!test_bit(Faulty, &rdev->flags))
34391                         set_bit(R1BIO_WriteError, &r1_bio->state);
34392                 else {
34393 +                       /* Fail the request */
34394 +                       set_bit(R1BIO_Degraded, &r1_bio->state);
34395                         /* Finished with this branch */
34396                         r1_bio->bios[mirror] = NULL;
34397                         to_put = bio;
34398 diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
34399 index f2d13b71416c..e50fa0ff7c5d 100644
34400 --- a/drivers/media/common/saa7146/saa7146_core.c
34401 +++ b/drivers/media/common/saa7146/saa7146_core.c
34402 @@ -253,7 +253,7 @@ int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt
34403                          i, sg_dma_address(list), sg_dma_len(list),
34404                          list->offset);
34405  */
34406 -               for (p = 0; p * 4096 < list->length; p++, ptr++) {
34407 +               for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++) {
34408                         *ptr = cpu_to_le32(sg_dma_address(list) + p * 4096);
34409                         nr_pages++;
34410                 }
34411 diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
34412 index 7b8795eca589..66215d9106a4 100644
34413 --- a/drivers/media/common/saa7146/saa7146_video.c
34414 +++ b/drivers/media/common/saa7146/saa7146_video.c
34415 @@ -247,9 +247,8 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu
34417                 /* walk all pages, copy all page addresses to ptr1 */
34418                 for (i = 0; i < length; i++, list++) {
34419 -                       for (p = 0; p * 4096 < list->length; p++, ptr1++) {
34420 +                       for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr1++)
34421                                 *ptr1 = cpu_to_le32(sg_dma_address(list) - list->offset);
34422 -                       }
34423                 }
34424  /*
34425                 ptr1 = pt1->cpu;
34426 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
34427 index 5ff7bedee247..3862ddc86ec4 100644
34428 --- a/drivers/media/dvb-core/dvbdev.c
34429 +++ b/drivers/media/dvb-core/dvbdev.c
34430 @@ -241,6 +241,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev)
34432         if (dvbdev->adapter->conn) {
34433                 media_device_unregister_entity(dvbdev->adapter->conn);
34434 +               kfree(dvbdev->adapter->conn);
34435                 dvbdev->adapter->conn = NULL;
34436                 kfree(dvbdev->adapter->conn_pads);
34437                 dvbdev->adapter->conn_pads = NULL;
34438 diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
34439 index cfa4cdde99d8..02e8aa11e36e 100644
34440 --- a/drivers/media/dvb-frontends/m88ds3103.c
34441 +++ b/drivers/media/dvb-frontends/m88ds3103.c
34442 @@ -1904,8 +1904,8 @@ static int m88ds3103_probe(struct i2c_client *client,
34444                 dev->dt_client = i2c_new_dummy_device(client->adapter,
34445                                                       dev->dt_addr);
34446 -               if (!dev->dt_client) {
34447 -                       ret = -ENODEV;
34448 +               if (IS_ERR(dev->dt_client)) {
34449 +                       ret = PTR_ERR(dev->dt_client);
34450                         goto err_kfree;
34451                 }
34452         }
34453 diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
34454 index a3161d709015..ab7883cff8b2 100644
34455 --- a/drivers/media/i2c/adv7511-v4l2.c
34456 +++ b/drivers/media/i2c/adv7511-v4l2.c
34457 @@ -1964,7 +1964,7 @@ static int adv7511_remove(struct i2c_client *client)
34459         adv7511_set_isr(sd, false);
34460         adv7511_init_setup(sd);
34461 -       cancel_delayed_work(&state->edid_handler);
34462 +       cancel_delayed_work_sync(&state->edid_handler);
34463         i2c_unregister_device(state->i2c_edid);
34464         i2c_unregister_device(state->i2c_cec);
34465         i2c_unregister_device(state->i2c_pktmem);
34466 diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
34467 index 09004d928d11..d1f58795794f 100644
34468 --- a/drivers/media/i2c/adv7604.c
34469 +++ b/drivers/media/i2c/adv7604.c
34470 @@ -3616,7 +3616,7 @@ static int adv76xx_remove(struct i2c_client *client)
34471         io_write(sd, 0x6e, 0);
34472         io_write(sd, 0x73, 0);
34474 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
34475 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
34476         v4l2_async_unregister_subdev(sd);
34477         media_entity_cleanup(&sd->entity);
34478         adv76xx_unregister_clients(to_state(sd));
34479 diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
34480 index 0855f648416d..f7d2b6cd3008 100644
34481 --- a/drivers/media/i2c/adv7842.c
34482 +++ b/drivers/media/i2c/adv7842.c
34483 @@ -3586,7 +3586,7 @@ static int adv7842_remove(struct i2c_client *client)
34484         struct adv7842_state *state = to_state(sd);
34486         adv7842_irq_enable(sd, false);
34487 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
34488 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
34489         v4l2_device_unregister_subdev(sd);
34490         media_entity_cleanup(&sd->entity);
34491         adv7842_unregister_clients(sd);
34492 diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
34493 index 15afbb4f5b31..4505594996bd 100644
34494 --- a/drivers/media/i2c/ccs/ccs-core.c
34495 +++ b/drivers/media/i2c/ccs/ccs-core.c
34496 @@ -3522,11 +3522,11 @@ static int ccs_probe(struct i2c_client *client)
34497         sensor->pll.scale_n = CCS_LIM(sensor, SCALER_N_MIN);
34499         ccs_create_subdev(sensor, sensor->scaler, " scaler", 2,
34500 -                         MEDIA_ENT_F_CAM_SENSOR);
34501 +                         MEDIA_ENT_F_PROC_VIDEO_SCALER);
34502         ccs_create_subdev(sensor, sensor->binner, " binner", 2,
34503                           MEDIA_ENT_F_PROC_VIDEO_SCALER);
34504         ccs_create_subdev(sensor, sensor->pixel_array, " pixel_array", 1,
34505 -                         MEDIA_ENT_F_PROC_VIDEO_SCALER);
34506 +                         MEDIA_ENT_F_CAM_SENSOR);
34508         rval = ccs_init_controls(sensor);
34509         if (rval < 0)
34510 diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
34511 index 6e3382b85a90..49ba39418360 100644
34512 --- a/drivers/media/i2c/imx219.c
34513 +++ b/drivers/media/i2c/imx219.c
34514 @@ -1035,29 +1035,47 @@ static int imx219_start_streaming(struct imx219 *imx219)
34515         const struct imx219_reg_list *reg_list;
34516         int ret;
34518 +       ret = pm_runtime_get_sync(&client->dev);
34519 +       if (ret < 0) {
34520 +               pm_runtime_put_noidle(&client->dev);
34521 +               return ret;
34522 +       }
34524         /* Apply default values of current mode */
34525         reg_list = &imx219->mode->reg_list;
34526         ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
34527         if (ret) {
34528                 dev_err(&client->dev, "%s failed to set mode\n", __func__);
34529 -               return ret;
34530 +               goto err_rpm_put;
34531         }
34533         ret = imx219_set_framefmt(imx219);
34534         if (ret) {
34535                 dev_err(&client->dev, "%s failed to set frame format: %d\n",
34536                         __func__, ret);
34537 -               return ret;
34538 +               goto err_rpm_put;
34539         }
34541         /* Apply customized values from user */
34542         ret =  __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
34543         if (ret)
34544 -               return ret;
34545 +               goto err_rpm_put;
34547         /* set stream on register */
34548 -       return imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
34549 -                               IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
34550 +       ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
34551 +                              IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
34552 +       if (ret)
34553 +               goto err_rpm_put;
34555 +       /* vflip and hflip cannot change during streaming */
34556 +       __v4l2_ctrl_grab(imx219->vflip, true);
34557 +       __v4l2_ctrl_grab(imx219->hflip, true);
34559 +       return 0;
34561 +err_rpm_put:
34562 +       pm_runtime_put(&client->dev);
34563 +       return ret;
34566  static void imx219_stop_streaming(struct imx219 *imx219)
34567 @@ -1070,12 +1088,16 @@ static void imx219_stop_streaming(struct imx219 *imx219)
34568                                IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
34569         if (ret)
34570                 dev_err(&client->dev, "%s failed to set stream\n", __func__);
34572 +       __v4l2_ctrl_grab(imx219->vflip, false);
34573 +       __v4l2_ctrl_grab(imx219->hflip, false);
34575 +       pm_runtime_put(&client->dev);
34578  static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
34580         struct imx219 *imx219 = to_imx219(sd);
34581 -       struct i2c_client *client = v4l2_get_subdevdata(sd);
34582         int ret = 0;
34584         mutex_lock(&imx219->mutex);
34585 @@ -1085,36 +1107,23 @@ static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
34586         }
34588         if (enable) {
34589 -               ret = pm_runtime_get_sync(&client->dev);
34590 -               if (ret < 0) {
34591 -                       pm_runtime_put_noidle(&client->dev);
34592 -                       goto err_unlock;
34593 -               }
34595                 /*
34596                  * Apply default & customized values
34597                  * and then start streaming.
34598                  */
34599                 ret = imx219_start_streaming(imx219);
34600                 if (ret)
34601 -                       goto err_rpm_put;
34602 +                       goto err_unlock;
34603         } else {
34604                 imx219_stop_streaming(imx219);
34605 -               pm_runtime_put(&client->dev);
34606         }
34608         imx219->streaming = enable;
34610 -       /* vflip and hflip cannot change during streaming */
34611 -       __v4l2_ctrl_grab(imx219->vflip, enable);
34612 -       __v4l2_ctrl_grab(imx219->hflip, enable);
34614         mutex_unlock(&imx219->mutex);
34616         return ret;
34618 -err_rpm_put:
34619 -       pm_runtime_put(&client->dev);
34620  err_unlock:
34621         mutex_unlock(&imx219->mutex);
34623 diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
34624 index 39530d43590e..a7caf2eb5771 100644
34625 --- a/drivers/media/i2c/msp3400-driver.c
34626 +++ b/drivers/media/i2c/msp3400-driver.c
34627 @@ -170,7 +170,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
34628                         break;
34629                 dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
34630                        dev, addr);
34631 -               schedule_timeout_interruptible(msecs_to_jiffies(10));
34632 +               schedule_msec_hrtimeout_interruptible((10));
34633         }
34634         if (err == 3) {
34635                 dev_warn(&client->dev, "resetting chip, sound will go off.\n");
34636 @@ -211,7 +211,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
34637                         break;
34638                 dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
34639                        dev, addr);
34640 -               schedule_timeout_interruptible(msecs_to_jiffies(10));
34641 +               schedule_msec_hrtimeout_interruptible((10));
34642         }
34643         if (err == 3) {
34644                 dev_warn(&client->dev, "resetting chip, sound will go off.\n");
34645 diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
34646 index dcc21515e5a4..179d107f494c 100644
34647 --- a/drivers/media/i2c/rdacm21.c
34648 +++ b/drivers/media/i2c/rdacm21.c
34649 @@ -345,7 +345,7 @@ static int ov10640_initialize(struct rdacm21_device *dev)
34650         /* Read OV10640 ID to test communications. */
34651         ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR, OV490_SCCB_SLAVE_READ);
34652         ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH, OV10640_CHIP_ID >> 8);
34653 -       ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, (u8)OV10640_CHIP_ID);
34654 +       ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, OV10640_CHIP_ID & 0xff);
34656         /* Trigger SCCB slave transaction and give it some time to complete. */
34657         ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
34658 diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
34659 index 831b5b54fd78..1b309bb743c7 100644
34660 --- a/drivers/media/i2c/tc358743.c
34661 +++ b/drivers/media/i2c/tc358743.c
34662 @@ -2193,7 +2193,7 @@ static int tc358743_remove(struct i2c_client *client)
34663                 del_timer_sync(&state->timer);
34664                 flush_work(&state->work_i2c_poll);
34665         }
34666 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
34667 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
34668         cec_unregister_adapter(state->cec_adap);
34669         v4l2_async_unregister_subdev(sd);
34670         v4l2_device_unregister_subdev(sd);
34671 diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
34672 index a09bf0a39d05..89bb7e6dc7a4 100644
34673 --- a/drivers/media/i2c/tda1997x.c
34674 +++ b/drivers/media/i2c/tda1997x.c
34675 @@ -2804,7 +2804,7 @@ static int tda1997x_remove(struct i2c_client *client)
34676         media_entity_cleanup(&sd->entity);
34677         v4l2_ctrl_handler_free(&state->hdl);
34678         regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies);
34679 -       cancel_delayed_work(&state->delayed_work_enable_hpd);
34680 +       cancel_delayed_work_sync(&state->delayed_work_enable_hpd);
34681         mutex_destroy(&state->page_lock);
34682         mutex_destroy(&state->lock);
34684 diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
34685 index cf7cfda94107..f63e17489547 100644
34686 --- a/drivers/media/pci/cx18/cx18-gpio.c
34687 +++ b/drivers/media/pci/cx18/cx18-gpio.c
34688 @@ -81,11 +81,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
34690         /* Assert */
34691         gpio_update(cx, mask, ~active_lo);
34692 -       schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
34693 +       schedule_msec_hrtimeout_uninterruptible((assert_msecs));
34695         /* Deassert */
34696         gpio_update(cx, mask, ~active_hi);
34697 -       schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
34698 +       schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
34701  /*
34702 diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
34703 index 22f55a7840a6..d0ca260ecf70 100644
34704 --- a/drivers/media/pci/cx23885/cx23885-core.c
34705 +++ b/drivers/media/pci/cx23885/cx23885-core.c
34706 @@ -2077,6 +2077,15 @@ static struct {
34707          * 0x1423 is the PCI ID for the IOMMU found on Kaveri
34708          */
34709         { PCI_VENDOR_ID_AMD, 0x1423 },
34710 +       /* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
34711 +        */
34712 +       { PCI_VENDOR_ID_AMD, 0x1481 },
34713 +       /* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
34714 +        */
34715 +       { PCI_VENDOR_ID_AMD, 0x1419 },
34716 +       /* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
34717 +        */
34718 +       { PCI_VENDOR_ID_ATI, 0x5a23 },
34719  };
34721  static bool cx23885_does_need_dma_reset(void)
34722 diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
34723 index 6e8c0c230e11..fecef85bd62e 100644
34724 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
34725 +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
34726 @@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
34727         if (!q->sensor)
34728                 return -ENODEV;
34730 -       freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
34731 +       freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
34732         if (freq < 0) {
34733                 dev_err(dev, "error %lld, invalid link_freq\n", freq);
34734                 return freq;
34735 diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
34736 index 856e7ab7f33e..766a26251337 100644
34737 --- a/drivers/media/pci/ivtv/ivtv-gpio.c
34738 +++ b/drivers/media/pci/ivtv/ivtv-gpio.c
34739 @@ -105,7 +105,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
34740         curout = (curout & ~0xF) | 1;
34741         write_reg(curout, IVTV_REG_GPIO_OUT);
34742         /* We could use something else for smaller time */
34743 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
34744 +       schedule_msec_hrtimeout_interruptible((1));
34745         curout |= 2;
34746         write_reg(curout, IVTV_REG_GPIO_OUT);
34747         curdir &= ~0x80;
34748 @@ -125,11 +125,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
34749         curout = read_reg(IVTV_REG_GPIO_OUT);
34750         curout &= ~(1 << itv->card->xceive_pin);
34751         write_reg(curout, IVTV_REG_GPIO_OUT);
34752 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
34753 +       schedule_msec_hrtimeout_interruptible((1));
34755         curout |= 1 << itv->card->xceive_pin;
34756         write_reg(curout, IVTV_REG_GPIO_OUT);
34757 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
34758 +       schedule_msec_hrtimeout_interruptible((1));
34759         return 0;
34762 diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
34763 index 35dccb31174c..8181cd65e876 100644
34764 --- a/drivers/media/pci/ivtv/ivtv-ioctl.c
34765 +++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
34766 @@ -1139,7 +1139,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
34767                                 TASK_UNINTERRUPTIBLE);
34768                 if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
34769                         break;
34770 -               schedule_timeout(msecs_to_jiffies(25));
34771 +               schedule_msec_hrtimeout((25));
34772         }
34773         finish_wait(&itv->vsync_waitq, &wait);
34774         mutex_lock(&itv->serialize_lock);
34775 diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
34776 index f04ee84bab5f..c4469b4b8f99 100644
34777 --- a/drivers/media/pci/ivtv/ivtv-streams.c
34778 +++ b/drivers/media/pci/ivtv/ivtv-streams.c
34779 @@ -849,7 +849,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
34780                         while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
34781                                 time_before(jiffies,
34782                                             then + msecs_to_jiffies(2000))) {
34783 -                               schedule_timeout(msecs_to_jiffies(10));
34784 +                               schedule_msec_hrtimeout((10));
34785                         }
34787                         /* To convert jiffies to ms, we must multiply by 1000
34788 diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
34789 index 391572a6ec76..efb757d5168a 100644
34790 --- a/drivers/media/pci/saa7134/saa7134-core.c
34791 +++ b/drivers/media/pci/saa7134/saa7134-core.c
34792 @@ -243,7 +243,7 @@ int saa7134_pgtable_build(struct pci_dev *pci, struct saa7134_pgtable *pt,
34794         ptr = pt->cpu + startpage;
34795         for (i = 0; i < length; i++, list = sg_next(list)) {
34796 -               for (p = 0; p * 4096 < list->length; p++, ptr++)
34797 +               for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++)
34798                         *ptr = cpu_to_le32(sg_dma_address(list) +
34799                                                 list->offset + p * 4096);
34800         }
34801 diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
34802 index 11e1eb6a6809..1d1d32e043f1 100644
34803 --- a/drivers/media/pci/saa7164/saa7164-encoder.c
34804 +++ b/drivers/media/pci/saa7164/saa7164-encoder.c
34805 @@ -1008,7 +1008,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
34806                 printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
34807                         __func__, result);
34808                 result = -ENOMEM;
34809 -               goto failed;
34810 +               goto fail_pci;
34811         }
34813         /* Establish encoder defaults here */
34814 @@ -1062,7 +1062,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
34815                           100000, ENCODER_DEF_BITRATE);
34816         if (hdl->error) {
34817                 result = hdl->error;
34818 -               goto failed;
34819 +               goto fail_hdl;
34820         }
34822         port->std = V4L2_STD_NTSC_M;
34823 @@ -1080,7 +1080,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
34824                 printk(KERN_INFO "%s: can't allocate mpeg device\n",
34825                         dev->name);
34826                 result = -ENOMEM;
34827 -               goto failed;
34828 +               goto fail_hdl;
34829         }
34831         port->v4l_device->ctrl_handler = hdl;
34832 @@ -1091,10 +1091,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
34833         if (result < 0) {
34834                 printk(KERN_INFO "%s: can't register mpeg device\n",
34835                         dev->name);
34836 -               /* TODO: We're going to leak here if we don't dealloc
34837 -                The buffers above. The unreg function can't deal wit it.
34838 -               */
34839 -               goto failed;
34840 +               goto fail_reg;
34841         }
34843         printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
34844 @@ -1116,9 +1113,14 @@ int saa7164_encoder_register(struct saa7164_port *port)
34846         saa7164_api_set_encoder(port);
34847         saa7164_api_get_encoder(port);
34848 +       return 0;
34850 -       result = 0;
34851 -failed:
34852 +fail_reg:
34853 +       video_device_release(port->v4l_device);
34854 +       port->v4l_device = NULL;
34855 +fail_hdl:
34856 +       v4l2_ctrl_handler_free(hdl);
34857 +fail_pci:
34858         return result;
34861 diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
34862 index 4dd98f94a91e..27bb78513631 100644
34863 --- a/drivers/media/pci/sta2x11/Kconfig
34864 +++ b/drivers/media/pci/sta2x11/Kconfig
34865 @@ -3,6 +3,7 @@ config STA2X11_VIP
34866         tristate "STA2X11 VIP Video For Linux"
34867         depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS && I2C
34868         depends on STA2X11 || COMPILE_TEST
34869 +       select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
34870         select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
34871         select VIDEOBUF2_DMA_CONTIG
34872         select MEDIA_CONTROLLER
34873 diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
34874 index fd1831e97b22..1ddb5d6354cf 100644
34875 --- a/drivers/media/platform/Kconfig
34876 +++ b/drivers/media/platform/Kconfig
34877 @@ -244,6 +244,7 @@ config VIDEO_MEDIATEK_JPEG
34878         depends on MTK_IOMMU_V1 || MTK_IOMMU || COMPILE_TEST
34879         depends on VIDEO_DEV && VIDEO_V4L2
34880         depends on ARCH_MEDIATEK || COMPILE_TEST
34881 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
34882         select VIDEOBUF2_DMA_CONTIG
34883         select V4L2_MEM2MEM_DEV
34884         help
34885 @@ -271,6 +272,7 @@ config VIDEO_MEDIATEK_MDP
34886         depends on MTK_IOMMU || COMPILE_TEST
34887         depends on VIDEO_DEV && VIDEO_V4L2
34888         depends on ARCH_MEDIATEK || COMPILE_TEST
34889 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
34890         select VIDEOBUF2_DMA_CONTIG
34891         select V4L2_MEM2MEM_DEV
34892         select VIDEO_MEDIATEK_VPU
34893 @@ -291,6 +293,7 @@ config VIDEO_MEDIATEK_VCODEC
34894         # our dependencies, to avoid missing symbols during link.
34895         depends on VIDEO_MEDIATEK_VPU || !VIDEO_MEDIATEK_VPU
34896         depends on MTK_SCP || !MTK_SCP
34897 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
34898         select VIDEOBUF2_DMA_CONTIG
34899         select V4L2_MEM2MEM_DEV
34900         select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
34901 diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
34902 index f2c4dadd6a0e..7bb6babdcade 100644
34903 --- a/drivers/media/platform/aspeed-video.c
34904 +++ b/drivers/media/platform/aspeed-video.c
34905 @@ -514,8 +514,8 @@ static void aspeed_video_off(struct aspeed_video *video)
34906         aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff);
34908         /* Turn off the relevant clocks */
34909 -       clk_disable(video->vclk);
34910         clk_disable(video->eclk);
34911 +       clk_disable(video->vclk);
34913         clear_bit(VIDEO_CLOCKS_ON, &video->flags);
34915 @@ -526,8 +526,8 @@ static void aspeed_video_on(struct aspeed_video *video)
34916                 return;
34918         /* Turn on the relevant clocks */
34919 -       clk_enable(video->eclk);
34920         clk_enable(video->vclk);
34921 +       clk_enable(video->eclk);
34923         set_bit(VIDEO_CLOCKS_ON, &video->flags);
34925 @@ -1719,8 +1719,11 @@ static int aspeed_video_probe(struct platform_device *pdev)
34926                 return rc;
34928         rc = aspeed_video_setup_video(video);
34929 -       if (rc)
34930 +       if (rc) {
34931 +               clk_unprepare(video->vclk);
34932 +               clk_unprepare(video->eclk);
34933                 return rc;
34934 +       }
34936         return 0;
34938 diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
34939 index 995e95272e51..e600764dce96 100644
34940 --- a/drivers/media/platform/coda/coda-common.c
34941 +++ b/drivers/media/platform/coda/coda-common.c
34942 @@ -2062,7 +2062,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
34943         if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
34944                 ctx->params.gop_size = 1;
34945         ctx->gopcounter = ctx->params.gop_size - 1;
34946 -       v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
34947 +       /* Only decoders have this control */
34948 +       if (ctx->mb_err_cnt_ctrl)
34949 +               v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
34951         ret = ctx->ops->start_streaming(ctx);
34952         if (ctx->inst_type == CODA_INST_DECODER) {
34953 diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c
34954 index 153612ca96fc..a1393fefa8ae 100644
34955 --- a/drivers/media/platform/meson/ge2d/ge2d.c
34956 +++ b/drivers/media/platform/meson/ge2d/ge2d.c
34957 @@ -757,7 +757,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
34959                 if (ctrl->val == 90) {
34960                         ctx->hflip = 0;
34961 -                       ctx->vflip = 0;
34962 +                       ctx->vflip = 1;
34963                         ctx->xy_swap = 1;
34964                 } else if (ctrl->val == 180) {
34965                         ctx->hflip = 1;
34966 @@ -765,7 +765,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
34967                         ctx->xy_swap = 0;
34968                 } else if (ctrl->val == 270) {
34969                         ctx->hflip = 1;
34970 -                       ctx->vflip = 1;
34971 +                       ctx->vflip = 0;
34972                         ctx->xy_swap = 1;
34973                 } else {
34974                         ctx->hflip = 0;
34975 diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
34976 index f9896c121fd8..ae374bb2a48f 100644
34977 --- a/drivers/media/platform/qcom/venus/core.c
34978 +++ b/drivers/media/platform/qcom/venus/core.c
34979 @@ -218,18 +218,17 @@ static int venus_probe(struct platform_device *pdev)
34980                 return -ENOMEM;
34982         core->dev = dev;
34983 -       platform_set_drvdata(pdev, core);
34985         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
34986         core->base = devm_ioremap_resource(dev, r);
34987         if (IS_ERR(core->base))
34988                 return PTR_ERR(core->base);
34990 -       core->video_path = of_icc_get(dev, "video-mem");
34991 +       core->video_path = devm_of_icc_get(dev, "video-mem");
34992         if (IS_ERR(core->video_path))
34993                 return PTR_ERR(core->video_path);
34995 -       core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
34996 +       core->cpucfg_path = devm_of_icc_get(dev, "cpu-cfg");
34997         if (IS_ERR(core->cpucfg_path))
34998                 return PTR_ERR(core->cpucfg_path);
35000 @@ -248,7 +247,7 @@ static int venus_probe(struct platform_device *pdev)
35001                 return -ENODEV;
35003         if (core->pm_ops->core_get) {
35004 -               ret = core->pm_ops->core_get(dev);
35005 +               ret = core->pm_ops->core_get(core);
35006                 if (ret)
35007                         return ret;
35008         }
35009 @@ -273,6 +272,12 @@ static int venus_probe(struct platform_device *pdev)
35010         if (ret)
35011                 goto err_core_put;
35013 +       ret = v4l2_device_register(dev, &core->v4l2_dev);
35014 +       if (ret)
35015 +               goto err_core_deinit;
35017 +       platform_set_drvdata(pdev, core);
35019         pm_runtime_enable(dev);
35021         ret = pm_runtime_get_sync(dev);
35022 @@ -307,10 +312,6 @@ static int venus_probe(struct platform_device *pdev)
35023         if (ret)
35024                 goto err_venus_shutdown;
35026 -       ret = v4l2_device_register(dev, &core->v4l2_dev);
35027 -       if (ret)
35028 -               goto err_core_deinit;
35030         ret = pm_runtime_put_sync(dev);
35031         if (ret) {
35032                 pm_runtime_get_noresume(dev);
35033 @@ -323,8 +324,6 @@ static int venus_probe(struct platform_device *pdev)
35035  err_dev_unregister:
35036         v4l2_device_unregister(&core->v4l2_dev);
35037 -err_core_deinit:
35038 -       hfi_core_deinit(core, false);
35039  err_venus_shutdown:
35040         venus_shutdown(core);
35041  err_runtime_disable:
35042 @@ -332,9 +331,11 @@ static int venus_probe(struct platform_device *pdev)
35043         pm_runtime_set_suspended(dev);
35044         pm_runtime_disable(dev);
35045         hfi_destroy(core);
35046 +err_core_deinit:
35047 +       hfi_core_deinit(core, false);
35048  err_core_put:
35049         if (core->pm_ops->core_put)
35050 -               core->pm_ops->core_put(dev);
35051 +               core->pm_ops->core_put(core);
35052         return ret;
35055 @@ -360,14 +361,14 @@ static int venus_remove(struct platform_device *pdev)
35056         pm_runtime_disable(dev);
35058         if (pm_ops->core_put)
35059 -               pm_ops->core_put(dev);
35060 +               pm_ops->core_put(core);
35062 -       hfi_destroy(core);
35063 +       v4l2_device_unregister(&core->v4l2_dev);
35065 -       icc_put(core->video_path);
35066 -       icc_put(core->cpucfg_path);
35067 +       hfi_destroy(core);
35069         v4l2_device_unregister(&core->v4l2_dev);
35071         mutex_destroy(&core->pm_lock);
35072         mutex_destroy(&core->lock);
35073         venus_dbgfs_deinit(core);
35074 @@ -396,7 +397,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
35075                 return ret;
35077         if (pm_ops->core_power) {
35078 -               ret = pm_ops->core_power(dev, POWER_OFF);
35079 +               ret = pm_ops->core_power(core, POWER_OFF);
35080                 if (ret)
35081                         return ret;
35082         }
35083 @@ -414,7 +415,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
35084  err_video_path:
35085         icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0);
35086  err_cpucfg_path:
35087 -       pm_ops->core_power(dev, POWER_ON);
35088 +       pm_ops->core_power(core, POWER_ON);
35090         return ret;
35092 @@ -434,7 +435,7 @@ static __maybe_unused int venus_runtime_resume(struct device *dev)
35093                 return ret;
35095         if (pm_ops->core_power) {
35096 -               ret = pm_ops->core_power(dev, POWER_ON);
35097 +               ret = pm_ops->core_power(core, POWER_ON);
35098                 if (ret)
35099                         return ret;
35100         }
35101 diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
35102 index 4f7565834469..558510a8dfc8 100644
35103 --- a/drivers/media/platform/qcom/venus/hfi_cmds.c
35104 +++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
35105 @@ -1039,6 +1039,18 @@ static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
35106                 pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hierp);
35107                 break;
35108         }
35109 +       case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
35110 +               struct hfi_uncompressed_plane_actual_info *in = pdata;
35111 +               struct hfi_uncompressed_plane_actual_info *info = prop_data;
35113 +               info->buffer_type = in->buffer_type;
35114 +               info->num_planes = in->num_planes;
35115 +               info->plane_format[0] = in->plane_format[0];
35116 +               if (in->num_planes > 1)
35117 +                       info->plane_format[1] = in->plane_format[1];
35118 +               pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
35119 +               break;
35120 +       }
35122         /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
35123         case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
35124 @@ -1205,18 +1217,6 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
35125                 pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
35126                 break;
35127         }
35128 -       case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
35129 -               struct hfi_uncompressed_plane_actual_info *in = pdata;
35130 -               struct hfi_uncompressed_plane_actual_info *info = prop_data;
35132 -               info->buffer_type = in->buffer_type;
35133 -               info->num_planes = in->num_planes;
35134 -               info->plane_format[0] = in->plane_format[0];
35135 -               if (in->num_planes > 1)
35136 -                       info->plane_format[1] = in->plane_format[1];
35137 -               pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
35138 -               break;
35139 -       }
35140         case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
35141         case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
35142         case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE:
35143 diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
35144 index 7263c0c32695..5b8389b98299 100644
35145 --- a/drivers/media/platform/qcom/venus/hfi_parser.c
35146 +++ b/drivers/media/platform/qcom/venus/hfi_parser.c
35147 @@ -235,13 +235,13 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
35148         u32 enc_codecs, dec_codecs, count = 0;
35149         unsigned int entries;
35151 -       if (inst)
35152 -               return 0;
35154         plat = hfi_platform_get(core->res->hfi_version);
35155         if (!plat)
35156                 return -EINVAL;
35158 +       if (inst)
35159 +               return 0;
35161         if (plat->codecs)
35162                 plat->codecs(&enc_codecs, &dec_codecs, &count);
35164 @@ -277,8 +277,10 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
35166         parser_init(inst, &codecs, &domain);
35168 -       core->codecs_count = 0;
35169 -       memset(core->caps, 0, sizeof(core->caps));
35170 +       if (core->res->hfi_version > HFI_VERSION_1XX) {
35171 +               core->codecs_count = 0;
35172 +               memset(core->caps, 0, sizeof(core->caps));
35173 +       }
35175         while (words_count) {
35176                 data = word + 1;
35177 diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
35178 index 43c4e3d9e281..95b4d40ff6a5 100644
35179 --- a/drivers/media/platform/qcom/venus/pm_helpers.c
35180 +++ b/drivers/media/platform/qcom/venus/pm_helpers.c
35181 @@ -277,16 +277,28 @@ static int load_scale_v1(struct venus_inst *inst)
35182         return 0;
35185 -static int core_get_v1(struct device *dev)
35186 +static int core_get_v1(struct venus_core *core)
35188 -       struct venus_core *core = dev_get_drvdata(dev);
35189 +       int ret;
35191 +       ret = core_clks_get(core);
35192 +       if (ret)
35193 +               return ret;
35195 -       return core_clks_get(core);
35196 +       core->opp_table = dev_pm_opp_set_clkname(core->dev, "core");
35197 +       if (IS_ERR(core->opp_table))
35198 +               return PTR_ERR(core->opp_table);
35200 +       return 0;
35203 -static int core_power_v1(struct device *dev, int on)
35204 +static void core_put_v1(struct venus_core *core)
35206 +       dev_pm_opp_put_clkname(core->opp_table);
35209 +static int core_power_v1(struct venus_core *core, int on)
35211 -       struct venus_core *core = dev_get_drvdata(dev);
35212         int ret = 0;
35214         if (on == POWER_ON)
35215 @@ -299,6 +311,7 @@ static int core_power_v1(struct device *dev, int on)
35217  static const struct venus_pm_ops pm_ops_v1 = {
35218         .core_get = core_get_v1,
35219 +       .core_put = core_put_v1,
35220         .core_power = core_power_v1,
35221         .load_scale = load_scale_v1,
35222  };
35223 @@ -371,6 +384,7 @@ static int venc_power_v3(struct device *dev, int on)
35225  static const struct venus_pm_ops pm_ops_v3 = {
35226         .core_get = core_get_v1,
35227 +       .core_put = core_put_v1,
35228         .core_power = core_power_v1,
35229         .vdec_get = vdec_get_v3,
35230         .vdec_power = vdec_power_v3,
35231 @@ -753,12 +767,12 @@ static int venc_power_v4(struct device *dev, int on)
35232         return ret;
35235 -static int vcodec_domains_get(struct device *dev)
35236 +static int vcodec_domains_get(struct venus_core *core)
35238         int ret;
35239         struct opp_table *opp_table;
35240         struct device **opp_virt_dev;
35241 -       struct venus_core *core = dev_get_drvdata(dev);
35242 +       struct device *dev = core->dev;
35243         const struct venus_resources *res = core->res;
35244         struct device *pd;
35245         unsigned int i;
35246 @@ -809,9 +823,8 @@ static int vcodec_domains_get(struct device *dev)
35247         return ret;
35250 -static void vcodec_domains_put(struct device *dev)
35251 +static void vcodec_domains_put(struct venus_core *core)
35253 -       struct venus_core *core = dev_get_drvdata(dev);
35254         const struct venus_resources *res = core->res;
35255         unsigned int i;
35257 @@ -834,9 +847,9 @@ static void vcodec_domains_put(struct device *dev)
35258         dev_pm_opp_detach_genpd(core->opp_table);
35261 -static int core_get_v4(struct device *dev)
35262 +static int core_get_v4(struct venus_core *core)
35264 -       struct venus_core *core = dev_get_drvdata(dev);
35265 +       struct device *dev = core->dev;
35266         const struct venus_resources *res = core->res;
35267         int ret;
35269 @@ -875,7 +888,7 @@ static int core_get_v4(struct device *dev)
35270                 }
35271         }
35273 -       ret = vcodec_domains_get(dev);
35274 +       ret = vcodec_domains_get(core);
35275         if (ret) {
35276                 if (core->has_opp_table)
35277                         dev_pm_opp_of_remove_table(dev);
35278 @@ -886,14 +899,14 @@ static int core_get_v4(struct device *dev)
35279         return 0;
35282 -static void core_put_v4(struct device *dev)
35283 +static void core_put_v4(struct venus_core *core)
35285 -       struct venus_core *core = dev_get_drvdata(dev);
35286 +       struct device *dev = core->dev;
35288         if (legacy_binding)
35289                 return;
35291 -       vcodec_domains_put(dev);
35292 +       vcodec_domains_put(core);
35294         if (core->has_opp_table)
35295                 dev_pm_opp_of_remove_table(dev);
35296 @@ -901,9 +914,9 @@ static void core_put_v4(struct device *dev)
35300 -static int core_power_v4(struct device *dev, int on)
35301 +static int core_power_v4(struct venus_core *core, int on)
35303 -       struct venus_core *core = dev_get_drvdata(dev);
35304 +       struct device *dev = core->dev;
35305         struct device *pmctrl = core->pmdomains[0];
35306         int ret = 0;
35308 diff --git a/drivers/media/platform/qcom/venus/pm_helpers.h b/drivers/media/platform/qcom/venus/pm_helpers.h
35309 index aa2f6afa2354..a492c50c5543 100644
35310 --- a/drivers/media/platform/qcom/venus/pm_helpers.h
35311 +++ b/drivers/media/platform/qcom/venus/pm_helpers.h
35312 @@ -4,14 +4,15 @@
35313  #define __VENUS_PM_HELPERS_H__
35315  struct device;
35316 +struct venus_core;
35318  #define POWER_ON       1
35319  #define POWER_OFF      0
35321  struct venus_pm_ops {
35322 -       int (*core_get)(struct device *dev);
35323 -       void (*core_put)(struct device *dev);
35324 -       int (*core_power)(struct device *dev, int on);
35325 +       int (*core_get)(struct venus_core *core);
35326 +       void (*core_put)(struct venus_core *core);
35327 +       int (*core_power)(struct venus_core *core, int on);
35329         int (*vdec_get)(struct device *dev);
35330         void (*vdec_put)(struct device *dev);
35331 diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
35332 index a52b80055173..abef0037bf55 100644
35333 --- a/drivers/media/platform/qcom/venus/venc_ctrls.c
35334 +++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
35335 @@ -359,7 +359,7 @@ int venc_ctrl_init(struct venus_inst *inst)
35336                 V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
35337                 ~((1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
35338                 (1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)),
35339 -               V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
35340 +               V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
35342         v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
35343                 V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
35344 diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
35345 index 813670ed9577..79deed8adcea 100644
35346 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
35347 +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
35348 @@ -520,14 +520,15 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
35349                                    struct v4l2_mbus_framefmt *format,
35350                                    unsigned int which)
35352 -       const struct rkisp1_isp_mbus_info *mbus_info;
35353 -       struct v4l2_mbus_framefmt *src_fmt;
35354 +       const struct rkisp1_isp_mbus_info *sink_mbus_info;
35355 +       struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
35357 +       sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
35358         src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
35359 -       mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
35360 +       sink_mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
35362         /* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
35363 -       if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
35364 +       if (sink_mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
35365             rkisp1_rsz_get_yuv_mbus_info(format->code))
35366                 src_fmt->code = format->code;
35368 diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
35369 index 2b270093009c..a27f638df11c 100644
35370 --- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
35371 +++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
35372 @@ -480,7 +480,7 @@ static int regs_show(struct seq_file *s, void *data)
35373         int ret;
35374         unsigned int i;
35376 -       ret = pm_runtime_get_sync(bdisp->dev);
35377 +       ret = pm_runtime_resume_and_get(bdisp->dev);
35378         if (ret < 0) {
35379                 seq_puts(s, "Cannot wake up IP\n");
35380                 return 0;
35381 diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
35382 index b55de9ab64d8..3181d0781b61 100644
35383 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
35384 +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
35385 @@ -151,8 +151,10 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
35386         }
35388         subdev = sun6i_video_remote_subdev(video, NULL);
35389 -       if (!subdev)
35390 +       if (!subdev) {
35391 +               ret = -EINVAL;
35392                 goto stop_media_pipeline;
35393 +       }
35395         config.pixelformat = video->fmt.fmt.pix.pixelformat;
35396         config.code = video->mbus_code;
35397 diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
35398 index ed863bf5ea80..671e4a928993 100644
35399 --- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
35400 +++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
35401 @@ -589,7 +589,7 @@ static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count)
35402         int ret;
35404         if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
35405 -               ret = pm_runtime_get_sync(dev);
35406 +               ret = pm_runtime_resume_and_get(dev);
35407                 if (ret < 0) {
35408                         dev_err(dev, "Failed to enable module\n");
35410 diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
35411 index cb0437b4c331..163fffc0e1d4 100644
35412 --- a/drivers/media/radio/radio-mr800.c
35413 +++ b/drivers/media/radio/radio-mr800.c
35414 @@ -366,7 +366,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
35415                         retval = -ENODATA;
35416                         break;
35417                 }
35418 -               if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
35419 +               if (schedule_msec_hrtimeout_interruptible((10))) {
35420                         retval = -ERESTARTSYS;
35421                         break;
35422                 }
35423 diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
35424 index fb9de7bbcd19..e53cf45e7f3f 100644
35425 --- a/drivers/media/radio/radio-tea5777.c
35426 +++ b/drivers/media/radio/radio-tea5777.c
35427 @@ -235,7 +235,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
35428         }
35430         if (wait) {
35431 -               if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
35432 +               if (schedule_msec_hrtimeout_interruptible((wait)))
35433                         return -ERESTARTSYS;
35434         }
35436 diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
35437 index c37315226c42..e73e6393403c 100644
35438 --- a/drivers/media/radio/tea575x.c
35439 +++ b/drivers/media/radio/tea575x.c
35440 @@ -401,7 +401,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
35441         for (;;) {
35442                 if (time_after(jiffies, timeout))
35443                         break;
35444 -               if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
35445 +               if (schedule_msec_hrtimeout_interruptible((10))) {
35446                         /* some signal arrived, stop search */
35447                         tea->val &= ~TEA575X_BIT_SEARCH;
35448                         snd_tea575x_set_freq(tea);
35449 diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
35450 index 0c6229592e13..e5c4a6941d26 100644
35451 --- a/drivers/media/rc/ite-cir.c
35452 +++ b/drivers/media/rc/ite-cir.c
35453 @@ -276,8 +276,14 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
35454         /* read the interrupt flags */
35455         iflags = dev->params.get_irq_causes(dev);
35457 +       /* Check for RX overflow */
35458 +       if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) {
35459 +               dev_warn(&dev->rdev->dev, "receive overflow\n");
35460 +               ir_raw_event_reset(dev->rdev);
35461 +       }
35463         /* check for the receive interrupt */
35464 -       if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
35465 +       if (iflags & ITE_IRQ_RX_FIFO) {
35466                 /* read the FIFO bytes */
35467                 rx_bytes =
35468                         dev->params.get_rx_bytes(dev, rx_buf,
35469 diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
35470 index 0dc65ef3aa14..ca0ebf6ad9cc 100644
35471 --- a/drivers/media/test-drivers/vivid/vivid-core.c
35472 +++ b/drivers/media/test-drivers/vivid/vivid-core.c
35473 @@ -205,13 +205,13 @@ static const u8 vivid_hdmi_edid[256] = {
35474         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
35475         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
35477 -       0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f,
35478 +       0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f,
35479         0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
35480         0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
35481         0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
35482         0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
35483         0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
35484 -       0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
35485 +       0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3,
35486         0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
35487         0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
35488         0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
35489 @@ -220,7 +220,7 @@ static const u8 vivid_hdmi_edid[256] = {
35490         0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
35491         0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
35492         0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
35493 -       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
35494 +       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
35495  };
35497  static int vidioc_querycap(struct file *file, void  *priv,
35498 diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
35499 index ac1e981e8342..9f731f085179 100644
35500 --- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
35501 +++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
35502 @@ -1021,7 +1021,7 @@ int vivid_vid_out_s_fbuf(struct file *file, void *fh,
35503                 return -EINVAL;
35504         }
35505         dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags);
35506 -       dev->fbuf_out_flags = a->flags & (chroma_flags | alpha_flags);
35507 +       dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags);
35508         return 0;
35511 diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
35512 index b3505f402476..8647c50b66e5 100644
35513 --- a/drivers/media/tuners/m88rs6000t.c
35514 +++ b/drivers/media/tuners/m88rs6000t.c
35515 @@ -525,7 +525,7 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
35516         PGA2_cri = PGA2_GC >> 2;
35517         PGA2_crf = PGA2_GC & 0x03;
35519 -       for (i = 0; i <= RF_GC; i++)
35520 +       for (i = 0; i <= RF_GC && i < ARRAY_SIZE(RFGS); i++)
35521                 RFG += RFGS[i];
35523         if (RF_GC == 0)
35524 @@ -537,12 +537,12 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
35525         if (RF_GC == 3)
35526                 RFG += 100;
35528 -       for (i = 0; i <= IF_GC; i++)
35529 +       for (i = 0; i <= IF_GC && i < ARRAY_SIZE(IFGS); i++)
35530                 IFG += IFGS[i];
35532         TIAG = TIA_GC * TIA_GS;
35534 -       for (i = 0; i <= BB_GC; i++)
35535 +       for (i = 0; i <= BB_GC && i < ARRAY_SIZE(BBGS); i++)
35536                 BBG += BBGS[i];
35538         PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS;
35539 diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
35540 index c1a7634e27b4..28e1fd64dd3c 100644
35541 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
35542 +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
35543 @@ -79,11 +79,17 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
35544                         }
35545                 }
35547 -               if ((ret = dvb_usb_adapter_stream_init(adap)) ||
35548 -                       (ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) ||
35549 -                       (ret = dvb_usb_adapter_frontend_init(adap))) {
35550 +               ret = dvb_usb_adapter_stream_init(adap);
35551 +               if (ret)
35552                         return ret;
35553 -               }
35555 +               ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
35556 +               if (ret)
35557 +                       goto dvb_init_err;
35559 +               ret = dvb_usb_adapter_frontend_init(adap);
35560 +               if (ret)
35561 +                       goto frontend_init_err;
35563                 /* use exclusive FE lock if there is multiple shared FEs */
35564                 if (adap->fe_adap[1].fe)
35565 @@ -103,6 +109,12 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
35566         }
35568         return 0;
35570 +frontend_init_err:
35571 +       dvb_usb_adapter_dvb_exit(adap);
35572 +dvb_init_err:
35573 +       dvb_usb_adapter_stream_exit(adap);
35574 +       return ret;
35577  static int dvb_usb_adapter_exit(struct dvb_usb_device *d)
35578 @@ -158,22 +170,20 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
35580                 if (d->props.priv_init != NULL) {
35581                         ret = d->props.priv_init(d);
35582 -                       if (ret != 0) {
35583 -                               kfree(d->priv);
35584 -                               d->priv = NULL;
35585 -                               return ret;
35586 -                       }
35587 +                       if (ret != 0)
35588 +                               goto err_priv_init;
35589                 }
35590         }
35592         /* check the capabilities and set appropriate variables */
35593         dvb_usb_device_power_ctrl(d, 1);
35595 -       if ((ret = dvb_usb_i2c_init(d)) ||
35596 -               (ret = dvb_usb_adapter_init(d, adapter_nums))) {
35597 -               dvb_usb_exit(d);
35598 -               return ret;
35599 -       }
35600 +       ret = dvb_usb_i2c_init(d);
35601 +       if (ret)
35602 +               goto err_i2c_init;
35603 +       ret = dvb_usb_adapter_init(d, adapter_nums);
35604 +       if (ret)
35605 +               goto err_adapter_init;
35607         if ((ret = dvb_usb_remote_init(d)))
35608                 err("could not initialize remote control.");
35609 @@ -181,6 +191,17 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
35610         dvb_usb_device_power_ctrl(d, 0);
35612         return 0;
35614 +err_adapter_init:
35615 +       dvb_usb_adapter_exit(d);
35616 +err_i2c_init:
35617 +       dvb_usb_i2c_exit(d);
35618 +       if (d->priv && d->props.priv_destroy)
35619 +               d->props.priv_destroy(d);
35620 +err_priv_init:
35621 +       kfree(d->priv);
35622 +       d->priv = NULL;
35623 +       return ret;
35626  /* determine the name and the state of the just found USB device */
35627 @@ -255,41 +276,50 @@ int dvb_usb_device_init(struct usb_interface *intf,
35628         if (du != NULL)
35629                 *du = NULL;
35631 -       if ((desc = dvb_usb_find_device(udev, props, &cold)) == NULL) {
35632 +       d = kzalloc(sizeof(*d), GFP_KERNEL);
35633 +       if (!d) {
35634 +               err("no memory for 'struct dvb_usb_device'");
35635 +               return -ENOMEM;
35636 +       }
35638 +       memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
35640 +       desc = dvb_usb_find_device(udev, &d->props, &cold);
35641 +       if (!desc) {
35642                 deb_err("something went very wrong, device was not found in current device list - let's see what comes next.\n");
35643 -               return -ENODEV;
35644 +               ret = -ENODEV;
35645 +               goto error;
35646         }
35648         if (cold) {
35649                 info("found a '%s' in cold state, will try to load a firmware", desc->name);
35650                 ret = dvb_usb_download_firmware(udev, props);
35651                 if (!props->no_reconnect || ret != 0)
35652 -                       return ret;
35653 +                       goto error;
35654         }
35656         info("found a '%s' in warm state.", desc->name);
35657 -       d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL);
35658 -       if (d == NULL) {
35659 -               err("no memory for 'struct dvb_usb_device'");
35660 -               return -ENOMEM;
35661 -       }
35663         d->udev = udev;
35664 -       memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
35665         d->desc = desc;
35666         d->owner = owner;
35668         usb_set_intfdata(intf, d);
35670 -       if (du != NULL)
35671 +       ret = dvb_usb_init(d, adapter_nums);
35672 +       if (ret) {
35673 +               info("%s error while loading driver (%d)", desc->name, ret);
35674 +               goto error;
35675 +       }
35677 +       if (du)
35678                 *du = d;
35680 -       ret = dvb_usb_init(d, adapter_nums);
35681 +       info("%s successfully initialized and connected.", desc->name);
35682 +       return 0;
35684 -       if (ret == 0)
35685 -               info("%s successfully initialized and connected.", desc->name);
35686 -       else
35687 -               info("%s error while loading driver (%d)", desc->name, ret);
35688 + error:
35689 +       usb_set_intfdata(intf, NULL);
35690 +       kfree(d);
35691         return ret;
35693  EXPORT_SYMBOL(dvb_usb_device_init);
35694 diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
35695 index 741be0e69447..2b8ad2bde8a4 100644
35696 --- a/drivers/media/usb/dvb-usb/dvb-usb.h
35697 +++ b/drivers/media/usb/dvb-usb/dvb-usb.h
35698 @@ -487,7 +487,7 @@ extern int __must_check
35699  dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
35701  /* commonly used remote control parsing */
35702 -extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
35703 +extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[5], u32 *, int *);
35705  /* commonly used firmware download types and function */
35706  struct hexline {
35707 diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
35708 index 526424279637..471bd74667e3 100644
35709 --- a/drivers/media/usb/em28xx/em28xx-dvb.c
35710 +++ b/drivers/media/usb/em28xx/em28xx-dvb.c
35711 @@ -2010,6 +2010,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
35712         return result;
35714  out_free:
35715 +       em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
35716         kfree(dvb);
35717         dev->dvb = NULL;
35718         goto ret;
35719 diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
35720 index 158c8e28ed2c..47d8f28bfdfc 100644
35721 --- a/drivers/media/usb/gspca/gspca.c
35722 +++ b/drivers/media/usb/gspca/gspca.c
35723 @@ -1576,6 +1576,8 @@ int gspca_dev_probe2(struct usb_interface *intf,
35724  #endif
35725         v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
35726         v4l2_device_unregister(&gspca_dev->v4l2_dev);
35727 +       if (sd_desc->probe_error)
35728 +               sd_desc->probe_error(gspca_dev);
35729         kfree(gspca_dev->usb_buf);
35730         kfree(gspca_dev);
35731         return ret;
35732 diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
35733 index b0ced2e14006..a6554d5e9e1a 100644
35734 --- a/drivers/media/usb/gspca/gspca.h
35735 +++ b/drivers/media/usb/gspca/gspca.h
35736 @@ -105,6 +105,7 @@ struct sd_desc {
35737         cam_cf_op config;       /* called on probe */
35738         cam_op init;            /* called on probe and resume */
35739         cam_op init_controls;   /* called on probe */
35740 +       cam_v_op probe_error;   /* called if probe failed, do cleanup here */
35741         cam_op start;           /* called on stream on after URBs creation */
35742         cam_pkt_op pkt_scan;
35743  /* optional operations */
35744 diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
35745 index 97799cfb832e..949111070971 100644
35746 --- a/drivers/media/usb/gspca/sq905.c
35747 +++ b/drivers/media/usb/gspca/sq905.c
35748 @@ -158,7 +158,7 @@ static int
35749  sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
35751         int ret;
35752 -       int act_len;
35753 +       int act_len = 0;
35755         gspca_dev->usb_buf[0] = '\0';
35756         if (need_lock)
35757 diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
35758 index 95673fc0a99c..d9bc2aacc885 100644
35759 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
35760 +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
35761 @@ -529,12 +529,21 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
35762  static int stv06xx_config(struct gspca_dev *gspca_dev,
35763                           const struct usb_device_id *id);
35765 +static void stv06xx_probe_error(struct gspca_dev *gspca_dev)
35767 +       struct sd *sd = (struct sd *)gspca_dev;
35769 +       kfree(sd->sensor_priv);
35770 +       sd->sensor_priv = NULL;
35773  /* sub-driver description */
35774  static const struct sd_desc sd_desc = {
35775         .name = MODULE_NAME,
35776         .config = stv06xx_config,
35777         .init = stv06xx_init,
35778         .init_controls = stv06xx_init_controls,
35779 +       .probe_error = stv06xx_probe_error,
35780         .start = stv06xx_start,
35781         .stopN = stv06xx_stopN,
35782         .pkt_scan = stv06xx_pkt_scan,
35783 diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
35784 index 30ef2a3110f7..9a791d8ef200 100644
35785 --- a/drivers/media/usb/uvc/uvc_driver.c
35786 +++ b/drivers/media/usb/uvc/uvc_driver.c
35787 @@ -1712,10 +1712,35 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
35788                         if (forward->bNrInPins != 1) {
35789                                 uvc_dbg(chain->dev, DESCR,
35790                                         "Extension unit %d has more than 1 input pin\n",
35791 -                                       entity->id);
35792 +                                       forward->id);
35793                                 return -EINVAL;
35794                         }
35796 +                       /*
35797 +                        * Some devices reference an output terminal as the
35798 +                        * source of extension units. This is incorrect, as
35799 +                        * output terminals only have an input pin, and thus
35800 +                        * can't be connected to any entity in the forward
35801 +                        * direction. The resulting topology would cause issues
35802 +                        * when registering the media controller graph. To
35803 +                        * avoid this problem, connect the extension unit to
35804 +                        * the source of the output terminal instead.
35805 +                        */
35806 +                       if (UVC_ENTITY_IS_OTERM(entity)) {
35807 +                               struct uvc_entity *source;
35809 +                               source = uvc_entity_by_id(chain->dev,
35810 +                                                         entity->baSourceID[0]);
35811 +                               if (!source) {
35812 +                                       uvc_dbg(chain->dev, DESCR,
35813 +                                               "Can't connect extension unit %u in chain\n",
35814 +                                               forward->id);
35815 +                                       break;
35816 +                               }
35818 +                               forward->baSourceID[0] = source->id;
35819 +                       }
35821                         list_add_tail(&forward->chain, &chain->entities);
35822                         if (!found)
35823                                 uvc_dbg_cont(PROBE, " (->");
35824 @@ -1735,6 +1760,13 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
35825                                 return -EINVAL;
35826                         }
35828 +                       if (UVC_ENTITY_IS_OTERM(entity)) {
35829 +                               uvc_dbg(chain->dev, DESCR,
35830 +                                       "Unsupported connection between output terminals %u and %u\n",
35831 +                                       entity->id, forward->id);
35832 +                               break;
35833 +                       }
35835                         list_add_tail(&forward->chain, &chain->entities);
35836                         if (!found)
35837                                 uvc_dbg_cont(PROBE, " (->");
35838 diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
35839 index d29b861367ea..1ef611e08323 100644
35840 --- a/drivers/media/usb/zr364xx/zr364xx.c
35841 +++ b/drivers/media/usb/zr364xx/zr364xx.c
35842 @@ -1430,7 +1430,7 @@ static int zr364xx_probe(struct usb_interface *intf,
35843         if (hdl->error) {
35844                 err = hdl->error;
35845                 dev_err(&udev->dev, "couldn't register control\n");
35846 -               goto unregister;
35847 +               goto free_hdlr_and_unreg_dev;
35848         }
35849         /* save the init method used by this camera */
35850         cam->method = id->driver_info;
35851 @@ -1503,7 +1503,7 @@ static int zr364xx_probe(struct usb_interface *intf,
35852         if (!cam->read_endpoint) {
35853                 err = -ENOMEM;
35854                 dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
35855 -               goto unregister;
35856 +               goto free_hdlr_and_unreg_dev;
35857         }
35859         /* v4l */
35860 @@ -1515,7 +1515,7 @@ static int zr364xx_probe(struct usb_interface *intf,
35861         /* load zr364xx board specific */
35862         err = zr364xx_board_init(cam);
35863         if (err)
35864 -               goto unregister;
35865 +               goto free_hdlr_and_unreg_dev;
35866         err = v4l2_ctrl_handler_setup(hdl);
35867         if (err)
35868                 goto board_uninit;
35869 @@ -1533,7 +1533,7 @@ static int zr364xx_probe(struct usb_interface *intf,
35870         err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
35871         if (err) {
35872                 dev_err(&udev->dev, "video_register_device failed\n");
35873 -               goto free_handler;
35874 +               goto board_uninit;
35875         }
35876         cam->v4l2_dev.release = zr364xx_release;
35878 @@ -1541,11 +1541,10 @@ static int zr364xx_probe(struct usb_interface *intf,
35879                  video_device_node_name(&cam->vdev));
35880         return 0;
35882 -free_handler:
35883 -       v4l2_ctrl_handler_free(hdl);
35884  board_uninit:
35885         zr364xx_board_uninit(cam);
35886 -unregister:
35887 +free_hdlr_and_unreg_dev:
35888 +       v4l2_ctrl_handler_free(hdl);
35889         v4l2_device_unregister(&cam->v4l2_dev);
35890  free_cam:
35891         kfree(cam);
35892 diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
35893 index 016cf6204cbb..6219c8185782 100644
35894 --- a/drivers/media/v4l2-core/v4l2-ctrls.c
35895 +++ b/drivers/media/v4l2-core/v4l2-ctrls.c
35896 @@ -1675,6 +1675,8 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
35897                 p_fwht_params->version = V4L2_FWHT_VERSION;
35898                 p_fwht_params->width = 1280;
35899                 p_fwht_params->height = 720;
35900 +               p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
35901 +                       (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
35902                 break;
35903         }
35905 @@ -2395,7 +2397,16 @@ static void new_to_req(struct v4l2_ctrl_ref *ref)
35906         if (!ref)
35907                 return;
35908         ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
35909 -       ref->req = ref;
35910 +       ref->valid_p_req = true;
35913 +/* Copy the current value to the request value */
35914 +static void cur_to_req(struct v4l2_ctrl_ref *ref)
35916 +       if (!ref)
35917 +               return;
35918 +       ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->p_req);
35919 +       ref->valid_p_req = true;
35922  /* Copy the request value to the new value */
35923 @@ -2403,8 +2414,8 @@ static void req_to_new(struct v4l2_ctrl_ref *ref)
35925         if (!ref)
35926                 return;
35927 -       if (ref->req)
35928 -               ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
35929 +       if (ref->valid_p_req)
35930 +               ptr_to_ptr(ref->ctrl, ref->p_req, ref->ctrl->p_new);
35931         else
35932                 ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
35934 @@ -2541,7 +2552,15 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
35935         if (hdl == NULL || hdl->buckets == NULL)
35936                 return;
35938 -       if (!hdl->req_obj.req && !list_empty(&hdl->requests)) {
35939 +       /*
35940 +        * If the main handler is freed and it is used by handler objects in
35941 +        * outstanding requests, then unbind and put those objects before
35942 +        * freeing the main handler.
35943 +        *
35944 +        * The main handler can be identified by having a NULL ops pointer in
35945 +        * the request object.
35946 +        */
35947 +       if (!hdl->req_obj.ops && !list_empty(&hdl->requests)) {
35948                 struct v4l2_ctrl_handler *req, *next_req;
35950                 list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
35951 @@ -3571,39 +3590,8 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
35952         struct v4l2_ctrl_handler *hdl =
35953                 container_of(obj, struct v4l2_ctrl_handler, req_obj);
35954         struct v4l2_ctrl_handler *main_hdl = obj->priv;
35955 -       struct v4l2_ctrl_handler *prev_hdl = NULL;
35956 -       struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
35958         mutex_lock(main_hdl->lock);
35959 -       if (list_empty(&main_hdl->requests_queued))
35960 -               goto queue;
35962 -       prev_hdl = list_last_entry(&main_hdl->requests_queued,
35963 -                                  struct v4l2_ctrl_handler, requests_queued);
35964 -       /*
35965 -        * Note: prev_hdl and hdl must contain the same list of control
35966 -        * references, so if any differences are detected then that is a
35967 -        * driver bug and the WARN_ON is triggered.
35968 -        */
35969 -       mutex_lock(prev_hdl->lock);
35970 -       ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs,
35971 -                                        struct v4l2_ctrl_ref, node);
35972 -       list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) {
35973 -               if (ref_ctrl->req)
35974 -                       continue;
35975 -               while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
35976 -                       /* Should never happen, but just in case... */
35977 -                       if (list_is_last(&ref_ctrl_prev->node,
35978 -                                        &prev_hdl->ctrl_refs))
35979 -                               break;
35980 -                       ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
35981 -               }
35982 -               if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id))
35983 -                       break;
35984 -               ref_ctrl->req = ref_ctrl_prev->req;
35985 -       }
35986 -       mutex_unlock(prev_hdl->lock);
35987 -queue:
35988         list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
35989         hdl->request_is_queued = true;
35990         mutex_unlock(main_hdl->lock);
35991 @@ -3615,8 +3603,8 @@ static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
35992                 container_of(obj, struct v4l2_ctrl_handler, req_obj);
35993         struct v4l2_ctrl_handler *main_hdl = obj->priv;
35995 -       list_del_init(&hdl->requests);
35996         mutex_lock(main_hdl->lock);
35997 +       list_del_init(&hdl->requests);
35998         if (hdl->request_is_queued) {
35999                 list_del_init(&hdl->requests_queued);
36000                 hdl->request_is_queued = false;
36001 @@ -3660,7 +3648,7 @@ v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
36003         struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
36005 -       return (ref && ref->req == ref) ? ref->ctrl : NULL;
36006 +       return (ref && ref->valid_p_req) ? ref->ctrl : NULL;
36008  EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
36010 @@ -3675,8 +3663,11 @@ static int v4l2_ctrl_request_bind(struct media_request *req,
36011         if (!ret) {
36012                 ret = media_request_object_bind(req, &req_ops,
36013                                                 from, false, &hdl->req_obj);
36014 -               if (!ret)
36015 +               if (!ret) {
36016 +                       mutex_lock(from->lock);
36017                         list_add_tail(&hdl->requests, &from->requests);
36018 +                       mutex_unlock(from->lock);
36019 +               }
36020         }
36021         return ret;
36023 @@ -3846,7 +3837,13 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
36024         return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
36027 -/* Get extended controls. Allocates the helpers array if needed. */
36029 + * Get extended controls. Allocates the helpers array if needed.
36030 + *
36031 + * Note that v4l2_g_ext_ctrls_common() with 'which' set to
36032 + * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
36033 + * completed, and in that case valid_p_req is true for all controls.
36034 + */
36035  static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
36036                                    struct v4l2_ext_controls *cs,
36037                                    struct video_device *vdev)
36038 @@ -3855,9 +3852,10 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
36039         struct v4l2_ctrl_helper *helpers = helper;
36040         int ret;
36041         int i, j;
36042 -       bool def_value;
36043 +       bool is_default, is_request;
36045 -       def_value = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
36046 +       is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
36047 +       is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
36049         cs->error_idx = cs->count;
36050         cs->which = V4L2_CTRL_ID2WHICH(cs->which);
36051 @@ -3883,11 +3881,9 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
36052                         ret = -EACCES;
36054         for (i = 0; !ret && i < cs->count; i++) {
36055 -               int (*ctrl_to_user)(struct v4l2_ext_control *c,
36056 -                                   struct v4l2_ctrl *ctrl);
36057                 struct v4l2_ctrl *master;
36059 -               ctrl_to_user = def_value ? def_to_user : cur_to_user;
36060 +               bool is_volatile = false;
36061 +               u32 idx = i;
36063                 if (helpers[i].mref == NULL)
36064                         continue;
36065 @@ -3897,31 +3893,48 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
36067                 v4l2_ctrl_lock(master);
36069 -               /* g_volatile_ctrl will update the new control values */
36070 -               if (!def_value &&
36071 +               /*
36072 +                * g_volatile_ctrl will update the new control values.
36073 +                * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
36074 +                * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
36075 +                * it is v4l2_ctrl_request_complete() that copies the
36076 +                * volatile controls at the time of request completion
36077 +                * to the request, so you don't want to do that again.
36078 +                */
36079 +               if (!is_default && !is_request &&
36080                     ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
36081                     (master->has_volatiles && !is_cur_manual(master)))) {
36082                         for (j = 0; j < master->ncontrols; j++)
36083                                 cur_to_new(master->cluster[j]);
36084                         ret = call_op(master, g_volatile_ctrl);
36085 -                       ctrl_to_user = new_to_user;
36086 +                       is_volatile = true;
36087                 }
36088 -               /* If OK, then copy the current (for non-volatile controls)
36089 -                  or the new (for volatile controls) control values to the
36090 -                  caller */
36091 -               if (!ret) {
36092 -                       u32 idx = i;
36094 -                       do {
36095 -                               if (helpers[idx].ref->req)
36096 -                                       ret = req_to_user(cs->controls + idx,
36097 -                                               helpers[idx].ref->req);
36098 -                               else
36099 -                                       ret = ctrl_to_user(cs->controls + idx,
36100 -                                               helpers[idx].ref->ctrl);
36101 -                               idx = helpers[idx].next;
36102 -                       } while (!ret && idx);
36103 +               if (ret) {
36104 +                       v4l2_ctrl_unlock(master);
36105 +                       break;
36106                 }
36108 +               /*
36109 +                * Copy the default value (if is_default is true), the
36110 +                * request value (if is_request is true and p_req is valid),
36111 +                * the new volatile value (if is_volatile is true) or the
36112 +                * current value.
36113 +                */
36114 +               do {
36115 +                       struct v4l2_ctrl_ref *ref = helpers[idx].ref;
36117 +                       if (is_default)
36118 +                               ret = def_to_user(cs->controls + idx, ref->ctrl);
36119 +                       else if (is_request && ref->valid_p_req)
36120 +                               ret = req_to_user(cs->controls + idx, ref);
36121 +                       else if (is_volatile)
36122 +                               ret = new_to_user(cs->controls + idx, ref->ctrl);
36123 +                       else
36124 +                               ret = cur_to_user(cs->controls + idx, ref->ctrl);
36125 +                       idx = helpers[idx].next;
36126 +               } while (!ret && idx);
36128                 v4l2_ctrl_unlock(master);
36129         }
36131 @@ -4564,8 +4577,6 @@ void v4l2_ctrl_request_complete(struct media_request *req,
36132                 unsigned int i;
36134                 if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
36135 -                       ref->req = ref;
36137                         v4l2_ctrl_lock(master);
36138                         /* g_volatile_ctrl will update the current control values */
36139                         for (i = 0; i < master->ncontrols; i++)
36140 @@ -4575,21 +4586,12 @@ void v4l2_ctrl_request_complete(struct media_request *req,
36141                         v4l2_ctrl_unlock(master);
36142                         continue;
36143                 }
36144 -               if (ref->req == ref)
36145 +               if (ref->valid_p_req)
36146                         continue;
36148 +               /* Copy the current control value into the request */
36149                 v4l2_ctrl_lock(ctrl);
36150 -               if (ref->req) {
36151 -                       ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
36152 -               } else {
36153 -                       ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
36154 -                       /*
36155 -                        * Set ref->req to ensure that when userspace wants to
36156 -                        * obtain the controls of this request it will take
36157 -                        * this value and not the current value of the control.
36158 -                        */
36159 -                       ref->req = ref;
36160 -               }
36161 +               cur_to_req(ref);
36162                 v4l2_ctrl_unlock(ctrl);
36163         }
36165 @@ -4653,7 +4655,7 @@ int v4l2_ctrl_request_setup(struct media_request *req,
36166                                 struct v4l2_ctrl_ref *r =
36167                                         find_ref(hdl, master->cluster[i]->id);
36169 -                               if (r->req && r == r->req) {
36170 +                               if (r->valid_p_req) {
36171                                         have_new_data = true;
36172                                         break;
36173                                 }
36174 diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
36175 index cfa730cfd145..f80c2ea39ca4 100644
36176 --- a/drivers/memory/omap-gpmc.c
36177 +++ b/drivers/memory/omap-gpmc.c
36178 @@ -1009,8 +1009,8 @@ EXPORT_SYMBOL(gpmc_cs_request);
36180  void gpmc_cs_free(int cs)
36182 -       struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
36183 -       struct resource *res = &gpmc->mem;
36184 +       struct gpmc_cs_data *gpmc;
36185 +       struct resource *res;
36187         spin_lock(&gpmc_mem_lock);
36188         if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
36189 @@ -1018,6 +1018,9 @@ void gpmc_cs_free(int cs)
36190                 spin_unlock(&gpmc_mem_lock);
36191                 return;
36192         }
36193 +       gpmc = &gpmc_cs[cs];
36194 +       res = &gpmc->mem;
36196         gpmc_cs_disable_mem(cs);
36197         if (res->flags)
36198                 release_resource(res);
36199 diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
36200 index 3b5b1045edd9..9c0a28416777 100644
36201 --- a/drivers/memory/pl353-smc.c
36202 +++ b/drivers/memory/pl353-smc.c
36203 @@ -63,7 +63,7 @@
36204  /* ECC memory config register specific constants */
36205  #define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC
36206  #define PL353_SMC_ECC_MEMCFG_MODE_SHIFT        2
36207 -#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK       0xC
36208 +#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK       0x3
36210  #define PL353_SMC_DC_UPT_NAND_REGS     ((4 << 23) |    /* CS: NAND chip */ \
36211                                  (2 << 21))     /* UpdateRegs operation */
36212 diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
36213 index 8d36e221def1..45eed659b0c6 100644
36214 --- a/drivers/memory/renesas-rpc-if.c
36215 +++ b/drivers/memory/renesas-rpc-if.c
36216 @@ -192,10 +192,10 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
36217         }
36219         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
36220 -       rpc->size = resource_size(res);
36221         rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
36222         if (IS_ERR(rpc->dirmap))
36223                 rpc->dirmap = NULL;
36224 +       rpc->size = resource_size(res);
36226         rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
36228 diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
36229 index 1dabb509dec3..dee503640e12 100644
36230 --- a/drivers/memory/samsung/exynos5422-dmc.c
36231 +++ b/drivers/memory/samsung/exynos5422-dmc.c
36232 @@ -1298,7 +1298,9 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
36234         dmc->curr_volt = target_volt;
36236 -       clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
36237 +       ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
36238 +       if (ret)
36239 +               return ret;
36241         clk_prepare_enable(dmc->fout_bpll);
36242         clk_prepare_enable(dmc->mout_bpll);
36243 diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
36244 index 077d9ab112b7..d919ae9691e2 100644
36245 --- a/drivers/mfd/arizona-irq.c
36246 +++ b/drivers/mfd/arizona-irq.c
36247 @@ -100,7 +100,7 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
36248         unsigned int val;
36249         int ret;
36251 -       ret = pm_runtime_get_sync(arizona->dev);
36252 +       ret = pm_runtime_resume_and_get(arizona->dev);
36253         if (ret < 0) {
36254                 dev_err(arizona->dev, "Failed to resume device: %d\n", ret);
36255                 return IRQ_NONE;
36256 diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
36257 index 3781d0bb7786..783a14af18e2 100644
36258 --- a/drivers/mfd/da9063-i2c.c
36259 +++ b/drivers/mfd/da9063-i2c.c
36260 @@ -442,6 +442,16 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
36261                 return ret;
36262         }
36264 +       /* If SMBus is not available and only I2C is possible, enter I2C mode */
36265 +       if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) {
36266 +               ret = regmap_clear_bits(da9063->regmap, DA9063_REG_CONFIG_J,
36267 +                                       DA9063_TWOWIRE_TO);
36268 +               if (ret < 0) {
36269 +                       dev_err(da9063->dev, "Failed to set Two-Wire Bus Mode.\n");
36270 +                       return -EIO;
36271 +               }
36272 +       }
36274         return da9063_device_init(da9063, i2c->irq);
36277 diff --git a/drivers/mfd/intel_pmt.c b/drivers/mfd/intel_pmt.c
36278 index 744b230cdcca..65da2b17a204 100644
36279 --- a/drivers/mfd/intel_pmt.c
36280 +++ b/drivers/mfd/intel_pmt.c
36281 @@ -79,19 +79,18 @@ static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
36282         case DVSEC_INTEL_ID_WATCHER:
36283                 if (quirks & PMT_QUIRK_NO_WATCHER) {
36284                         dev_info(dev, "Watcher not supported\n");
36285 -                       return 0;
36286 +                       return -EINVAL;
36287                 }
36288                 name = "pmt_watcher";
36289                 break;
36290         case DVSEC_INTEL_ID_CRASHLOG:
36291                 if (quirks & PMT_QUIRK_NO_CRASHLOG) {
36292                         dev_info(dev, "Crashlog not supported\n");
36293 -                       return 0;
36294 +                       return -EINVAL;
36295                 }
36296                 name = "pmt_crashlog";
36297                 break;
36298         default:
36299 -               dev_err(dev, "Unrecognized PMT capability: %d\n", id);
36300                 return -EINVAL;
36301         }
36303 @@ -174,12 +173,8 @@ static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
36304                 header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
36306                 ret = pmt_add_dev(pdev, &header, quirks);
36307 -               if (ret) {
36308 -                       dev_warn(&pdev->dev,
36309 -                                "Failed to add device for DVSEC id %d\n",
36310 -                                header.id);
36311 +               if (ret)
36312                         continue;
36313 -               }
36315                 found_devices = true;
36316         } while (true);
36317 diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
36318 index add603359124..44ed2fce0319 100644
36319 --- a/drivers/mfd/stm32-timers.c
36320 +++ b/drivers/mfd/stm32-timers.c
36321 @@ -158,13 +158,18 @@ static const struct regmap_config stm32_timers_regmap_cfg = {
36323  static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
36325 +       u32 arr;
36327 +       /* Backup ARR to restore it after getting the maximum value */
36328 +       regmap_read(ddata->regmap, TIM_ARR, &arr);
36330         /*
36331          * Only the available bits will be written so when readback
36332          * we get the maximum value of auto reload register
36333          */
36334         regmap_write(ddata->regmap, TIM_ARR, ~0L);
36335         regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
36336 -       regmap_write(ddata->regmap, TIM_ARR, 0x0);
36337 +       regmap_write(ddata->regmap, TIM_ARR, arr);
36340  static int stm32_timers_dma_probe(struct device *dev,
36341 diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
36342 index 90f3292230c9..1dd39483e7c1 100644
36343 --- a/drivers/mfd/stmpe.c
36344 +++ b/drivers/mfd/stmpe.c
36345 @@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(stmpe_set_altfunc);
36346   * GPIO (all variants)
36347   */
36349 -static const struct resource stmpe_gpio_resources[] = {
36350 +static struct resource stmpe_gpio_resources[] = {
36351         /* Start and end filled dynamically */
36352         {
36353                 .flags  = IORESOURCE_IRQ,
36354 @@ -336,7 +336,8 @@ static const struct mfd_cell stmpe_gpio_cell_noirq = {
36355   * Keypad (1601, 2401, 2403)
36356   */
36358 -static const struct resource stmpe_keypad_resources[] = {
36359 +static struct resource stmpe_keypad_resources[] = {
36360 +       /* Start and end filled dynamically */
36361         {
36362                 .name   = "KEYPAD",
36363                 .flags  = IORESOURCE_IRQ,
36364 @@ -357,7 +358,8 @@ static const struct mfd_cell stmpe_keypad_cell = {
36365  /*
36366   * PWM (1601, 2401, 2403)
36367   */
36368 -static const struct resource stmpe_pwm_resources[] = {
36369 +static struct resource stmpe_pwm_resources[] = {
36370 +       /* Start and end filled dynamically */
36371         {
36372                 .name   = "PWM0",
36373                 .flags  = IORESOURCE_IRQ,
36374 @@ -445,7 +447,8 @@ static struct stmpe_variant_info stmpe801_noirq = {
36375   * Touchscreen (STMPE811 or STMPE610)
36376   */
36378 -static const struct resource stmpe_ts_resources[] = {
36379 +static struct resource stmpe_ts_resources[] = {
36380 +       /* Start and end filled dynamically */
36381         {
36382                 .name   = "TOUCH_DET",
36383                 .flags  = IORESOURCE_IRQ,
36384 @@ -467,7 +470,8 @@ static const struct mfd_cell stmpe_ts_cell = {
36385   * ADC (STMPE811)
36386   */
36388 -static const struct resource stmpe_adc_resources[] = {
36389 +static struct resource stmpe_adc_resources[] = {
36390 +       /* Start and end filled dynamically */
36391         {
36392                 .name   = "STMPE_TEMP_SENS",
36393                 .flags  = IORESOURCE_IRQ,
36394 diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
36395 index b690796d24d4..448b13da62b4 100644
36396 --- a/drivers/mfd/ucb1x00-core.c
36397 +++ b/drivers/mfd/ucb1x00-core.c
36398 @@ -250,7 +250,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
36399                         break;
36400                 /* yield to other processes */
36401                 set_current_state(TASK_INTERRUPTIBLE);
36402 -               schedule_timeout(1);
36403 +               schedule_min_hrtimeout();
36404         }
36406         return UCB_ADC_DAT(val);
36407 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
36408 index dd65cedf3b12..9d14bf444481 100644
36409 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
36410 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
36411 @@ -208,7 +208,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
36412  static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
36414  /* ODR is Output Data Rate */
36415 -static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
36416 +static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3)
36418         u8 ctrl;
36419         int shift;
36420 @@ -216,15 +216,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
36421         lis3->read(lis3, CTRL_REG1, &ctrl);
36422         ctrl &= lis3->odr_mask;
36423         shift = ffs(lis3->odr_mask) - 1;
36424 -       return lis3->odrs[(ctrl >> shift)];
36425 +       return (ctrl >> shift);
36428  static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
36430 -       int div = lis3lv02d_get_odr(lis3);
36431 +       int odr_idx = lis3lv02d_get_odr_index(lis3);
36432 +       int div = lis3->odrs[odr_idx];
36434 -       if (WARN_ONCE(div == 0, "device returned spurious data"))
36435 +       if (div == 0) {
36436 +               if (odr_idx == 0) {
36437 +                       /* Power-down mode, not sampling no need to sleep */
36438 +                       return 0;
36439 +               }
36441 +               dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx);
36442                 return -ENXIO;
36443 +       }
36445         /* LIS3 power on delay is quite long */
36446         msleep(lis3->pwron_delay / div);
36447 @@ -816,9 +824,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
36448                         struct device_attribute *attr, char *buf)
36450         struct lis3lv02d *lis3 = dev_get_drvdata(dev);
36451 +       int odr_idx;
36453         lis3lv02d_sysfs_poweron(lis3);
36454 -       return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
36456 +       odr_idx = lis3lv02d_get_odr_index(lis3);
36457 +       return sprintf(buf, "%d\n", lis3->odrs[odr_idx]);
36460  static ssize_t lis3lv02d_rate_set(struct device *dev,
36461 diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
36462 index 14be76d4c2e6..cb34925e10f1 100644
36463 --- a/drivers/misc/mei/hw-me-regs.h
36464 +++ b/drivers/misc/mei/hw-me-regs.h
36465 @@ -105,6 +105,7 @@
36467  #define MEI_DEV_ID_ADP_S      0x7AE8  /* Alder Lake Point S */
36468  #define MEI_DEV_ID_ADP_LP     0x7A60  /* Alder Lake Point LP */
36469 +#define MEI_DEV_ID_ADP_P      0x51E0  /* Alder Lake Point P */
36471  /*
36472   * MEI HW Section
36473 diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
36474 index a7e179626b63..c3393b383e59 100644
36475 --- a/drivers/misc/mei/pci-me.c
36476 +++ b/drivers/misc/mei/pci-me.c
36477 @@ -111,6 +111,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
36479         {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
36480         {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
36481 +       {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
36483         /* required last entry */
36484         {0, }
36485 diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
36486 index 8e6607fc8a67..b9ab770bbdb5 100644
36487 --- a/drivers/misc/sgi-xp/xpc_channel.c
36488 +++ b/drivers/misc/sgi-xp/xpc_channel.c
36489 @@ -834,7 +834,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
36491         atomic_inc(&ch->n_on_msg_allocate_wq);
36492         prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
36493 -       ret = schedule_timeout(1);
36494 +       ret = schedule_min_hrtimeout();
36495         finish_wait(&ch->msg_allocate_wq, &wait);
36496         atomic_dec(&ch->n_on_msg_allocate_wq);
36498 diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
36499 index 345addd9306d..fa8a7fce4481 100644
36500 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c
36501 +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
36502 @@ -326,7 +326,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
36503  bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
36505         int result;
36506 -       struct vmci_notify_bm_set_msg bitmap_set_msg;
36507 +       struct vmci_notify_bm_set_msg bitmap_set_msg = { };
36509         bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
36510                                                   VMCI_SET_NOTIFY_BITMAP);
36511 diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
36512 index cc8eeb361fcd..1018dc77269d 100644
36513 --- a/drivers/misc/vmw_vmci/vmci_guest.c
36514 +++ b/drivers/misc/vmw_vmci/vmci_guest.c
36515 @@ -168,7 +168,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
36516                                 VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
36517         struct vmci_datagram *check_msg;
36519 -       check_msg = kmalloc(msg_size, GFP_KERNEL);
36520 +       check_msg = kzalloc(msg_size, GFP_KERNEL);
36521         if (!check_msg) {
36522                 dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
36523                 return -ENOMEM;
36524 diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
36525 index d666e24fbe0e..a4c06ef67394 100644
36526 --- a/drivers/mmc/core/block.c
36527 +++ b/drivers/mmc/core/block.c
36528 @@ -572,6 +572,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
36529                 main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
36530         }
36532 +       /*
36533 +        * Make sure to update CACHE_CTRL in case it was changed. The cache
36534 +        * will get turned back on if the card is re-initialized, e.g.
36535 +        * suspend/resume or hw reset in recovery.
36536 +        */
36537 +       if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
36538 +           (cmd.opcode == MMC_SWITCH)) {
36539 +               u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
36541 +               card->ext_csd.cache_ctrl = value;
36542 +       }
36544         /*
36545          * According to the SD specs, some commands require a delay after
36546          * issuing the command.
36547 @@ -2224,6 +2236,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
36548         case MMC_ISSUE_ASYNC:
36549                 switch (req_op(req)) {
36550                 case REQ_OP_FLUSH:
36551 +                       if (!mmc_cache_enabled(host)) {
36552 +                               blk_mq_end_request(req, BLK_STS_OK);
36553 +                               return MMC_REQ_FINISHED;
36554 +                       }
36555                         ret = mmc_blk_cqe_issue_flush(mq, req);
36556                         break;
36557                 case REQ_OP_READ:
36558 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
36559 index 1136b859ddd8..e30c4e88e404 100644
36560 --- a/drivers/mmc/core/core.c
36561 +++ b/drivers/mmc/core/core.c
36562 @@ -1207,7 +1207,7 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
36564         err = mmc_wait_for_cmd(host, &cmd, 0);
36565         if (err)
36566 -               return err;
36567 +               goto power_cycle;
36569         if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
36570                 return -EIO;
36571 @@ -2369,80 +2369,6 @@ void mmc_stop_host(struct mmc_host *host)
36572         mmc_release_host(host);
36575 -#ifdef CONFIG_PM_SLEEP
36576 -/* Do the card removal on suspend if card is assumed removeable
36577 - * Do that in pm notifier while userspace isn't yet frozen, so we will be able
36578 -   to sync the card.
36580 -static int mmc_pm_notify(struct notifier_block *notify_block,
36581 -                       unsigned long mode, void *unused)
36583 -       struct mmc_host *host = container_of(
36584 -               notify_block, struct mmc_host, pm_notify);
36585 -       unsigned long flags;
36586 -       int err = 0;
36588 -       switch (mode) {
36589 -       case PM_HIBERNATION_PREPARE:
36590 -       case PM_SUSPEND_PREPARE:
36591 -       case PM_RESTORE_PREPARE:
36592 -               spin_lock_irqsave(&host->lock, flags);
36593 -               host->rescan_disable = 1;
36594 -               spin_unlock_irqrestore(&host->lock, flags);
36595 -               cancel_delayed_work_sync(&host->detect);
36597 -               if (!host->bus_ops)
36598 -                       break;
36600 -               /* Validate prerequisites for suspend */
36601 -               if (host->bus_ops->pre_suspend)
36602 -                       err = host->bus_ops->pre_suspend(host);
36603 -               if (!err)
36604 -                       break;
36606 -               if (!mmc_card_is_removable(host)) {
36607 -                       dev_warn(mmc_dev(host),
36608 -                                "pre_suspend failed for non-removable host: "
36609 -                                "%d\n", err);
36610 -                       /* Avoid removing non-removable hosts */
36611 -                       break;
36612 -               }
36614 -               /* Calling bus_ops->remove() with a claimed host can deadlock */
36615 -               host->bus_ops->remove(host);
36616 -               mmc_claim_host(host);
36617 -               mmc_detach_bus(host);
36618 -               mmc_power_off(host);
36619 -               mmc_release_host(host);
36620 -               host->pm_flags = 0;
36621 -               break;
36623 -       case PM_POST_SUSPEND:
36624 -       case PM_POST_HIBERNATION:
36625 -       case PM_POST_RESTORE:
36627 -               spin_lock_irqsave(&host->lock, flags);
36628 -               host->rescan_disable = 0;
36629 -               spin_unlock_irqrestore(&host->lock, flags);
36630 -               _mmc_detect_change(host, 0, false);
36632 -       }
36634 -       return 0;
36637 -void mmc_register_pm_notifier(struct mmc_host *host)
36639 -       host->pm_notify.notifier_call = mmc_pm_notify;
36640 -       register_pm_notifier(&host->pm_notify);
36643 -void mmc_unregister_pm_notifier(struct mmc_host *host)
36645 -       unregister_pm_notifier(&host->pm_notify);
36647 -#endif
36649  static int __init mmc_init(void)
36651         int ret;
36652 diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
36653 index 575ac0257af2..db3c9c68875d 100644
36654 --- a/drivers/mmc/core/core.h
36655 +++ b/drivers/mmc/core/core.h
36656 @@ -29,6 +29,7 @@ struct mmc_bus_ops {
36657         int (*shutdown)(struct mmc_host *);
36658         int (*hw_reset)(struct mmc_host *);
36659         int (*sw_reset)(struct mmc_host *);
36660 +       bool (*cache_enabled)(struct mmc_host *);
36661  };
36663  void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
36664 @@ -93,14 +94,6 @@ int mmc_execute_tuning(struct mmc_card *card);
36665  int mmc_hs200_to_hs400(struct mmc_card *card);
36666  int mmc_hs400_to_hs200(struct mmc_card *card);
36668 -#ifdef CONFIG_PM_SLEEP
36669 -void mmc_register_pm_notifier(struct mmc_host *host);
36670 -void mmc_unregister_pm_notifier(struct mmc_host *host);
36671 -#else
36672 -static inline void mmc_register_pm_notifier(struct mmc_host *host) { }
36673 -static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
36674 -#endif
36676  void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
36677  bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
36679 @@ -171,4 +164,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
36680                 host->ops->post_req(host, mrq, err);
36683 +static inline bool mmc_cache_enabled(struct mmc_host *host)
36685 +       if (host->bus_ops->cache_enabled)
36686 +               return host->bus_ops->cache_enabled(host);
36688 +       return false;
36691  #endif
36692 diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
36693 index 9b89a91b6b47..fe05b3645fe9 100644
36694 --- a/drivers/mmc/core/host.c
36695 +++ b/drivers/mmc/core/host.c
36696 @@ -35,6 +35,42 @@
36698  static DEFINE_IDA(mmc_host_ida);
36700 +#ifdef CONFIG_PM_SLEEP
36701 +static int mmc_host_class_prepare(struct device *dev)
36703 +       struct mmc_host *host = cls_dev_to_mmc_host(dev);
36705 +       /*
36706 +        * It's safe to access the bus_ops pointer, as both userspace and the
36707 +        * workqueue for detecting cards are frozen at this point.
36708 +        */
36709 +       if (!host->bus_ops)
36710 +               return 0;
36712 +       /* Validate conditions for system suspend. */
36713 +       if (host->bus_ops->pre_suspend)
36714 +               return host->bus_ops->pre_suspend(host);
36716 +       return 0;
36719 +static void mmc_host_class_complete(struct device *dev)
36721 +       struct mmc_host *host = cls_dev_to_mmc_host(dev);
36723 +       _mmc_detect_change(host, 0, false);
36726 +static const struct dev_pm_ops mmc_host_class_dev_pm_ops = {
36727 +       .prepare = mmc_host_class_prepare,
36728 +       .complete = mmc_host_class_complete,
36731 +#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops)
36732 +#else
36733 +#define MMC_HOST_CLASS_DEV_PM_OPS NULL
36734 +#endif
36736  static void mmc_host_classdev_release(struct device *dev)
36738         struct mmc_host *host = cls_dev_to_mmc_host(dev);
36739 @@ -46,6 +82,7 @@ static void mmc_host_classdev_release(struct device *dev)
36740  static struct class mmc_host_class = {
36741         .name           = "mmc_host",
36742         .dev_release    = mmc_host_classdev_release,
36743 +       .pm             = MMC_HOST_CLASS_DEV_PM_OPS,
36744  };
36746  int mmc_register_host_class(void)
36747 @@ -538,8 +575,6 @@ int mmc_add_host(struct mmc_host *host)
36748  #endif
36750         mmc_start_host(host);
36751 -       mmc_register_pm_notifier(host);
36753         return 0;
36756 @@ -555,7 +590,6 @@ EXPORT_SYMBOL(mmc_add_host);
36757   */
36758  void mmc_remove_host(struct mmc_host *host)
36760 -       mmc_unregister_pm_notifier(host);
36761         mmc_stop_host(host);
36763  #ifdef CONFIG_DEBUG_FS
36764 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
36765 index 8741271d3971..4d2b4b0da93c 100644
36766 --- a/drivers/mmc/core/mmc.c
36767 +++ b/drivers/mmc/core/mmc.c
36768 @@ -2029,6 +2029,12 @@ static void mmc_detect(struct mmc_host *host)
36769         }
36772 +static bool _mmc_cache_enabled(struct mmc_host *host)
36774 +       return host->card->ext_csd.cache_size > 0 &&
36775 +              host->card->ext_csd.cache_ctrl & 1;
36778  static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
36780         int err = 0;
36781 @@ -2208,6 +2214,7 @@ static const struct mmc_bus_ops mmc_ops = {
36782         .alive = mmc_alive,
36783         .shutdown = mmc_shutdown,
36784         .hw_reset = _mmc_hw_reset,
36785 +       .cache_enabled = _mmc_cache_enabled,
36786  };
36788  /*
36789 diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
36790 index 265d95ec82ce..c458f6b626a2 100644
36791 --- a/drivers/mmc/core/mmc_ops.c
36792 +++ b/drivers/mmc/core/mmc_ops.c
36793 @@ -988,9 +988,7 @@ int mmc_flush_cache(struct mmc_card *card)
36795         int err = 0;
36797 -       if (mmc_card_mmc(card) &&
36798 -                       (card->ext_csd.cache_size > 0) &&
36799 -                       (card->ext_csd.cache_ctrl & 1)) {
36800 +       if (mmc_cache_enabled(card->host)) {
36801                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
36802                                  EXT_CSD_FLUSH_CACHE, 1,
36803                                  MMC_CACHE_FLUSH_TIMEOUT_MS);
36804 diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
36805 index 6fa51a6ed058..2c48d6504101 100644
36806 --- a/drivers/mmc/core/sd.c
36807 +++ b/drivers/mmc/core/sd.c
36808 @@ -135,6 +135,9 @@ static int mmc_decode_csd(struct mmc_card *card)
36809                         csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
36810                         csd->erase_size <<= csd->write_blkbits - 9;
36811                 }
36813 +               if (UNSTUFF_BITS(resp, 13, 1))
36814 +                       mmc_card_set_readonly(card);
36815                 break;
36816         case 1:
36817                 /*
36818 @@ -169,6 +172,9 @@ static int mmc_decode_csd(struct mmc_card *card)
36819                 csd->write_blkbits = 9;
36820                 csd->write_partial = 0;
36821                 csd->erase_size = 1;
36823 +               if (UNSTUFF_BITS(resp, 13, 1))
36824 +                       mmc_card_set_readonly(card);
36825                 break;
36826         default:
36827                 pr_err("%s: unrecognised CSD structure version %d\n",
36828 diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
36829 index 0fda7784cab2..3eb94ac2712e 100644
36830 --- a/drivers/mmc/core/sdio.c
36831 +++ b/drivers/mmc/core/sdio.c
36832 @@ -985,21 +985,37 @@ static void mmc_sdio_detect(struct mmc_host *host)
36833   */
36834  static int mmc_sdio_pre_suspend(struct mmc_host *host)
36836 -       int i, err = 0;
36837 +       int i;
36839         for (i = 0; i < host->card->sdio_funcs; i++) {
36840                 struct sdio_func *func = host->card->sdio_func[i];
36841                 if (func && sdio_func_present(func) && func->dev.driver) {
36842                         const struct dev_pm_ops *pmops = func->dev.driver->pm;
36843 -                       if (!pmops || !pmops->suspend || !pmops->resume) {
36844 +                       if (!pmops || !pmops->suspend || !pmops->resume)
36845                                 /* force removal of entire card in that case */
36846 -                               err = -ENOSYS;
36847 -                               break;
36848 -                       }
36849 +                               goto remove;
36850                 }
36851         }
36853 -       return err;
36854 +       return 0;
36856 +remove:
36857 +       if (!mmc_card_is_removable(host)) {
36858 +               dev_warn(mmc_dev(host),
36859 +                        "missing suspend/resume ops for non-removable SDIO card\n");
36860 +               /* Don't remove a non-removable card - we can't re-detect it. */
36861 +               return 0;
36862 +       }
36864 +       /* Remove the SDIO card and let it be re-detected later on. */
36865 +       mmc_sdio_remove(host);
36866 +       mmc_claim_host(host);
36867 +       mmc_detach_bus(host);
36868 +       mmc_power_off(host);
36869 +       mmc_release_host(host);
36870 +       host->pm_flags = 0;
36872 +       return 0;
36875  /*
36876 diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
36877 index f9780c65ebe9..f24623aac2db 100644
36878 --- a/drivers/mmc/host/sdhci-brcmstb.c
36879 +++ b/drivers/mmc/host/sdhci-brcmstb.c
36880 @@ -199,7 +199,6 @@ static int sdhci_brcmstb_add_host(struct sdhci_host *host,
36881         if (dma64) {
36882                 dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
36883                 cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
36884 -               cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
36885         }
36887         ret = cqhci_init(cq_host, host->mmc, dma64);
36888 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
36889 index a20459744d21..94327988da91 100644
36890 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
36891 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
36892 @@ -1488,7 +1488,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
36894         mmc_of_parse_voltage(np, &host->ocr_mask);
36896 -       if (esdhc_is_usdhc(imx_data)) {
36897 +       if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pinctrl)) {
36898                 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
36899                                                 ESDHC_PINCTRL_STATE_100MHZ);
36900                 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
36901 diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
36902 index 9552708846ca..bf04a08eeba1 100644
36903 --- a/drivers/mmc/host/sdhci-pci-core.c
36904 +++ b/drivers/mmc/host/sdhci-pci-core.c
36905 @@ -516,6 +516,7 @@ struct intel_host {
36906         int     drv_strength;
36907         bool    d3_retune;
36908         bool    rpm_retune_ok;
36909 +       bool    needs_pwr_off;
36910         u32     glk_rx_ctrl1;
36911         u32     glk_tun_val;
36912         u32     active_ltr;
36913 @@ -643,9 +644,25 @@ static int bxt_get_cd(struct mmc_host *mmc)
36914  static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
36915                                   unsigned short vdd)
36917 +       struct sdhci_pci_slot *slot = sdhci_priv(host);
36918 +       struct intel_host *intel_host = sdhci_pci_priv(slot);
36919         int cntr;
36920         u8 reg;
36922 +       /*
36923 +        * Bus power may control card power, but a full reset still may not
36924 +        * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
36925 +        * That might be needed to initialize correctly, if the card was left
36926 +        * powered on previously.
36927 +        */
36928 +       if (intel_host->needs_pwr_off) {
36929 +               intel_host->needs_pwr_off = false;
36930 +               if (mode != MMC_POWER_OFF) {
36931 +                       sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
36932 +                       usleep_range(10000, 12500);
36933 +               }
36934 +       }
36936         sdhci_set_power(host, mode, vdd);
36938         if (mode == MMC_POWER_OFF)
36939 @@ -1135,6 +1152,14 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
36940         return 0;
36943 +static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
36945 +       struct intel_host *intel_host = sdhci_pci_priv(slot);
36946 +       u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
36948 +       intel_host->needs_pwr_off = reg  & SDHCI_POWER_ON;
36951  static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
36953         byt_probe_slot(slot);
36954 @@ -1152,6 +1177,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
36955             slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
36956                 slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
36958 +       byt_needs_pwr_off(slot);
36960         return 0;
36963 @@ -1903,6 +1930,8 @@ static const struct pci_device_id pci_ids[] = {
36964         SDHCI_PCI_DEVICE(INTEL, CMLH_SD,   intel_byt_sd),
36965         SDHCI_PCI_DEVICE(INTEL, JSL_EMMC,  intel_glk_emmc),
36966         SDHCI_PCI_DEVICE(INTEL, JSL_SD,    intel_byt_sd),
36967 +       SDHCI_PCI_DEVICE(INTEL, LKF_EMMC,  intel_glk_emmc),
36968 +       SDHCI_PCI_DEVICE(INTEL, LKF_SD,    intel_byt_sd),
36969         SDHCI_PCI_DEVICE(O2, 8120,     o2),
36970         SDHCI_PCI_DEVICE(O2, 8220,     o2),
36971         SDHCI_PCI_DEVICE(O2, 8221,     o2),
36972 diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
36973 index d0ed232af0eb..8f90c4163bb5 100644
36974 --- a/drivers/mmc/host/sdhci-pci.h
36975 +++ b/drivers/mmc/host/sdhci-pci.h
36976 @@ -57,6 +57,8 @@
36977  #define PCI_DEVICE_ID_INTEL_CMLH_SD    0x06f5
36978  #define PCI_DEVICE_ID_INTEL_JSL_EMMC   0x4dc4
36979  #define PCI_DEVICE_ID_INTEL_JSL_SD     0x4df8
36980 +#define PCI_DEVICE_ID_INTEL_LKF_EMMC   0x98c4
36981 +#define PCI_DEVICE_ID_INTEL_LKF_SD     0x98f8
36983  #define PCI_DEVICE_ID_SYSKONNECT_8000  0x8000
36984  #define PCI_DEVICE_ID_VIA_95D0         0x95d0
36985 diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
36986 index 41d193fa77bb..8ea9132ebca4 100644
36987 --- a/drivers/mmc/host/sdhci-tegra.c
36988 +++ b/drivers/mmc/host/sdhci-tegra.c
36989 @@ -119,6 +119,10 @@
36990  /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
36991  #define SDHCI_TEGRA_CQE_BASE_ADDR                      0xF000
36993 +#define SDHCI_TEGRA_CQE_TRNS_MODE      (SDHCI_TRNS_MULTI | \
36994 +                                        SDHCI_TRNS_BLK_CNT_EN | \
36995 +                                        SDHCI_TRNS_DMA)
36997  struct sdhci_tegra_soc_data {
36998         const struct sdhci_pltfm_data *pdata;
36999         u64 dma_mask;
37000 @@ -1156,6 +1160,7 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
37001  static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
37003         struct mmc_host *mmc = cq_host->mmc;
37004 +       struct sdhci_host *host = mmc_priv(mmc);
37005         u8 ctrl;
37006         ktime_t timeout;
37007         bool timed_out;
37008 @@ -1170,6 +1175,7 @@ static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
37009          */
37010         if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
37011             cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
37012 +               sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
37013                 sdhci_cqe_enable(mmc);
37014                 writel(val, cq_host->mmio + reg);
37015                 timeout = ktime_add_us(ktime_get(), 50);
37016 @@ -1205,6 +1211,7 @@ static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
37017  static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
37019         struct cqhci_host *cq_host = mmc->cqe_private;
37020 +       struct sdhci_host *host = mmc_priv(mmc);
37021         u32 val;
37023         /*
37024 @@ -1218,6 +1225,7 @@ static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
37025                 if (val & CQHCI_ENABLE)
37026                         cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
37027                                      CQHCI_CFG);
37028 +               sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
37029                 sdhci_cqe_enable(mmc);
37030                 if (val & CQHCI_ENABLE)
37031                         cqhci_writel(cq_host, val, CQHCI_CFG);
37032 @@ -1281,12 +1289,36 @@ static void tegra_sdhci_set_timeout(struct sdhci_host *host,
37033         __sdhci_set_timeout(host, cmd);
37036 +static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
37038 +       struct cqhci_host *cq_host = mmc->cqe_private;
37039 +       u32 reg;
37041 +       reg = cqhci_readl(cq_host, CQHCI_CFG);
37042 +       reg |= CQHCI_ENABLE;
37043 +       cqhci_writel(cq_host, reg, CQHCI_CFG);
37046 +static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
37048 +       struct cqhci_host *cq_host = mmc->cqe_private;
37049 +       struct sdhci_host *host = mmc_priv(mmc);
37050 +       u32 reg;
37052 +       reg = cqhci_readl(cq_host, CQHCI_CFG);
37053 +       reg &= ~CQHCI_ENABLE;
37054 +       cqhci_writel(cq_host, reg, CQHCI_CFG);
37055 +       sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
37058  static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
37059         .write_l    = tegra_cqhci_writel,
37060         .enable = sdhci_tegra_cqe_enable,
37061         .disable = sdhci_cqe_disable,
37062         .dumpregs = sdhci_tegra_dumpregs,
37063         .update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
37064 +       .pre_enable = sdhci_tegra_cqe_pre_enable,
37065 +       .post_disable = sdhci_tegra_cqe_post_disable,
37066  };
37068  static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
37069 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
37070 index 2d73407ee52e..a9e20818ff3a 100644
37071 --- a/drivers/mmc/host/sdhci.c
37072 +++ b/drivers/mmc/host/sdhci.c
37073 @@ -2996,6 +2996,37 @@ static bool sdhci_request_done(struct sdhci_host *host)
37074                 return true;
37075         }
37077 +       /*
37078 +        * The controller needs a reset of internal state machines
37079 +        * upon error conditions.
37080 +        */
37081 +       if (sdhci_needs_reset(host, mrq)) {
37082 +               /*
37083 +                * Do not finish until command and data lines are available for
37084 +                * reset. Note there can only be one other mrq, so it cannot
37085 +                * also be in mrqs_done, otherwise host->cmd and host->data_cmd
37086 +                * would both be null.
37087 +                */
37088 +               if (host->cmd || host->data_cmd) {
37089 +                       spin_unlock_irqrestore(&host->lock, flags);
37090 +                       return true;
37091 +               }
37093 +               /* Some controllers need this kick or reset won't work here */
37094 +               if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
37095 +                       /* This is to force an update */
37096 +                       host->ops->set_clock(host, host->clock);
37098 +               /*
37099 +                * Spec says we should do both at the same time, but Ricoh
37100 +                * controllers do not like that.
37101 +                */
37102 +               sdhci_do_reset(host, SDHCI_RESET_CMD);
37103 +               sdhci_do_reset(host, SDHCI_RESET_DATA);
37105 +               host->pending_reset = false;
37106 +       }
37108         /*
37109          * Always unmap the data buffers if they were mapped by
37110          * sdhci_prepare_data() whenever we finish with a request.
37111 @@ -3059,35 +3090,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
37112                 }
37113         }
37115 -       /*
37116 -        * The controller needs a reset of internal state machines
37117 -        * upon error conditions.
37118 -        */
37119 -       if (sdhci_needs_reset(host, mrq)) {
37120 -               /*
37121 -                * Do not finish until command and data lines are available for
37122 -                * reset. Note there can only be one other mrq, so it cannot
37123 -                * also be in mrqs_done, otherwise host->cmd and host->data_cmd
37124 -                * would both be null.
37125 -                */
37126 -               if (host->cmd || host->data_cmd) {
37127 -                       spin_unlock_irqrestore(&host->lock, flags);
37128 -                       return true;
37129 -               }
37131 -               /* Some controllers need this kick or reset won't work here */
37132 -               if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
37133 -                       /* This is to force an update */
37134 -                       host->ops->set_clock(host, host->clock);
37136 -               /* Spec says we should do both at the same time, but Ricoh
37137 -                  controllers do not like that. */
37138 -               sdhci_do_reset(host, SDHCI_RESET_CMD);
37139 -               sdhci_do_reset(host, SDHCI_RESET_DATA);
37141 -               host->pending_reset = false;
37142 -       }
37144         host->mrqs_done[i] = NULL;
37146         spin_unlock_irqrestore(&host->lock, flags);
37147 diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
37148 index 2413b6750cec..ccbf9885a52b 100644
37149 --- a/drivers/mmc/host/uniphier-sd.c
37150 +++ b/drivers/mmc/host/uniphier-sd.c
37151 @@ -635,7 +635,7 @@ static int uniphier_sd_probe(struct platform_device *pdev)
37153         ret = tmio_mmc_host_probe(host);
37154         if (ret)
37155 -               goto free_host;
37156 +               goto disable_clk;
37158         ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
37159                                dev_name(dev), host);
37160 @@ -646,6 +646,8 @@ static int uniphier_sd_probe(struct platform_device *pdev)
37162  remove_host:
37163         tmio_mmc_host_remove(host);
37164 +disable_clk:
37165 +       uniphier_sd_clk_disable(host);
37166  free_host:
37167         tmio_mmc_host_free(host);
37169 @@ -658,6 +660,7 @@ static int uniphier_sd_remove(struct platform_device *pdev)
37171         tmio_mmc_host_remove(host);
37172         uniphier_sd_clk_disable(host);
37173 +       tmio_mmc_host_free(host);
37175         return 0;
37177 diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
37178 index a35450002284..58782cfaf71c 100644
37179 --- a/drivers/mtd/maps/physmap-bt1-rom.c
37180 +++ b/drivers/mtd/maps/physmap-bt1-rom.c
37181 @@ -79,7 +79,7 @@ static void __xipram bt1_rom_map_copy_from(struct map_info *map,
37182         if (shift) {
37183                 chunk = min_t(ssize_t, 4 - shift, len);
37184                 data = readl_relaxed(src - shift);
37185 -               memcpy(to, &data + shift, chunk);
37186 +               memcpy(to, (char *)&data + shift, chunk);
37187                 src += chunk;
37188                 to += chunk;
37189                 len -= chunk;
37190 diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
37191 index 001ed5deb622..4f63b8430c71 100644
37192 --- a/drivers/mtd/maps/physmap-core.c
37193 +++ b/drivers/mtd/maps/physmap-core.c
37194 @@ -69,8 +69,10 @@ static int physmap_flash_remove(struct platform_device *dev)
37195         int i, err = 0;
37197         info = platform_get_drvdata(dev);
37198 -       if (!info)
37199 +       if (!info) {
37200 +               err = -EINVAL;
37201                 goto out;
37202 +       }
37204         if (info->cmtd) {
37205                 err = mtd_device_unregister(info->cmtd);
37206 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37207 index 323035d4f2d0..688de663cabf 100644
37208 --- a/drivers/mtd/mtdchar.c
37209 +++ b/drivers/mtd/mtdchar.c
37210 @@ -651,16 +651,12 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
37211         case MEMGETINFO:
37212         case MEMREADOOB:
37213         case MEMREADOOB64:
37214 -       case MEMLOCK:
37215 -       case MEMUNLOCK:
37216         case MEMISLOCKED:
37217         case MEMGETOOBSEL:
37218         case MEMGETBADBLOCK:
37219 -       case MEMSETBADBLOCK:
37220         case OTPSELECT:
37221         case OTPGETREGIONCOUNT:
37222         case OTPGETREGIONINFO:
37223 -       case OTPLOCK:
37224         case ECCGETLAYOUT:
37225         case ECCGETSTATS:
37226         case MTDFILEMODE:
37227 @@ -671,9 +667,13 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
37228         /* "dangerous" commands */
37229         case MEMERASE:
37230         case MEMERASE64:
37231 +       case MEMLOCK:
37232 +       case MEMUNLOCK:
37233 +       case MEMSETBADBLOCK:
37234         case MEMWRITEOOB:
37235         case MEMWRITEOOB64:
37236         case MEMWRITE:
37237 +       case OTPLOCK:
37238                 if (!(file->f_mode & FMODE_WRITE))
37239                         return -EPERM;
37240                 break;
37241 diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
37242 index 2d6423d89a17..d97ddc65b5d4 100644
37243 --- a/drivers/mtd/mtdcore.c
37244 +++ b/drivers/mtd/mtdcore.c
37245 @@ -820,6 +820,9 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
37247         /* Prefer parsed partitions over driver-provided fallback */
37248         ret = parse_mtd_partitions(mtd, types, parser_data);
37249 +       if (ret == -EPROBE_DEFER)
37250 +               goto out;
37252         if (ret > 0)
37253                 ret = 0;
37254         else if (nr_parts)
37255 diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
37256 index 12ca4f19cb14..665fd9020b76 100644
37257 --- a/drivers/mtd/mtdpart.c
37258 +++ b/drivers/mtd/mtdpart.c
37259 @@ -331,7 +331,7 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
37261         list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
37262                 if (mtd_has_partitions(child))
37263 -                       del_mtd_partitions(child);
37264 +                       __del_mtd_partitions(child);
37266                 pr_info("Deleting %s MTD partition\n", child->name);
37267                 ret = del_mtd_device(child);
37268 diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
37269 index e6ceec8f50dc..8aab1017b460 100644
37270 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c
37271 +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
37272 @@ -883,10 +883,12 @@ static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
37273                                                           NULL, 0,
37274                                                           chip->ecc.strength);
37276 -               if (ret >= 0)
37277 +               if (ret >= 0) {
37278 +                       mtd->ecc_stats.corrected += ret;
37279                         max_bitflips = max(ret, max_bitflips);
37280 -               else
37281 +               } else {
37282                         mtd->ecc_stats.failed++;
37283 +               }
37285                 databuf += chip->ecc.size;
37286                 eccbuf += chip->ecc.bytes;
37287 diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
37288 index 659eaa6f0980..5ff4291380c5 100644
37289 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
37290 +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
37291 @@ -2688,6 +2688,12 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
37293         ret = brcmstb_choose_ecc_layout(host);
37295 +       /* If OOB is written with ECC enabled it will cause ECC errors */
37296 +       if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
37297 +               chip->ecc.write_oob = brcmnand_write_oob_raw;
37298 +               chip->ecc.read_oob = brcmnand_read_oob_raw;
37299 +       }
37301         return ret;
37304 diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
37305 index 0101c0fab50a..a24e2f57fa68 100644
37306 --- a/drivers/mtd/nand/raw/fsmc_nand.c
37307 +++ b/drivers/mtd/nand/raw/fsmc_nand.c
37308 @@ -1077,11 +1077,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
37309                 host->read_dma_chan = dma_request_channel(mask, filter, NULL);
37310                 if (!host->read_dma_chan) {
37311                         dev_err(&pdev->dev, "Unable to get read dma channel\n");
37312 +                       ret = -ENODEV;
37313                         goto disable_clk;
37314                 }
37315                 host->write_dma_chan = dma_request_channel(mask, filter, NULL);
37316                 if (!host->write_dma_chan) {
37317                         dev_err(&pdev->dev, "Unable to get write dma channel\n");
37318 +                       ret = -ENODEV;
37319                         goto release_dma_read_chan;
37320                 }
37321         }
37322 diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
37323 index 3fa8c22d3f36..4d08e4ab5c1b 100644
37324 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
37325 +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
37326 @@ -2449,7 +2449,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
37327         this->bch_geometry.auxiliary_size = 128;
37328         ret = gpmi_alloc_dma_buffer(this);
37329         if (ret)
37330 -               goto err_out;
37331 +               return ret;
37333         nand_controller_init(&this->base);
37334         this->base.ops = &gpmi_nand_controller_ops;
37335 diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
37336 index fd4c318b520f..87c23bb320bf 100644
37337 --- a/drivers/mtd/nand/raw/qcom_nandc.c
37338 +++ b/drivers/mtd/nand/raw/qcom_nandc.c
37339 @@ -2898,7 +2898,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
37340         struct device *dev = nandc->dev;
37341         struct device_node *dn = dev->of_node, *child;
37342         struct qcom_nand_host *host;
37343 -       int ret;
37344 +       int ret = -ENODEV;
37346         for_each_available_child_of_node(dn, child) {
37347                 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
37348 @@ -2916,10 +2916,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
37349                 list_add_tail(&host->node, &nandc->host_list);
37350         }
37352 -       if (list_empty(&nandc->host_list))
37353 -               return -ENODEV;
37355 -       return 0;
37356 +       return ret;
37359  /* parse custom DT properties here */
37360 diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
37361 index 61d932c1b718..17f63f95f4a2 100644
37362 --- a/drivers/mtd/nand/spi/core.c
37363 +++ b/drivers/mtd/nand/spi/core.c
37364 @@ -1263,12 +1263,14 @@ static const struct spi_device_id spinand_ids[] = {
37365         { .name = "spi-nand" },
37366         { /* sentinel */ },
37367  };
37368 +MODULE_DEVICE_TABLE(spi, spinand_ids);
37370  #ifdef CONFIG_OF
37371  static const struct of_device_id spinand_of_ids[] = {
37372         { .compatible = "spi-nand" },
37373         { /* sentinel */ },
37374  };
37375 +MODULE_DEVICE_TABLE(of, spinand_of_ids);
37376  #endif
37378  static struct spi_mem_driver spinand_drv = {
37379 diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
37380 index 808cb33d71f8..d9083308f6ba 100644
37381 --- a/drivers/mtd/parsers/qcomsmempart.c
37382 +++ b/drivers/mtd/parsers/qcomsmempart.c
37383 @@ -65,6 +65,13 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
37384         int ret, i, numparts;
37385         char *name, *c;
37387 +       if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
37388 +                       && mtd->type == MTD_NORFLASH) {
37389 +               pr_err("%s: SMEM partition parser is incompatible with 4K sectors\n",
37390 +                               mtd->name);
37391 +               return -EINVAL;
37392 +       }
37394         pr_debug("Parsing partition table info from SMEM\n");
37395         ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
37396         if (IS_ERR(ptable)) {
37397 @@ -104,7 +111,7 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
37398          * complete partition table
37399          */
37400         ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
37401 -       if (IS_ERR_OR_NULL(ptable)) {
37402 +       if (IS_ERR(ptable)) {
37403                 pr_err("Error reading partition table\n");
37404                 return PTR_ERR(ptable);
37405         }
37406 diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
37407 index 0522304f52fa..72bc1342c3ff 100644
37408 --- a/drivers/mtd/spi-nor/core.c
37409 +++ b/drivers/mtd/spi-nor/core.c
37410 @@ -3301,6 +3301,37 @@ static void spi_nor_resume(struct mtd_info *mtd)
37411                 dev_err(dev, "resume() failed\n");
37414 +static int spi_nor_get_device(struct mtd_info *mtd)
37416 +       struct mtd_info *master = mtd_get_master(mtd);
37417 +       struct spi_nor *nor = mtd_to_spi_nor(master);
37418 +       struct device *dev;
37420 +       if (nor->spimem)
37421 +               dev = nor->spimem->spi->controller->dev.parent;
37422 +       else
37423 +               dev = nor->dev;
37425 +       if (!try_module_get(dev->driver->owner))
37426 +               return -ENODEV;
37428 +       return 0;
37431 +static void spi_nor_put_device(struct mtd_info *mtd)
37433 +       struct mtd_info *master = mtd_get_master(mtd);
37434 +       struct spi_nor *nor = mtd_to_spi_nor(master);
37435 +       struct device *dev;
37437 +       if (nor->spimem)
37438 +               dev = nor->spimem->spi->controller->dev.parent;
37439 +       else
37440 +               dev = nor->dev;
37442 +       module_put(dev->driver->owner);
37445  void spi_nor_restore(struct spi_nor *nor)
37447         /* restore the addressing mode */
37448 @@ -3495,6 +3526,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
37449         mtd->_read = spi_nor_read;
37450         mtd->_suspend = spi_nor_suspend;
37451         mtd->_resume = spi_nor_resume;
37452 +       mtd->_get_device = spi_nor_get_device;
37453 +       mtd->_put_device = spi_nor_put_device;
37455         if (nor->params->locking_ops) {
37456                 mtd->_lock = spi_nor_lock;
37457 diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
37458 index 9203abaac229..662b212787d4 100644
37459 --- a/drivers/mtd/spi-nor/macronix.c
37460 +++ b/drivers/mtd/spi-nor/macronix.c
37461 @@ -73,9 +73,6 @@ static const struct flash_info macronix_parts[] = {
37462                               SECT_4K | SPI_NOR_DUAL_READ |
37463                               SPI_NOR_QUAD_READ) },
37464         { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
37465 -       { "mx25l51245g", INFO(0xc2201a, 0, 64 * 1024, 1024,
37466 -                             SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
37467 -                             SPI_NOR_4B_OPCODES) },
37468         { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
37469                               SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
37470                               SPI_NOR_4B_OPCODES) },
37471 diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
37472 index 3d63b15bbaa1..164071e9d457 100644
37473 --- a/drivers/net/caif/caif_hsi.c
37474 +++ b/drivers/net/caif/caif_hsi.c
37475 @@ -924,7 +924,7 @@ static void cfhsi_wake_down(struct work_struct *work)
37476                         break;
37478                 set_current_state(TASK_INTERRUPTIBLE);
37479 -               schedule_timeout(1);
37480 +               schedule_min_hrtimeout();
37481                 retry--;
37482         }
37484 diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
37485 index 6a64fe410987..c3508109263e 100644
37486 --- a/drivers/net/can/dev/skb.c
37487 +++ b/drivers/net/can/dev/skb.c
37488 @@ -151,7 +151,11 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
37490         struct can_priv *priv = netdev_priv(dev);
37492 -       BUG_ON(idx >= priv->echo_skb_max);
37493 +       if (idx >= priv->echo_skb_max) {
37494 +               netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
37495 +                          __func__, idx, priv->echo_skb_max);
37496 +               return;
37497 +       }
37499         if (priv->echo_skb[idx]) {
37500                 dev_kfree_skb_any(priv->echo_skb[idx]);
37501 diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
37502 index 0c8d36bc668c..f71127229caf 100644
37503 --- a/drivers/net/can/m_can/m_can.c
37504 +++ b/drivers/net/can/m_can/m_can.c
37505 @@ -1455,6 +1455,8 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
37506         int i;
37507         int putidx;
37509 +       cdev->tx_skb = NULL;
37511         /* Generate ID field for TX buffer Element */
37512         /* Common to all supported M_CAN versions */
37513         if (cf->can_id & CAN_EFF_FLAG) {
37514 @@ -1571,7 +1573,6 @@ static void m_can_tx_work_queue(struct work_struct *ws)
37515                                                    tx_work);
37517         m_can_tx_handler(cdev);
37518 -       cdev->tx_skb = NULL;
37521  static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
37522 diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
37523 index a57da43680d8..bd7d0251be10 100644
37524 --- a/drivers/net/can/spi/mcp251x.c
37525 +++ b/drivers/net/can/spi/mcp251x.c
37526 @@ -956,8 +956,6 @@ static int mcp251x_stop(struct net_device *net)
37528         priv->force_quit = 1;
37529         free_irq(spi->irq, priv);
37530 -       destroy_workqueue(priv->wq);
37531 -       priv->wq = NULL;
37533         mutex_lock(&priv->mcp_lock);
37535 @@ -1224,24 +1222,15 @@ static int mcp251x_open(struct net_device *net)
37536                 goto out_close;
37537         }
37539 -       priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
37540 -                                  0);
37541 -       if (!priv->wq) {
37542 -               ret = -ENOMEM;
37543 -               goto out_clean;
37544 -       }
37545 -       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
37546 -       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
37548         ret = mcp251x_hw_wake(spi);
37549         if (ret)
37550 -               goto out_free_wq;
37551 +               goto out_free_irq;
37552         ret = mcp251x_setup(net, spi);
37553         if (ret)
37554 -               goto out_free_wq;
37555 +               goto out_free_irq;
37556         ret = mcp251x_set_normal_mode(spi);
37557         if (ret)
37558 -               goto out_free_wq;
37559 +               goto out_free_irq;
37561         can_led_event(net, CAN_LED_EVENT_OPEN);
37563 @@ -1250,9 +1239,7 @@ static int mcp251x_open(struct net_device *net)
37565         return 0;
37567 -out_free_wq:
37568 -       destroy_workqueue(priv->wq);
37569 -out_clean:
37570 +out_free_irq:
37571         free_irq(spi->irq, priv);
37572         mcp251x_hw_sleep(spi);
37573  out_close:
37574 @@ -1373,6 +1360,15 @@ static int mcp251x_can_probe(struct spi_device *spi)
37575         if (ret)
37576                 goto out_clk;
37578 +       priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
37579 +                                  0);
37580 +       if (!priv->wq) {
37581 +               ret = -ENOMEM;
37582 +               goto out_clk;
37583 +       }
37584 +       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
37585 +       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
37587         priv->spi = spi;
37588         mutex_init(&priv->mcp_lock);
37590 @@ -1417,6 +1413,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
37591         return 0;
37593  error_probe:
37594 +       destroy_workqueue(priv->wq);
37595 +       priv->wq = NULL;
37596         mcp251x_power_enable(priv->power, 0);
37598  out_clk:
37599 @@ -1438,6 +1436,9 @@ static int mcp251x_can_remove(struct spi_device *spi)
37601         mcp251x_power_enable(priv->power, 0);
37603 +       destroy_workqueue(priv->wq);
37604 +       priv->wq = NULL;
37606         clk_disable_unprepare(priv->clk);
37608         free_candev(net);
37609 diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
37610 index 799e9d5d3481..4a742aa5c417 100644
37611 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
37612 +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
37613 @@ -2856,8 +2856,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
37615         clk = devm_clk_get(&spi->dev, NULL);
37616         if (IS_ERR(clk))
37617 -               dev_err_probe(&spi->dev, PTR_ERR(clk),
37618 -                             "Failed to get Oscillator (clock)!\n");
37619 +               return dev_err_probe(&spi->dev, PTR_ERR(clk),
37620 +                                    "Failed to get Oscillator (clock)!\n");
37621         freq = clk_get_rate(clk);
37623         /* Sanity check */
37624 @@ -2957,10 +2957,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
37626         err = mcp251xfd_register(priv);
37627         if (err)
37628 -               goto out_free_candev;
37629 +               goto out_can_rx_offload_del;
37631         return 0;
37633 + out_can_rx_offload_del:
37634 +       can_rx_offload_del(&priv->offload);
37635   out_free_candev:
37636         spi->max_speed_hz = priv->spi_max_speed_hz_orig;
37638 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
37639 index e393e8457d77..4274f78682d9 100644
37640 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c
37641 +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
37642 @@ -288,7 +288,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
37643         } else {
37644                 /* the PCAN-USB needs time to init */
37645                 set_current_state(TASK_INTERRUPTIBLE);
37646 -               schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
37647 +               schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
37648         }
37650         return err;
37651 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
37652 index ba5d546d06aa..9c86cacc4a72 100644
37653 --- a/drivers/net/dsa/bcm_sf2.c
37654 +++ b/drivers/net/dsa/bcm_sf2.c
37655 @@ -32,6 +32,36 @@
37656  #include "b53/b53_priv.h"
37657  #include "b53/b53_regs.h"
37659 +static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
37661 +       switch (priv->type) {
37662 +       case BCM4908_DEVICE_ID:
37663 +               switch (port) {
37664 +               case 7:
37665 +                       return REG_RGMII_11_CNTRL;
37666 +               default:
37667 +                       break;
37668 +               }
37669 +               break;
37670 +       default:
37671 +               switch (port) {
37672 +               case 0:
37673 +                       return REG_RGMII_0_CNTRL;
37674 +               case 1:
37675 +                       return REG_RGMII_1_CNTRL;
37676 +               case 2:
37677 +                       return REG_RGMII_2_CNTRL;
37678 +               default:
37679 +                       break;
37680 +               }
37681 +       }
37683 +       WARN_ONCE(1, "Unsupported port %d\n", port);
37685 +       /* RO fallback reg */
37686 +       return REG_SWITCH_STATUS;
37689  /* Return the number of active ports, not counting the IMP (CPU) port */
37690  static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
37692 @@ -647,6 +677,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
37694         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
37695         u32 id_mode_dis = 0, port_mode;
37696 +       u32 reg_rgmii_ctrl;
37697         u32 reg;
37699         if (port == core_readl(priv, CORE_IMP0_PRT_ID))
37700 @@ -670,10 +701,12 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
37701                 return;
37702         }
37704 +       reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
37706         /* Clear id_mode_dis bit, and the existing port mode, let
37707          * RGMII_MODE_EN bet set by mac_link_{up,down}
37708          */
37709 -       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
37710 +       reg = reg_readl(priv, reg_rgmii_ctrl);
37711         reg &= ~ID_MODE_DIS;
37712         reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
37714 @@ -681,13 +714,14 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
37715         if (id_mode_dis)
37716                 reg |= ID_MODE_DIS;
37718 -       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
37719 +       reg_writel(priv, reg, reg_rgmii_ctrl);
37722  static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
37723                                     phy_interface_t interface, bool link)
37725         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
37726 +       u32 reg_rgmii_ctrl;
37727         u32 reg;
37729         if (!phy_interface_mode_is_rgmii(interface) &&
37730 @@ -695,13 +729,15 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
37731             interface != PHY_INTERFACE_MODE_REVMII)
37732                 return;
37734 +       reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
37736         /* If the link is down, just disable the interface to conserve power */
37737 -       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
37738 +       reg = reg_readl(priv, reg_rgmii_ctrl);
37739         if (link)
37740                 reg |= RGMII_MODE_EN;
37741         else
37742                 reg &= ~RGMII_MODE_EN;
37743 -       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
37744 +       reg_writel(priv, reg, reg_rgmii_ctrl);
37747  static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
37748 @@ -735,11 +771,15 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
37750         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
37751         struct ethtool_eee *p = &priv->dev->ports[port].eee;
37752 -       u32 reg, offset;
37754         bcm_sf2_sw_mac_link_set(ds, port, interface, true);
37756         if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
37757 +               u32 reg_rgmii_ctrl;
37758 +               u32 reg, offset;
37760 +               reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
37762                 if (priv->type == BCM4908_DEVICE_ID ||
37763                     priv->type == BCM7445_DEVICE_ID)
37764                         offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
37765 @@ -750,7 +790,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
37766                     interface == PHY_INTERFACE_MODE_RGMII_TXID ||
37767                     interface == PHY_INTERFACE_MODE_MII ||
37768                     interface == PHY_INTERFACE_MODE_REVMII) {
37769 -                       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
37770 +                       reg = reg_readl(priv, reg_rgmii_ctrl);
37771                         reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
37773                         if (tx_pause)
37774 @@ -758,7 +798,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
37775                         if (rx_pause)
37776                                 reg |= RX_PAUSE_EN;
37778 -                       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
37779 +                       reg_writel(priv, reg, reg_rgmii_ctrl);
37780                 }
37782                 reg = SW_OVERRIDE | LINK_STS;
37783 @@ -1144,9 +1184,7 @@ static const u16 bcm_sf2_4908_reg_offsets[] = {
37784         [REG_PHY_REVISION]      = 0x14,
37785         [REG_SPHY_CNTRL]        = 0x24,
37786         [REG_CROSSBAR]          = 0xc8,
37787 -       [REG_RGMII_0_CNTRL]     = 0xe0,
37788 -       [REG_RGMII_1_CNTRL]     = 0xec,
37789 -       [REG_RGMII_2_CNTRL]     = 0xf8,
37790 +       [REG_RGMII_11_CNTRL]    = 0x014c,
37791         [REG_LED_0_CNTRL]       = 0x40,
37792         [REG_LED_1_CNTRL]       = 0x4c,
37793         [REG_LED_2_CNTRL]       = 0x58,
37794 diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
37795 index 1d2d55c9f8aa..9e141d1a0b07 100644
37796 --- a/drivers/net/dsa/bcm_sf2_regs.h
37797 +++ b/drivers/net/dsa/bcm_sf2_regs.h
37798 @@ -21,6 +21,7 @@ enum bcm_sf2_reg_offs {
37799         REG_RGMII_0_CNTRL,
37800         REG_RGMII_1_CNTRL,
37801         REG_RGMII_2_CNTRL,
37802 +       REG_RGMII_11_CNTRL,
37803         REG_LED_0_CNTRL,
37804         REG_LED_1_CNTRL,
37805         REG_LED_2_CNTRL,
37806 @@ -48,8 +49,6 @@ enum bcm_sf2_reg_offs {
37807  #define  PHY_PHYAD_SHIFT               8
37808  #define  PHY_PHYAD_MASK                        0x1F
37810 -#define REG_RGMII_CNTRL_P(x)           (REG_RGMII_0_CNTRL + (x))
37812  /* Relative to REG_RGMII_CNTRL */
37813  #define  RGMII_MODE_EN                 (1 << 0)
37814  #define  ID_MODE_DIS                   (1 << 1)
37815 diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
37816 index 21953d6d484c..ada7a38d4d31 100644
37817 --- a/drivers/net/dsa/mv88e6xxx/devlink.c
37818 +++ b/drivers/net/dsa/mv88e6xxx/devlink.c
37819 @@ -678,7 +678,7 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
37820                                 sizeof(struct mv88e6xxx_devlink_atu_entry);
37821                         break;
37822                 case MV88E6XXX_REGION_VTU:
37823 -                       size = mv88e6xxx_max_vid(chip) *
37824 +                       size = (mv88e6xxx_max_vid(chip) + 1) *
37825                                 sizeof(struct mv88e6xxx_devlink_vtu_entry);
37826                         break;
37827                 }
37828 diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
37829 index 3195936dc5be..2ce04fef698d 100644
37830 --- a/drivers/net/dsa/mv88e6xxx/serdes.c
37831 +++ b/drivers/net/dsa/mv88e6xxx/serdes.c
37832 @@ -443,15 +443,15 @@ int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
37833  u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
37835         /* There are no configurable serdes lanes on this switch chip but we
37836 -        * need to return non-zero so that callers of
37837 +        * need to return a non-negative lane number so that callers of
37838          * mv88e6xxx_serdes_get_lane() know this is a serdes port.
37839          */
37840         switch (chip->ports[port].cmode) {
37841         case MV88E6185_PORT_STS_CMODE_SERDES:
37842         case MV88E6185_PORT_STS_CMODE_1000BASE_X:
37843 -               return 0xff;
37844 -       default:
37845                 return 0;
37846 +       default:
37847 +               return -ENODEV;
37848         }
37851 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
37852 index b53a0d87371a..cf4249d59383 100644
37853 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
37854 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
37855 @@ -122,7 +122,10 @@ enum board_idx {
37856         NETXTREME_E_VF,
37857         NETXTREME_C_VF,
37858         NETXTREME_S_VF,
37859 +       NETXTREME_C_VF_HV,
37860 +       NETXTREME_E_VF_HV,
37861         NETXTREME_E_P5_VF,
37862 +       NETXTREME_E_P5_VF_HV,
37863  };
37865  /* indexed by enum above */
37866 @@ -170,7 +173,10 @@ static const struct {
37867         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
37868         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
37869         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
37870 +       [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
37871 +       [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
37872         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
37873 +       [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
37874  };
37876  static const struct pci_device_id bnxt_pci_tbl[] = {
37877 @@ -222,15 +228,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
37878         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
37879  #ifdef CONFIG_BNXT_SRIOV
37880         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
37881 +       { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
37882 +       { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
37883         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
37884 +       { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
37885         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
37886 +       { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
37887 +       { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
37888 +       { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
37889 +       { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
37890         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
37891         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
37892         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
37893         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
37894         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
37895 +       { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
37896         { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
37897         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
37898 +       { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
37899 +       { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
37900         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
37901  #endif
37902         { 0 }
37903 @@ -265,7 +281,8 @@ static struct workqueue_struct *bnxt_pf_wq;
37904  static bool bnxt_vf_pciid(enum board_idx idx)
37906         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
37907 -               idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
37908 +               idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
37909 +               idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
37912  #define DB_CP_REARM_FLAGS      (DB_KEY_CP | DB_IDX_VALID)
37913 @@ -1732,14 +1749,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
37915         cons = rxcmp->rx_cmp_opaque;
37916         if (unlikely(cons != rxr->rx_next_cons)) {
37917 -               int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
37918 +               int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
37920                 /* 0xffff is forced error, don't print it */
37921                 if (rxr->rx_next_cons != 0xffff)
37922                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
37923                                     cons, rxr->rx_next_cons);
37924                 bnxt_sched_reset(bp, rxr);
37925 -               return rc1;
37926 +               if (rc1)
37927 +                       return rc1;
37928 +               goto next_rx_no_prod_no_len;
37929         }
37930         rx_buf = &rxr->rx_buf_ring[cons];
37931         data = rx_buf->data;
37932 @@ -9736,7 +9755,9 @@ static ssize_t bnxt_show_temp(struct device *dev,
37933         if (!rc)
37934                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
37935         mutex_unlock(&bp->hwrm_cmd_lock);
37936 -       return rc ?: len;
37937 +       if (rc)
37938 +               return rc;
37939 +       return len;
37941  static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
37943 diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
37944 index e6d4ad99cc38..3f1c189646f4 100644
37945 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
37946 +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
37947 @@ -521,7 +521,7 @@
37948  #define    CN23XX_BAR1_INDEX_OFFSET                3
37950  #define    CN23XX_PEM_BAR1_INDEX_REG(port, idx)                \
37951 -               (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
37952 +               (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \
37953                  ((idx) << CN23XX_BAR1_INDEX_OFFSET))
37955  /*############################ DPI #########################*/
37956 diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
37957 index f782e6af45e9..50bbe79fb93d 100644
37958 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
37959 +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
37960 @@ -776,7 +776,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
37961         mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
37962         mbx.rq.qs_num = qs->vnic_id;
37963         mbx.rq.rq_num = qidx;
37964 -       mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
37965 +       mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
37966                           (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
37967                           (rq->cont_qs_rbdr_idx << 8) |
37968                           (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
37969 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
37970 index 83b46440408b..bde8494215c4 100644
37971 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
37972 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
37973 @@ -174,31 +174,31 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
37974                                       WORD_MASK, f->fs.nat_lip[15] |
37975                                       f->fs.nat_lip[14] << 8 |
37976                                       f->fs.nat_lip[13] << 16 |
37977 -                                     f->fs.nat_lip[12] << 24, 1);
37978 +                                     (u64)f->fs.nat_lip[12] << 24, 1);
37980                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
37981                                       WORD_MASK, f->fs.nat_lip[11] |
37982                                       f->fs.nat_lip[10] << 8 |
37983                                       f->fs.nat_lip[9] << 16 |
37984 -                                     f->fs.nat_lip[8] << 24, 1);
37985 +                                     (u64)f->fs.nat_lip[8] << 24, 1);
37987                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
37988                                       WORD_MASK, f->fs.nat_lip[7] |
37989                                       f->fs.nat_lip[6] << 8 |
37990                                       f->fs.nat_lip[5] << 16 |
37991 -                                     f->fs.nat_lip[4] << 24, 1);
37992 +                                     (u64)f->fs.nat_lip[4] << 24, 1);
37994                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
37995                                       WORD_MASK, f->fs.nat_lip[3] |
37996                                       f->fs.nat_lip[2] << 8 |
37997                                       f->fs.nat_lip[1] << 16 |
37998 -                                     f->fs.nat_lip[0] << 24, 1);
37999 +                                     (u64)f->fs.nat_lip[0] << 24, 1);
38000                 } else {
38001                         set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
38002                                       WORD_MASK, f->fs.nat_lip[3] |
38003                                       f->fs.nat_lip[2] << 8 |
38004                                       f->fs.nat_lip[1] << 16 |
38005 -                                     f->fs.nat_lip[0] << 24, 1);
38006 +                                     (u64)f->fs.nat_lip[0] << 24, 1);
38007                 }
38008         }
38010 @@ -208,25 +208,25 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
38011                                       WORD_MASK, f->fs.nat_fip[15] |
38012                                       f->fs.nat_fip[14] << 8 |
38013                                       f->fs.nat_fip[13] << 16 |
38014 -                                     f->fs.nat_fip[12] << 24, 1);
38015 +                                     (u64)f->fs.nat_fip[12] << 24, 1);
38017                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
38018                                       WORD_MASK, f->fs.nat_fip[11] |
38019                                       f->fs.nat_fip[10] << 8 |
38020                                       f->fs.nat_fip[9] << 16 |
38021 -                                     f->fs.nat_fip[8] << 24, 1);
38022 +                                     (u64)f->fs.nat_fip[8] << 24, 1);
38024                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
38025                                       WORD_MASK, f->fs.nat_fip[7] |
38026                                       f->fs.nat_fip[6] << 8 |
38027                                       f->fs.nat_fip[5] << 16 |
38028 -                                     f->fs.nat_fip[4] << 24, 1);
38029 +                                     (u64)f->fs.nat_fip[4] << 24, 1);
38031                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
38032                                       WORD_MASK, f->fs.nat_fip[3] |
38033                                       f->fs.nat_fip[2] << 8 |
38034                                       f->fs.nat_fip[1] << 16 |
38035 -                                     f->fs.nat_fip[0] << 24, 1);
38036 +                                     (u64)f->fs.nat_fip[0] << 24, 1);
38038                 } else {
38039                         set_tcb_field(adap, f, tid,
38040 @@ -234,13 +234,13 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
38041                                       WORD_MASK, f->fs.nat_fip[3] |
38042                                       f->fs.nat_fip[2] << 8 |
38043                                       f->fs.nat_fip[1] << 16 |
38044 -                                     f->fs.nat_fip[0] << 24, 1);
38045 +                                     (u64)f->fs.nat_fip[0] << 24, 1);
38046                 }
38047         }
38049         set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
38050                       (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
38051 -                     (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
38052 +                     (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
38053                       1);
38056 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
38057 index f04ec53544ae..b1443ff439de 100644
38058 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
38059 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
38060 @@ -768,7 +768,7 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
38061         return err;
38064 -static inline void enic_queue_wq_skb(struct enic *enic,
38065 +static inline int enic_queue_wq_skb(struct enic *enic,
38066         struct vnic_wq *wq, struct sk_buff *skb)
38068         unsigned int mss = skb_shinfo(skb)->gso_size;
38069 @@ -814,6 +814,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
38070                 wq->to_use = buf->next;
38071                 dev_kfree_skb(skb);
38072         }
38073 +       return err;
38076  /* netif_tx_lock held, process context with BHs disabled, or BH */
38077 @@ -857,7 +858,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
38078                 return NETDEV_TX_BUSY;
38079         }
38081 -       enic_queue_wq_skb(enic, wq, skb);
38082 +       if (enic_queue_wq_skb(enic, wq, skb))
38083 +               goto error;
38085         if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
38086                 netif_tx_stop_queue(txq);
38087 @@ -865,6 +867,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
38088         if (!netdev_xmit_more() || netif_xmit_stopped(txq))
38089                 vnic_wq_doorbell(wq);
38091 +error:
38092         spin_unlock(&enic->wq_lock[txq_map]);
38094         return NETDEV_TX_OK;
38095 diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
38096 index 67c436400352..de7b31842233 100644
38097 --- a/drivers/net/ethernet/freescale/Makefile
38098 +++ b/drivers/net/ethernet/freescale/Makefile
38099 @@ -24,6 +24,4 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
38101  obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
38103 -obj-$(CONFIG_FSL_ENETC) += enetc/
38104 -obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
38105 -obj-$(CONFIG_FSL_ENETC_VF) += enetc/
38106 +obj-y += enetc/
38107 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
38108 index 3db882322b2b..70aea9c274fe 100644
38109 --- a/drivers/net/ethernet/freescale/fec_main.c
38110 +++ b/drivers/net/ethernet/freescale/fec_main.c
38111 @@ -2048,6 +2048,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
38112         fep->link = 0;
38113         fep->full_duplex = 0;
38115 +       phy_dev->mac_managed_pm = 1;
38117         phy_attached_info(phy_dev);
38119         return 0;
38120 @@ -3864,6 +3866,7 @@ static int __maybe_unused fec_resume(struct device *dev)
38121                 netif_device_attach(ndev);
38122                 netif_tx_unlock_bh(ndev);
38123                 napi_enable(&fep->napi);
38124 +               phy_init_hw(ndev->phydev);
38125                 phy_start(ndev->phydev);
38126         }
38127         rtnl_unlock();
38128 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
38129 index bf4302a5cf95..0f70158c2551 100644
38130 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
38131 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
38132 @@ -576,8 +576,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
38133         if (h->ae_algo->ops->set_timer_task)
38134                 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
38136 -       netif_tx_stop_all_queues(netdev);
38137         netif_carrier_off(netdev);
38138 +       netif_tx_disable(netdev);
38140         hns3_nic_net_down(netdev);
38142 @@ -823,7 +823,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
38143   * and it is udp packet, which has a dest port as the IANA assigned.
38144   * the hardware is expected to do the checksum offload, but the
38145   * hardware will not do the checksum offload when udp dest port is
38146 - * 4789 or 6081.
38147 + * 4789, 4790 or 6081.
38148   */
38149  static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
38151 @@ -841,7 +841,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
38153         if (!(!skb->encapsulation &&
38154               (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
38155 -             l4.udp->dest == htons(GENEVE_UDP_PORT))))
38156 +             l4.udp->dest == htons(GENEVE_UDP_PORT) ||
38157 +             l4.udp->dest == htons(4790))))
38158                 return false;
38160         skb_checksum_help(skb);
38161 @@ -1277,23 +1278,21 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
38164  static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
38165 -                                  u8 max_non_tso_bd_num)
38166 +                                  u8 max_non_tso_bd_num, unsigned int bd_num,
38167 +                                  unsigned int recursion_level)
38169 +#define HNS3_MAX_RECURSION_LEVEL       24
38171         struct sk_buff *frag_skb;
38172 -       unsigned int bd_num = 0;
38174         /* If the total len is within the max bd limit */
38175 -       if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
38176 +       if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
38177 +                  !skb_has_frag_list(skb) &&
38178                    skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
38179                 return skb_shinfo(skb)->nr_frags + 1U;
38181 -       /* The below case will always be linearized, return
38182 -        * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized.
38183 -        */
38184 -       if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
38185 -                    (!skb_is_gso(skb) && skb->len >
38186 -                     HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
38187 -               return HNS3_MAX_TSO_BD_NUM + 1U;
38188 +       if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
38189 +               return UINT_MAX;
38191         bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
38193 @@ -1301,7 +1300,8 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
38194                 return bd_num;
38196         skb_walk_frags(skb, frag_skb) {
38197 -               bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
38198 +               bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
38199 +                                       bd_num, recursion_level + 1);
38200                 if (bd_num > HNS3_MAX_TSO_BD_NUM)
38201                         return bd_num;
38202         }
38203 @@ -1361,6 +1361,43 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
38204                 size[i] = skb_frag_size(&shinfo->frags[i]);
38207 +static int hns3_skb_linearize(struct hns3_enet_ring *ring,
38208 +                             struct sk_buff *skb,
38209 +                             u8 max_non_tso_bd_num,
38210 +                             unsigned int bd_num)
38212 +       /* 'bd_num == UINT_MAX' means the skb' fraglist has a
38213 +        * recursion level of over HNS3_MAX_RECURSION_LEVEL.
38214 +        */
38215 +       if (bd_num == UINT_MAX) {
38216 +               u64_stats_update_begin(&ring->syncp);
38217 +               ring->stats.over_max_recursion++;
38218 +               u64_stats_update_end(&ring->syncp);
38219 +               return -ENOMEM;
38220 +       }
38222 +       /* The skb->len has exceeded the hw limitation, linearization
38223 +        * will not help.
38224 +        */
38225 +       if (skb->len > HNS3_MAX_TSO_SIZE ||
38226 +           (!skb_is_gso(skb) && skb->len >
38227 +            HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
38228 +               u64_stats_update_begin(&ring->syncp);
38229 +               ring->stats.hw_limitation++;
38230 +               u64_stats_update_end(&ring->syncp);
38231 +               return -ENOMEM;
38232 +       }
38234 +       if (__skb_linearize(skb)) {
38235 +               u64_stats_update_begin(&ring->syncp);
38236 +               ring->stats.sw_err_cnt++;
38237 +               u64_stats_update_end(&ring->syncp);
38238 +               return -ENOMEM;
38239 +       }
38241 +       return 0;
38244  static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
38245                                   struct net_device *netdev,
38246                                   struct sk_buff *skb)
38247 @@ -1370,7 +1407,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
38248         unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
38249         unsigned int bd_num;
38251 -       bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
38252 +       bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
38253         if (unlikely(bd_num > max_non_tso_bd_num)) {
38254                 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
38255                     !hns3_skb_need_linearized(skb, bd_size, bd_num,
38256 @@ -1379,16 +1416,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
38257                         goto out;
38258                 }
38260 -               if (__skb_linearize(skb))
38261 +               if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
38262 +                                      bd_num))
38263                         return -ENOMEM;
38265                 bd_num = hns3_tx_bd_count(skb->len);
38266 -               if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
38267 -                   (!skb_is_gso(skb) &&
38268 -                    bd_num > max_non_tso_bd_num)) {
38269 -                       trace_hns3_over_max_bd(skb);
38270 -                       return -ENOMEM;
38271 -               }
38273                 u64_stats_update_begin(&ring->syncp);
38274                 ring->stats.tx_copy++;
38275 @@ -1412,6 +1444,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
38276                 return bd_num;
38277         }
38279 +       u64_stats_update_begin(&ring->syncp);
38280 +       ring->stats.tx_busy++;
38281 +       u64_stats_update_end(&ring->syncp);
38283         return -EBUSY;
38286 @@ -1459,6 +1495,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
38287                                  struct sk_buff *skb, enum hns_desc_type type)
38289         unsigned int size = skb_headlen(skb);
38290 +       struct sk_buff *frag_skb;
38291         int i, ret, bd_num = 0;
38293         if (size) {
38294 @@ -1483,6 +1520,15 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
38295                 bd_num += ret;
38296         }
38298 +       skb_walk_frags(skb, frag_skb) {
38299 +               ret = hns3_fill_skb_to_desc(ring, frag_skb,
38300 +                                           DESC_TYPE_FRAGLIST_SKB);
38301 +               if (unlikely(ret < 0))
38302 +                       return ret;
38304 +               bd_num += ret;
38305 +       }
38307         return bd_num;
38310 @@ -1513,8 +1559,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
38311         struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
38312         struct netdev_queue *dev_queue;
38313         int pre_ntu, next_to_use_head;
38314 -       struct sk_buff *frag_skb;
38315 -       int bd_num = 0;
38316         bool doorbell;
38317         int ret;
38319 @@ -1530,15 +1574,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
38320         ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
38321         if (unlikely(ret <= 0)) {
38322                 if (ret == -EBUSY) {
38323 -                       u64_stats_update_begin(&ring->syncp);
38324 -                       ring->stats.tx_busy++;
38325 -                       u64_stats_update_end(&ring->syncp);
38326                         hns3_tx_doorbell(ring, 0, true);
38327                         return NETDEV_TX_BUSY;
38328 -               } else if (ret == -ENOMEM) {
38329 -                       u64_stats_update_begin(&ring->syncp);
38330 -                       ring->stats.sw_err_cnt++;
38331 -                       u64_stats_update_end(&ring->syncp);
38332                 }
38334                 hns3_rl_err(netdev, "xmit error: %d!\n", ret);
38335 @@ -1551,21 +1588,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
38336         if (unlikely(ret < 0))
38337                 goto fill_err;
38339 +       /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
38340 +        * zero, which is unlikely, and 'ret > 0' means how many tx desc
38341 +        * need to be notified to the hw.
38342 +        */
38343         ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
38344 -       if (unlikely(ret < 0))
38345 +       if (unlikely(ret <= 0))
38346                 goto fill_err;
38348 -       bd_num += ret;
38350 -       skb_walk_frags(skb, frag_skb) {
38351 -               ret = hns3_fill_skb_to_desc(ring, frag_skb,
38352 -                                           DESC_TYPE_FRAGLIST_SKB);
38353 -               if (unlikely(ret < 0))
38354 -                       goto fill_err;
38356 -               bd_num += ret;
38357 -       }
38359         pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
38360                                         (ring->desc_num - 1);
38361         ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
38362 @@ -1576,7 +1606,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
38363         dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
38364         doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
38365                                           netdev_xmit_more());
38366 -       hns3_tx_doorbell(ring, bd_num, doorbell);
38367 +       hns3_tx_doorbell(ring, ret, doorbell);
38369         return NETDEV_TX_OK;
38371 @@ -1748,11 +1778,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
38372                         tx_drop += ring->stats.tx_l4_proto_err;
38373                         tx_drop += ring->stats.tx_l2l3l4_err;
38374                         tx_drop += ring->stats.tx_tso_err;
38375 +                       tx_drop += ring->stats.over_max_recursion;
38376 +                       tx_drop += ring->stats.hw_limitation;
38377                         tx_errors += ring->stats.sw_err_cnt;
38378                         tx_errors += ring->stats.tx_vlan_err;
38379                         tx_errors += ring->stats.tx_l4_proto_err;
38380                         tx_errors += ring->stats.tx_l2l3l4_err;
38381                         tx_errors += ring->stats.tx_tso_err;
38382 +                       tx_errors += ring->stats.over_max_recursion;
38383 +                       tx_errors += ring->stats.hw_limitation;
38384                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
38386                 /* fetch the rx stats */
38387 @@ -3704,7 +3738,6 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
38389  static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
38391 -       struct hnae3_ring_chain_node vector_ring_chain;
38392         struct hnae3_handle *h = priv->ae_handle;
38393         struct hns3_enet_tqp_vector *tqp_vector;
38394         int ret;
38395 @@ -3736,6 +3769,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
38396         }
38398         for (i = 0; i < priv->vector_num; i++) {
38399 +               struct hnae3_ring_chain_node vector_ring_chain;
38401                 tqp_vector = &priv->tqp_vector[i];
38403                 tqp_vector->rx_group.total_bytes = 0;
38404 @@ -4554,6 +4589,11 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
38405         struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
38406         int ret = 0;
38408 +       if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
38409 +               netdev_err(kinfo->netdev, "device is not initialized yet\n");
38410 +               return -EFAULT;
38411 +       }
38413         clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
38415         if (netif_running(kinfo->netdev)) {
38416 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
38417 index d069b04ee587..e44224e23315 100644
38418 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
38419 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
38420 @@ -376,6 +376,8 @@ struct ring_stats {
38421                         u64 tx_l4_proto_err;
38422                         u64 tx_l2l3l4_err;
38423                         u64 tx_tso_err;
38424 +                       u64 over_max_recursion;
38425 +                       u64 hw_limitation;
38426                 };
38427                 struct {
38428                         u64 rx_pkts;
38429 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
38430 index adcec4ea7cb9..d20f2e246017 100644
38431 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
38432 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
38433 @@ -44,6 +44,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
38434         HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
38435         HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
38436         HNS3_TQP_STAT("tso_err", tx_tso_err),
38437 +       HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
38438 +       HNS3_TQP_STAT("hw_limitation", hw_limitation),
38439  };
38441  #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
38442 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
38443 index 0ca7f1b984bf..78d3eb142df8 100644
38444 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
38445 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
38446 @@ -753,8 +753,9 @@ static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
38448         /* configure IGU,EGU error interrupts */
38449         hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
38450 +       desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE);
38451         if (en)
38452 -               desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
38453 +               desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
38455         desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
38457 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
38458 index 608fe26fc3fe..d647f3c84134 100644
38459 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
38460 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
38461 @@ -32,7 +32,8 @@
38462  #define HCLGE_TQP_ECC_ERR_INT_EN_MASK  0x0FFF
38463  #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK    0x0F000000
38464  #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000
38465 -#define HCLGE_IGU_ERR_INT_EN   0x0000066F
38466 +#define HCLGE_IGU_ERR_INT_EN   0x0000000F
38467 +#define HCLGE_IGU_ERR_INT_TYPE 0x00000660
38468  #define HCLGE_IGU_ERR_INT_EN_MASK      0x000F
38469  #define HCLGE_IGU_TNL_ERR_INT_EN    0x0002AABF
38470  #define HCLGE_IGU_TNL_ERR_INT_EN_MASK  0x003F
38471 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
38472 index b0dbe6dcaa7b..7a560d0e19b9 100644
38473 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
38474 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
38475 @@ -11379,7 +11379,6 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
38476  #define REG_LEN_PER_LINE       (REG_NUM_PER_LINE * sizeof(u32))
38477  #define REG_SEPARATOR_LINE     1
38478  #define REG_NUM_REMAIN_MASK    3
38479 -#define BD_LIST_MAX_NUM                30
38481  int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
38483 @@ -11473,15 +11472,19 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
38485         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
38486         int data_len_per_desc, bd_num, i;
38487 -       int bd_num_list[BD_LIST_MAX_NUM];
38488 +       int *bd_num_list;
38489         u32 data_len;
38490         int ret;
38492 +       bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
38493 +       if (!bd_num_list)
38494 +               return -ENOMEM;
38496         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
38497         if (ret) {
38498                 dev_err(&hdev->pdev->dev,
38499                         "Get dfx reg bd num fail, status is %d.\n", ret);
38500 -               return ret;
38501 +               goto out;
38502         }
38504         data_len_per_desc = sizeof_field(struct hclge_desc, data);
38505 @@ -11492,6 +11495,8 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
38506                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
38507         }
38509 +out:
38510 +       kfree(bd_num_list);
38511         return ret;
38514 @@ -11499,16 +11504,20 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
38516         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
38517         int bd_num, bd_num_max, buf_len, i;
38518 -       int bd_num_list[BD_LIST_MAX_NUM];
38519         struct hclge_desc *desc_src;
38520 +       int *bd_num_list;
38521         u32 *reg = data;
38522         int ret;
38524 +       bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
38525 +       if (!bd_num_list)
38526 +               return -ENOMEM;
38528         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
38529         if (ret) {
38530                 dev_err(&hdev->pdev->dev,
38531                         "Get dfx reg bd num fail, status is %d.\n", ret);
38532 -               return ret;
38533 +               goto out;
38534         }
38536         bd_num_max = bd_num_list[0];
38537 @@ -11517,8 +11526,10 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
38539         buf_len = sizeof(*desc_src) * bd_num_max;
38540         desc_src = kzalloc(buf_len, GFP_KERNEL);
38541 -       if (!desc_src)
38542 -               return -ENOMEM;
38543 +       if (!desc_src) {
38544 +               ret = -ENOMEM;
38545 +               goto out;
38546 +       }
38548         for (i = 0; i < dfx_reg_type_num; i++) {
38549                 bd_num = bd_num_list[i];
38550 @@ -11534,6 +11545,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
38551         }
38553         kfree(desc_src);
38554 +out:
38555 +       kfree(bd_num_list);
38556         return ret;
38559 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
38560 index 51a36e74f088..c3bb16b1f060 100644
38561 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
38562 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
38563 @@ -535,7 +535,7 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
38564         unsigned long advertising;
38565         unsigned long supported;
38566         unsigned long send_data;
38567 -       u8 msg_data[10];
38568 +       u8 msg_data[10] = {};
38569         u8 dest_vfid;
38571         advertising = hdev->hw.mac.advertising[0];
38572 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
38573 index e89820702540..c194bba187d6 100644
38574 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
38575 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
38576 @@ -255,6 +255,8 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
38577         if (!phydev)
38578                 return;
38580 +       phy_loopback(phydev, false);
38582         phy_start(phydev);
38585 diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
38586 index 15f93b355099..5069f690cf0b 100644
38587 --- a/drivers/net/ethernet/intel/i40e/i40e.h
38588 +++ b/drivers/net/ethernet/intel/i40e/i40e.h
38589 @@ -1142,7 +1142,6 @@ static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
38590         return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
38593 -void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable);
38594  #ifdef CONFIG_I40E_DCB
38595  void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
38596                            struct i40e_dcbx_config *old_cfg,
38597 diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
38598 index ce626eace692..140b677f114d 100644
38599 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
38600 +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
38601 @@ -1566,8 +1566,10 @@ enum i40e_aq_phy_type {
38602         I40E_PHY_TYPE_25GBASE_LR                = 0x22,
38603         I40E_PHY_TYPE_25GBASE_AOC               = 0x23,
38604         I40E_PHY_TYPE_25GBASE_ACC               = 0x24,
38605 -       I40E_PHY_TYPE_2_5GBASE_T                = 0x30,
38606 -       I40E_PHY_TYPE_5GBASE_T                  = 0x31,
38607 +       I40E_PHY_TYPE_2_5GBASE_T                = 0x26,
38608 +       I40E_PHY_TYPE_5GBASE_T                  = 0x27,
38609 +       I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS    = 0x30,
38610 +       I40E_PHY_TYPE_5GBASE_T_LINK_STATUS      = 0x31,
38611         I40E_PHY_TYPE_MAX,
38612         I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP   = 0xFD,
38613         I40E_PHY_TYPE_EMPTY                     = 0xFE,
38614 diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
38615 index a2dba32383f6..32f3facbed1a 100644
38616 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c
38617 +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
38618 @@ -375,6 +375,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
38619                                 clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
38620                                           &cdev->state);
38621                                 i40e_client_del_instance(pf);
38622 +                               return;
38623                         }
38624                 }
38625         }
38626 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
38627 index ec19e18305ec..ce35e064cf60 100644
38628 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c
38629 +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
38630 @@ -1154,8 +1154,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
38631                 break;
38632         case I40E_PHY_TYPE_100BASE_TX:
38633         case I40E_PHY_TYPE_1000BASE_T:
38634 -       case I40E_PHY_TYPE_2_5GBASE_T:
38635 -       case I40E_PHY_TYPE_5GBASE_T:
38636 +       case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
38637 +       case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
38638         case I40E_PHY_TYPE_10GBASE_T:
38639                 media = I40E_MEDIA_TYPE_BASET;
38640                 break;
38641 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
38642 index 0e92668012e3..93dd58fda272 100644
38643 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
38644 +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
38645 @@ -841,8 +841,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
38646                                                              10000baseT_Full);
38647                 break;
38648         case I40E_PHY_TYPE_10GBASE_T:
38649 -       case I40E_PHY_TYPE_5GBASE_T:
38650 -       case I40E_PHY_TYPE_2_5GBASE_T:
38651 +       case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
38652 +       case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
38653         case I40E_PHY_TYPE_1000BASE_T:
38654         case I40E_PHY_TYPE_100BASE_TX:
38655                 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
38656 @@ -1409,7 +1409,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
38658                 memset(&config, 0, sizeof(config));
38659                 config.phy_type = abilities.phy_type;
38660 -               config.abilities = abilities.abilities;
38661 +               config.abilities = abilities.abilities |
38662 +                                  I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
38663                 config.phy_type_ext = abilities.phy_type_ext;
38664                 config.link_speed = abilities.link_speed;
38665                 config.eee_capability = abilities.eee_capability;
38666 @@ -5287,7 +5288,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
38667                         i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL);
38668                         i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
38669                 } else {
38670 -                       i40e_set_lldp_forwarding(pf, false);
38671                         status = i40e_aq_start_lldp(&pf->hw, false, NULL);
38672                         if (status) {
38673                                 adq_err = pf->hw.aq.asq_last_status;
38674 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
38675 index 527023ee4c07..ac4b44fc19f1 100644
38676 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
38677 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
38678 @@ -6878,40 +6878,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
38680  #endif /* CONFIG_I40E_DCB */
38682 -/**
38683 - * i40e_set_lldp_forwarding - set forwarding of lldp frames
38684 - * @pf: PF being configured
38685 - * @enable: if forwarding to OS shall be enabled
38686 - *
38687 - * Toggle forwarding of lldp frames behavior,
38688 - * When passing DCB control from firmware to software
38689 - * lldp frames must be forwarded to the software based
38690 - * lldp agent.
38691 - */
38692 -void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable)
38694 -       if (pf->lan_vsi == I40E_NO_VSI)
38695 -               return;
38697 -       if (!pf->vsi[pf->lan_vsi])
38698 -               return;
38700 -       /* No need to check the outcome, commands may fail
38701 -        * if desired value is already set
38702 -        */
38703 -       i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
38704 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX |
38705 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
38706 -                                             pf->vsi[pf->lan_vsi]->seid, 0,
38707 -                                             enable, NULL, NULL);
38709 -       i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
38710 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX |
38711 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
38712 -                                             pf->vsi[pf->lan_vsi]->seid, 0,
38713 -                                             enable, NULL, NULL);
38716  /**
38717   * i40e_print_link_message - print link up or down
38718   * @vsi: the VSI for which link needs a message
38719 @@ -10735,10 +10701,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
38720          */
38721         i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
38722                                                        pf->main_vsi_seid);
38723 -#ifdef CONFIG_I40E_DCB
38724 -       if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
38725 -               i40e_set_lldp_forwarding(pf, true);
38726 -#endif /* CONFIG_I40E_DCB */
38728         /* restart the VSIs that were rebuilt and running before the reset */
38729         i40e_pf_unquiesce_all_vsi(pf);
38730 @@ -15753,10 +15715,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
38731          */
38732         i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
38733                                                        pf->main_vsi_seid);
38734 -#ifdef CONFIG_I40E_DCB
38735 -       if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
38736 -               i40e_set_lldp_forwarding(pf, true);
38737 -#endif /* CONFIG_I40E_DCB */
38739         if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
38740                 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
38741 diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
38742 index 06b4271219b1..70b515049540 100644
38743 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
38744 +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
38745 @@ -1961,10 +1961,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
38746                                  union i40e_rx_desc *rx_desc)
38749 -       /* XDP packets use error pointer so abort at this point */
38750 -       if (IS_ERR(skb))
38751 -               return true;
38753         /* ERR_MASK will only have valid bits if EOP set, and
38754          * what we are doing here is actually checking
38755          * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
38756 @@ -2534,7 +2530,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
38757                 }
38759                 /* exit if we failed to retrieve a buffer */
38760 -               if (!skb) {
38761 +               if (!xdp_res && !skb) {
38762                         rx_ring->rx_stats.alloc_buff_failed++;
38763                         rx_buffer->pagecnt_bias++;
38764                         break;
38765 @@ -2547,7 +2543,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
38766                 if (i40e_is_non_eop(rx_ring, rx_desc))
38767                         continue;
38769 -               if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
38770 +               if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
38771                         skb = NULL;
38772                         continue;
38773                 }
38774 diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
38775 index 5c10faaca790..c81109a63e90 100644
38776 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h
38777 +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
38778 @@ -239,11 +239,8 @@ struct i40e_phy_info {
38779  #define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
38780                                              I40E_PHY_TYPE_OFFSET)
38781  /* Offset for 2.5G/5G PHY Types value to bit number conversion */
38782 -#define I40E_PHY_TYPE_OFFSET2 (-10)
38783 -#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
38784 -                                            I40E_PHY_TYPE_OFFSET2)
38785 -#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
38786 -                                            I40E_PHY_TYPE_OFFSET2)
38787 +#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T)
38788 +#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T)
38789  #define I40E_HW_CAP_MAX_GPIO                   30
38790  /* Capabilities of a PF or a VF or the whole device */
38791  struct i40e_hw_capabilities {
38792 diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
38793 index dc5b3c06d1e0..ebd08543791b 100644
38794 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c
38795 +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
38796 @@ -3899,8 +3899,6 @@ static void iavf_remove(struct pci_dev *pdev)
38798         iounmap(hw->hw_addr);
38799         pci_release_regions(pdev);
38800 -       iavf_free_all_tx_resources(adapter);
38801 -       iavf_free_all_rx_resources(adapter);
38802         iavf_free_queues(adapter);
38803         kfree(adapter->vf_res);
38804         spin_lock_bh(&adapter->mac_vlan_list_lock);
38805 diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
38806 index d13c7fc8fb0a..195d122c9cb2 100644
38807 --- a/drivers/net/ethernet/intel/ice/ice_lib.c
38808 +++ b/drivers/net/ethernet/intel/ice/ice_lib.c
38809 @@ -2818,38 +2818,46 @@ int ice_vsi_release(struct ice_vsi *vsi)
38812  /**
38813 - * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
38814 + * ice_vsi_rebuild_update_coalesce_intrl - set interrupt rate limit for a q_vector
38815   * @q_vector: pointer to q_vector which is being updated
38816 - * @coalesce: pointer to array of struct with stored coalesce
38817 + * @stored_intrl_setting: original INTRL setting
38818   *
38819   * Set coalesce param in q_vector and update these parameters in HW.
38820   */
38821  static void
38822 -ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
38823 -                               struct ice_coalesce_stored *coalesce)
38824 +ice_vsi_rebuild_update_coalesce_intrl(struct ice_q_vector *q_vector,
38825 +                                     u16 stored_intrl_setting)
38827 -       struct ice_ring_container *rx_rc = &q_vector->rx;
38828 -       struct ice_ring_container *tx_rc = &q_vector->tx;
38829         struct ice_hw *hw = &q_vector->vsi->back->hw;
38831 -       tx_rc->itr_setting = coalesce->itr_tx;
38832 -       rx_rc->itr_setting = coalesce->itr_rx;
38834 -       /* dynamic ITR values will be updated during Tx/Rx */
38835 -       if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
38836 -               wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
38837 -                    ITR_REG_ALIGN(tx_rc->itr_setting) >>
38838 -                    ICE_ITR_GRAN_S);
38839 -       if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
38840 -               wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
38841 -                    ITR_REG_ALIGN(rx_rc->itr_setting) >>
38842 -                    ICE_ITR_GRAN_S);
38844 -       q_vector->intrl = coalesce->intrl;
38845 +       q_vector->intrl = stored_intrl_setting;
38846         wr32(hw, GLINT_RATE(q_vector->reg_idx),
38847              ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
38850 +/**
38851 + * ice_vsi_rebuild_update_coalesce_itr - set coalesce for a q_vector
38852 + * @q_vector: pointer to q_vector which is being updated
38853 + * @rc: pointer to ring container
38854 + * @stored_itr_setting: original ITR setting
38855 + *
38856 + * Set coalesce param in q_vector and update these parameters in HW.
38857 + */
38858 +static void
38859 +ice_vsi_rebuild_update_coalesce_itr(struct ice_q_vector *q_vector,
38860 +                                   struct ice_ring_container *rc,
38861 +                                   u16 stored_itr_setting)
38863 +       struct ice_hw *hw = &q_vector->vsi->back->hw;
38865 +       rc->itr_setting = stored_itr_setting;
38867 +       /* dynamic ITR values will be updated during Tx/Rx */
38868 +       if (!ITR_IS_DYNAMIC(rc->itr_setting))
38869 +               wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
38870 +                    ITR_REG_ALIGN(rc->itr_setting) >> ICE_ITR_GRAN_S);
38873  /**
38874   * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
38875   * @vsi: VSI connected with q_vectors
38876 @@ -2869,6 +2877,11 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
38877                 coalesce[i].itr_tx = q_vector->tx.itr_setting;
38878                 coalesce[i].itr_rx = q_vector->rx.itr_setting;
38879                 coalesce[i].intrl = q_vector->intrl;
38881 +               if (i < vsi->num_txq)
38882 +                       coalesce[i].tx_valid = true;
38883 +               if (i < vsi->num_rxq)
38884 +                       coalesce[i].rx_valid = true;
38885         }
38887         return vsi->num_q_vectors;
38888 @@ -2893,17 +2906,59 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
38889         if ((size && !coalesce) || !vsi)
38890                 return;
38892 -       for (i = 0; i < size && i < vsi->num_q_vectors; i++)
38893 -               ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
38894 -                                               &coalesce[i]);
38896 -       /* number of q_vectors increased, so assume coalesce settings were
38897 -        * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
38898 -        * the previous settings from q_vector 0 for all of the new q_vectors
38899 +       /* There are a couple of cases that have to be handled here:
38900 +        *   1. The case where the number of queue vectors stays the same, but
38901 +        *      the number of Tx or Rx rings changes (the first for loop)
38902 +        *   2. The case where the number of queue vectors increased (the
38903 +        *      second for loop)
38904          */
38905 -       for (; i < vsi->num_q_vectors; i++)
38906 -               ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
38907 -                                               &coalesce[0]);
38908 +       for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
38909 +               /* There are 2 cases to handle here and they are the same for
38910 +                * both Tx and Rx:
38911 +                *   if the entry was valid previously (coalesce[i].[tr]x_valid
38912 +                *   and the loop variable is less than the number of rings
38913 +                *   allocated, then write the previous values
38914 +                *
38915 +                *   if the entry was not valid previously, but the number of
38916 +                *   rings is less than are allocated (this means the number of
38917 +                *   rings increased from previously), then write out the
38918 +                *   values in the first element
38919 +                */
38920 +               if (i < vsi->alloc_rxq && coalesce[i].rx_valid)
38921 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
38922 +                                                           &vsi->q_vectors[i]->rx,
38923 +                                                           coalesce[i].itr_rx);
38924 +               else if (i < vsi->alloc_rxq)
38925 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
38926 +                                                           &vsi->q_vectors[i]->rx,
38927 +                                                           coalesce[0].itr_rx);
38929 +               if (i < vsi->alloc_txq && coalesce[i].tx_valid)
38930 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
38931 +                                                           &vsi->q_vectors[i]->tx,
38932 +                                                           coalesce[i].itr_tx);
38933 +               else if (i < vsi->alloc_txq)
38934 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
38935 +                                                           &vsi->q_vectors[i]->tx,
38936 +                                                           coalesce[0].itr_tx);
38938 +               ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
38939 +                                                     coalesce[i].intrl);
38940 +       }
38942 +       /* the number of queue vectors increased so write whatever is in
38943 +        * the first element
38944 +        */
38945 +       for (; i < vsi->num_q_vectors; i++) {
38946 +               ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
38947 +                                                   &vsi->q_vectors[i]->tx,
38948 +                                                   coalesce[0].itr_tx);
38949 +               ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
38950 +                                                   &vsi->q_vectors[i]->rx,
38951 +                                                   coalesce[0].itr_rx);
38952 +               ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
38953 +                                                     coalesce[0].intrl);
38954 +       }
38957  /**
38958 @@ -2932,9 +2987,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
38960         coalesce = kcalloc(vsi->num_q_vectors,
38961                            sizeof(struct ice_coalesce_stored), GFP_KERNEL);
38962 -       if (coalesce)
38963 -               prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
38964 -                                                                 coalesce);
38965 +       if (!coalesce)
38966 +               return -ENOMEM;
38968 +       prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
38970         ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
38971         ice_vsi_free_q_vectors(vsi);
38973 diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
38974 index 5dab77504fa5..672a7ff0ee36 100644
38975 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h
38976 +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
38977 @@ -351,6 +351,8 @@ struct ice_coalesce_stored {
38978         u16 itr_tx;
38979         u16 itr_rx;
38980         u8 intrl;
38981 +       u8 tx_valid;
38982 +       u8 rx_valid;
38983  };
38985  /* iterator for handling rings in ring container */
38986 diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
38987 index 25dd903a3e92..d849b0f65de2 100644
38988 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
38989 +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
38990 @@ -431,7 +431,8 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
38991                         netif_carrier_on(port->dev);
38992                         if (!delayed_work_pending(caching_dw))
38993                                 queue_delayed_work(prestera_wq, caching_dw, 0);
38994 -               } else {
38995 +               } else if (netif_running(port->dev) &&
38996 +                          netif_carrier_ok(port->dev)) {
38997                         netif_carrier_off(port->dev);
38998                         if (delayed_work_pending(caching_dw))
38999                                 cancel_delayed_work(caching_dw);
39000 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
39001 index 01d3ee4b5829..bcd5e7ae8482 100644
39002 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
39003 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
39004 @@ -1319,7 +1319,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
39005                 skb->protocol = eth_type_trans(skb, netdev);
39007                 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
39008 -                   RX_DMA_VID(trxd.rxd3))
39009 +                   (trxd.rxd2 & RX_DMA_VTAG))
39010                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
39011                                                RX_DMA_VID(trxd.rxd3));
39012                 skb_record_rx_queue(skb, 0);
39013 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
39014 index fd3cec8f06ba..c47272100615 100644
39015 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
39016 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
39017 @@ -296,6 +296,7 @@
39018  #define RX_DMA_LSO             BIT(30)
39019  #define RX_DMA_PLEN0(_x)       (((_x) & 0x3fff) << 16)
39020  #define RX_DMA_GET_PLEN0(_x)   (((_x) >> 16) & 0x3fff)
39021 +#define RX_DMA_VTAG            BIT(15)
39023  /* QDMA descriptor rxd3 */
39024  #define RX_DMA_VID(_x)         ((_x) & 0xfff)
39025 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
39026 index bdbffe484fce..d2efe2455955 100644
39027 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
39028 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
39029 @@ -576,7 +576,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
39031         pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
39032         wqe = MLX5E_TX_FETCH_WQE(sq, pi);
39033 -       prefetchw(wqe->data);
39034 +       net_prefetchw(wqe->data);
39036         *session = (struct mlx5e_tx_mpwqe) {
39037                 .wqe = wqe,
39038 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
39039 index 22bee4990232..bb61f52d782d 100644
39040 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
39041 +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
39042 @@ -850,7 +850,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
39043                 return;
39044         }
39046 -       if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
39047 +       if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
39048             MLX5_ACCEL_ESP_ACTION_DECRYPT)
39049                 ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
39051 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
39052 index 9143ec326ebf..f146c618a78e 100644
39053 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
39054 +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
39055 @@ -1532,6 +1532,7 @@ static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *val
39057         DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
39058         DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
39059 +       misc_mask->source_eswitch_owner_vhca_id = 0;
39062  static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
39063 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
39064 index 7846a21555ef..1f6bc0c7e91d 100644
39065 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
39066 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
39067 @@ -535,6 +535,16 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
39068         u16 erif_index = 0;
39069         int err;
39071 +       /* Add the eRIF */
39072 +       if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
39073 +               erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
39074 +               err = mr->mr_ops->route_erif_add(mlxsw_sp,
39075 +                                                rve->mr_route->route_priv,
39076 +                                                erif_index);
39077 +               if (err)
39078 +                       return err;
39079 +       }
39081         /* Update the route action, as the new eVIF can be a tunnel or a pimreg
39082          * device which will require updating the action.
39083          */
39084 @@ -544,17 +554,7 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
39085                                                       rve->mr_route->route_priv,
39086                                                       route_action);
39087                 if (err)
39088 -                       return err;
39089 -       }
39091 -       /* Add the eRIF */
39092 -       if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
39093 -               erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
39094 -               err = mr->mr_ops->route_erif_add(mlxsw_sp,
39095 -                                                rve->mr_route->route_priv,
39096 -                                                erif_index);
39097 -               if (err)
39098 -                       goto err_route_erif_add;
39099 +                       goto err_route_action_update;
39100         }
39102         /* Update the minimum MTU */
39103 @@ -572,14 +572,14 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
39104         return 0;
39106  err_route_min_mtu_update:
39107 -       if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
39108 -               mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
39109 -                                          erif_index);
39110 -err_route_erif_add:
39111         if (route_action != rve->mr_route->route_action)
39112                 mr->mr_ops->route_action_update(mlxsw_sp,
39113                                                 rve->mr_route->route_priv,
39114                                                 rve->mr_route->route_action);
39115 +err_route_action_update:
39116 +       if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
39117 +               mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
39118 +                                          erif_index);
39119         return err;
39122 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
39123 index 713ee3041d49..bea978df7713 100644
39124 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
39125 +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
39126 @@ -364,6 +364,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
39128         attrs.split = eth_port.is_split;
39129         attrs.splittable = !attrs.split;
39130 +       attrs.lanes = eth_port.port_lanes;
39131         attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
39132         attrs.phys.port_number = eth_port.label_port;
39133         attrs.phys.split_subport_number = eth_port.label_subport;
39134 diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
39135 index 117188e3c7de..87b8c032195d 100644
39136 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
39137 +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
39138 @@ -1437,6 +1437,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
39140         struct emac_tpd tpd;
39141         u32 prod_idx;
39142 +       int len;
39144         memset(&tpd, 0, sizeof(tpd));
39146 @@ -1456,9 +1457,10 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
39147         if (skb_network_offset(skb) != ETH_HLEN)
39148                 TPD_TYP_SET(&tpd, 1);
39150 +       len = skb->len;
39151         emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
39153 -       netdev_sent_queue(adpt->netdev, skb->len);
39154 +       netdev_sent_queue(adpt->netdev, len);
39156         /* Make sure the are enough free descriptors to hold one
39157          * maximum-sized SKB.  We need one desc for each fragment,
39158 diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
39159 index eb0c03bdb12d..cad57d58d764 100644
39160 --- a/drivers/net/ethernet/renesas/ravb_main.c
39161 +++ b/drivers/net/ethernet/renesas/ravb_main.c
39162 @@ -911,31 +911,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
39163         int q = napi - priv->napi;
39164         int mask = BIT(q);
39165         int quota = budget;
39166 -       u32 ris0, tis;
39168 -       for (;;) {
39169 -               tis = ravb_read(ndev, TIS);
39170 -               ris0 = ravb_read(ndev, RIS0);
39171 -               if (!((ris0 & mask) || (tis & mask)))
39172 -                       break;
39173 +       /* Processing RX Descriptor Ring */
39174 +       /* Clear RX interrupt */
39175 +       ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
39176 +       if (ravb_rx(ndev, &quota, q))
39177 +               goto out;
39179 -               /* Processing RX Descriptor Ring */
39180 -               if (ris0 & mask) {
39181 -                       /* Clear RX interrupt */
39182 -                       ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
39183 -                       if (ravb_rx(ndev, &quota, q))
39184 -                               goto out;
39185 -               }
39186 -               /* Processing TX Descriptor Ring */
39187 -               if (tis & mask) {
39188 -                       spin_lock_irqsave(&priv->lock, flags);
39189 -                       /* Clear TX interrupt */
39190 -                       ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
39191 -                       ravb_tx_free(ndev, q, true);
39192 -                       netif_wake_subqueue(ndev, q);
39193 -                       spin_unlock_irqrestore(&priv->lock, flags);
39194 -               }
39195 -       }
39196 +       /* Processing RX Descriptor Ring */
39197 +       spin_lock_irqsave(&priv->lock, flags);
39198 +       /* Clear TX interrupt */
39199 +       ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
39200 +       ravb_tx_free(ndev, q, true);
39201 +       netif_wake_subqueue(ndev, q);
39202 +       spin_unlock_irqrestore(&priv->lock, flags);
39204         napi_complete(napi);
39206 diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
39207 index da6886dcac37..4fa72b573c17 100644
39208 --- a/drivers/net/ethernet/sfc/ef10.c
39209 +++ b/drivers/net/ethernet/sfc/ef10.c
39210 @@ -2928,8 +2928,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
39212         /* Get the transmit queue */
39213         tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
39214 -       tx_queue = efx_channel_get_tx_queue(channel,
39215 -                                           tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
39216 +       tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
39218         if (!tx_queue->timestamping) {
39219                 /* Transmit completion */
39220 diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
39221 index 1bfeee283ea9..a3ca406a3561 100644
39222 --- a/drivers/net/ethernet/sfc/efx_channels.c
39223 +++ b/drivers/net/ethernet/sfc/efx_channels.c
39224 @@ -914,6 +914,8 @@ int efx_set_channels(struct efx_nic *efx)
39225                         }
39226                 }
39227         }
39228 +       if (xdp_queue_number)
39229 +               efx->xdp_tx_queue_count = xdp_queue_number;
39231         rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
39232         if (rc)
39233 diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
39234 index d75cf5ff5686..49df02ecee91 100644
39235 --- a/drivers/net/ethernet/sfc/farch.c
39236 +++ b/drivers/net/ethernet/sfc/farch.c
39237 @@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
39238                 /* Transmit completion */
39239                 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
39240                 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
39241 -               tx_queue = efx_channel_get_tx_queue(
39242 -                       channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
39243 +               tx_queue = channel->tx_queue +
39244 +                               (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
39245                 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
39246         } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
39247                 /* Rewrite the FIFO write pointer */
39248                 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
39249 -               tx_queue = efx_channel_get_tx_queue(
39250 -                       channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
39251 +               tx_queue = channel->tx_queue +
39252 +                               (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
39254                 netif_tx_lock(efx->net_dev);
39255                 efx_farch_notify_tx_desc(tx_queue);
39256 @@ -1081,16 +1081,16 @@ static void
39257  efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
39259         struct efx_tx_queue *tx_queue;
39260 +       struct efx_channel *channel;
39261         int qid;
39263         qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
39264         if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
39265 -               tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
39266 -                                           qid % EFX_MAX_TXQ_PER_CHANNEL);
39267 -               if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
39268 +               channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
39269 +               tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
39270 +               if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
39271                         efx_farch_magic_event(tx_queue->channel,
39272                                               EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
39273 -               }
39274         }
39277 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
39278 index bf3250e0e59c..749585fe6fc9 100644
39279 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
39280 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
39281 @@ -352,6 +352,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
39282         plat_dat->bsp_priv = gmac;
39283         plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
39284         plat_dat->multicast_filter_bins = 0;
39285 +       plat_dat->tx_fifo_size = 8192;
39286 +       plat_dat->rx_fifo_size = 8192;
39288         err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
39289         if (err)
39290 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
39291 index 29f765a246a0..aaf37598cbd3 100644
39292 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
39293 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
39294 @@ -638,6 +638,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
39295         value &= ~GMAC_PACKET_FILTER_PCF;
39296         value &= ~GMAC_PACKET_FILTER_PM;
39297         value &= ~GMAC_PACKET_FILTER_PR;
39298 +       value &= ~GMAC_PACKET_FILTER_RA;
39299         if (dev->flags & IFF_PROMISC) {
39300                 /* VLAN Tag Filter Fail Packets Queuing */
39301                 if (hw->vlan_fail_q_en) {
39302 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
39303 index 4749bd0af160..c6f24abf6432 100644
39304 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
39305 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
39306 @@ -2757,8 +2757,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
39308         /* Enable TSO */
39309         if (priv->tso) {
39310 -               for (chan = 0; chan < tx_cnt; chan++)
39311 +               for (chan = 0; chan < tx_cnt; chan++) {
39312 +                       struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
39314 +                       /* TSO and TBS cannot co-exist */
39315 +                       if (tx_q->tbs & STMMAC_TBS_AVAIL)
39316 +                               continue;
39318                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
39319 +               }
39320         }
39322         /* Enable Split Header */
39323 @@ -2850,9 +2857,8 @@ static int stmmac_open(struct net_device *dev)
39324                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
39325                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
39327 +               /* Setup per-TXQ tbs flag before TX descriptor alloc */
39328                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
39329 -               if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
39330 -                       tx_q->tbs &= ~STMMAC_TBS_AVAIL;
39331         }
39333         ret = alloc_dma_desc_resources(priv);
39334 diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
39335 index c7031e1960d4..03055c96f076 100644
39336 --- a/drivers/net/ethernet/ti/davinci_emac.c
39337 +++ b/drivers/net/ethernet/ti/davinci_emac.c
39338 @@ -169,11 +169,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
39339  /* EMAC mac_status register */
39340  #define EMAC_MACSTATUS_TXERRCODE_MASK  (0xF00000)
39341  #define EMAC_MACSTATUS_TXERRCODE_SHIFT (20)
39342 -#define EMAC_MACSTATUS_TXERRCH_MASK    (0x7)
39343 +#define EMAC_MACSTATUS_TXERRCH_MASK    (0x70000)
39344  #define EMAC_MACSTATUS_TXERRCH_SHIFT   (16)
39345  #define EMAC_MACSTATUS_RXERRCODE_MASK  (0xF000)
39346  #define EMAC_MACSTATUS_RXERRCODE_SHIFT (12)
39347 -#define EMAC_MACSTATUS_RXERRCH_MASK    (0x7)
39348 +#define EMAC_MACSTATUS_RXERRCH_MASK    (0x700)
39349  #define EMAC_MACSTATUS_RXERRCH_SHIFT   (8)
39351  /* EMAC RX register masks */
39352 diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
39353 index c6eb7f2368aa..911b5ef9e680 100644
39354 --- a/drivers/net/ethernet/xilinx/Kconfig
39355 +++ b/drivers/net/ethernet/xilinx/Kconfig
39356 @@ -18,12 +18,14 @@ if NET_VENDOR_XILINX
39358  config XILINX_EMACLITE
39359         tristate "Xilinx 10/100 Ethernet Lite support"
39360 +       depends on HAS_IOMEM
39361         select PHYLIB
39362         help
39363           This driver supports the 10/100 Ethernet Lite from Xilinx.
39365  config XILINX_AXI_EMAC
39366         tristate "Xilinx 10/100/1000 AXI Ethernet support"
39367 +       depends on HAS_IOMEM
39368         select PHYLINK
39369         help
39370           This driver supports the 10/100/1000 Ethernet from Xilinx for the
39371 @@ -31,6 +33,7 @@ config XILINX_AXI_EMAC
39373  config XILINX_LL_TEMAC
39374         tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
39375 +       depends on HAS_IOMEM
39376         select PHYLIB
39377         help
39378           This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
39379 diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
39380 index 0152f1e70783..9defaa21a1a9 100644
39381 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
39382 +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
39383 @@ -1085,7 +1085,7 @@ static int init_queues(struct port *port)
39384         int i;
39386         if (!ports_open) {
39387 -               dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
39388 +               dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
39389                                            POOL_ALLOC_SIZE, 32, 0);
39390                 if (!dma_pool)
39391                         return -ENOMEM;
39392 @@ -1435,6 +1435,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
39393         ndev->netdev_ops = &ixp4xx_netdev_ops;
39394         ndev->ethtool_ops = &ixp4xx_ethtool_ops;
39395         ndev->tx_queue_len = 100;
39396 +       /* Inherit the DMA masks from the platform device */
39397 +       ndev->dev.dma_mask = dev->dma_mask;
39398 +       ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
39400         netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
39402 diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
39403 index f722079dfb6a..f99c1048c97e 100644
39404 --- a/drivers/net/fddi/Kconfig
39405 +++ b/drivers/net/fddi/Kconfig
39406 @@ -40,17 +40,20 @@ config DEFXX
39408  config DEFXX_MMIO
39409         bool
39410 -       prompt "Use MMIO instead of PIO" if PCI || EISA
39411 +       prompt "Use MMIO instead of IOP" if PCI || EISA
39412         depends on DEFXX
39413 -       default n if PCI || EISA
39414 +       default n if EISA
39415         default y
39416         help
39417           This instructs the driver to use EISA or PCI memory-mapped I/O
39418 -         (MMIO) as appropriate instead of programmed I/O ports (PIO).
39419 +         (MMIO) as appropriate instead of programmed I/O ports (IOP).
39420           Enabling this gives an improvement in processing time in parts
39421 -         of the driver, but it may cause problems with EISA (DEFEA)
39422 -         adapters.  TURBOchannel does not have the concept of I/O ports,
39423 -         so MMIO is always used for these (DEFTA) adapters.
39424 +         of the driver, but it requires a memory window to be configured
39425 +         for EISA (DEFEA) adapters that may not always be available.
39426 +         Conversely some PCIe host bridges do not support IOP, so MMIO
39427 +         may be required to access PCI (DEFPA) adapters on downstream PCI
39428 +         buses with some systems.  TURBOchannel does not have the concept
39429 +         of I/O ports, so MMIO is always used for these (DEFTA) adapters.
39431           If unsure, say N.
39433 diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
39434 index 077c68498f04..c7ce6d5491af 100644
39435 --- a/drivers/net/fddi/defxx.c
39436 +++ b/drivers/net/fddi/defxx.c
39437 @@ -495,6 +495,25 @@ static const struct net_device_ops dfx_netdev_ops = {
39438         .ndo_set_mac_address    = dfx_ctl_set_mac_address,
39439  };
39441 +static void dfx_register_res_alloc_err(const char *print_name, bool mmio,
39442 +                                      bool eisa)
39444 +       pr_err("%s: Cannot use %s, no address set, aborting\n",
39445 +              print_name, mmio ? "MMIO" : "I/O");
39446 +       pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n",
39447 +              print_name, mmio ? 'n' : 'y');
39448 +       if (eisa && mmio)
39449 +               pr_err("%s: Or run ECU and set adapter's MMIO location\n",
39450 +                      print_name);
39453 +static void dfx_register_res_err(const char *print_name, bool mmio,
39454 +                                unsigned long start, unsigned long len)
39456 +       pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
39457 +              print_name, mmio ? "MMIO" : "I/O", len, start);
39460  /*
39461   * ================
39462   * = dfx_register =
39463 @@ -568,15 +587,12 @@ static int dfx_register(struct device *bdev)
39464         dev_set_drvdata(bdev, dev);
39466         dfx_get_bars(bdev, bar_start, bar_len);
39467 -       if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
39468 -               pr_err("%s: Cannot use MMIO, no address set, aborting\n",
39469 -                      print_name);
39470 -               pr_err("%s: Run ECU and set adapter's MMIO location\n",
39471 -                      print_name);
39472 -               pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
39473 -                      "\n", print_name);
39474 +       if (bar_len[0] == 0 ||
39475 +           (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
39476 +               dfx_register_res_alloc_err(print_name, dfx_use_mmio,
39477 +                                          dfx_bus_eisa);
39478                 err = -ENXIO;
39479 -               goto err_out;
39480 +               goto err_out_disable;
39481         }
39483         if (dfx_use_mmio)
39484 @@ -585,18 +601,16 @@ static int dfx_register(struct device *bdev)
39485         else
39486                 region = request_region(bar_start[0], bar_len[0], print_name);
39487         if (!region) {
39488 -               pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
39489 -                      "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name,
39490 -                      (long)bar_len[0], (long)bar_start[0]);
39491 +               dfx_register_res_err(print_name, dfx_use_mmio,
39492 +                                    bar_start[0], bar_len[0]);
39493                 err = -EBUSY;
39494                 goto err_out_disable;
39495         }
39496         if (bar_start[1] != 0) {
39497                 region = request_region(bar_start[1], bar_len[1], print_name);
39498                 if (!region) {
39499 -                       pr_err("%s: Cannot reserve I/O resource "
39500 -                              "0x%lx @ 0x%lx, aborting\n", print_name,
39501 -                              (long)bar_len[1], (long)bar_start[1]);
39502 +                       dfx_register_res_err(print_name, 0,
39503 +                                            bar_start[1], bar_len[1]);
39504                         err = -EBUSY;
39505                         goto err_out_csr_region;
39506                 }
39507 @@ -604,9 +618,8 @@ static int dfx_register(struct device *bdev)
39508         if (bar_start[2] != 0) {
39509                 region = request_region(bar_start[2], bar_len[2], print_name);
39510                 if (!region) {
39511 -                       pr_err("%s: Cannot reserve I/O resource "
39512 -                              "0x%lx @ 0x%lx, aborting\n", print_name,
39513 -                              (long)bar_len[2], (long)bar_start[2]);
39514 +                       dfx_register_res_err(print_name, 0,
39515 +                                            bar_start[2], bar_len[2]);
39516                         err = -EBUSY;
39517                         goto err_out_bh_region;
39518                 }
39519 diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
39520 index 42f31c681846..61cd3dd4deab 100644
39521 --- a/drivers/net/geneve.c
39522 +++ b/drivers/net/geneve.c
39523 @@ -891,7 +891,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
39524         __be16 sport;
39525         int err;
39527 -       if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
39528 +       if (!pskb_inet_may_pull(skb))
39529                 return -EINVAL;
39531         sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
39532 @@ -988,7 +988,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
39533         __be16 sport;
39534         int err;
39536 -       if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
39537 +       if (!pskb_inet_may_pull(skb))
39538                 return -EINVAL;
39540         sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
39541 diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
39542 index 390d3403386a..144892060718 100644
39543 --- a/drivers/net/ipa/gsi.c
39544 +++ b/drivers/net/ipa/gsi.c
39545 @@ -211,8 +211,8 @@ static void gsi_irq_setup(struct gsi *gsi)
39546         iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
39548         /* The inter-EE registers are in the non-adjusted address range */
39549 -       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
39550 -       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
39551 +       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET);
39552 +       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET);
39554         iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
39556 diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
39557 index 1622d8cf8dea..48ef04afab79 100644
39558 --- a/drivers/net/ipa/gsi_reg.h
39559 +++ b/drivers/net/ipa/gsi_reg.h
39560 @@ -53,15 +53,15 @@
39561  #define GSI_EE_REG_ADJUST                      0x0000d000      /* IPA v4.5+ */
39563  /* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
39564 -#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
39565 -                       GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
39566 -#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
39567 -                       (0x0000c018 + 0x1000 * (ee))
39569 -#define GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET \
39570 -                       GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
39571 -#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
39572 -                       (0x0000c01c + 0x1000 * (ee))
39573 +#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
39574 +                       GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
39575 +#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
39576 +                       (0x0000c020 + 0x1000 * (ee))
39578 +#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
39579 +                       GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
39580 +#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
39581 +                       (0x0000c024 + 0x1000 * (ee))
39583  /* All other register offsets are relative to gsi->virt */
39584  #define GSI_CH_C_CNTXT_0_OFFSET(ch) \
39585 diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
39586 index 6eac50d4b42f..d453ec016168 100644
39587 --- a/drivers/net/phy/intel-xway.c
39588 +++ b/drivers/net/phy/intel-xway.c
39589 @@ -11,6 +11,18 @@
39591  #define XWAY_MDIO_IMASK                        0x19    /* interrupt mask */
39592  #define XWAY_MDIO_ISTAT                        0x1A    /* interrupt status */
39593 +#define XWAY_MDIO_LED                  0x1B    /* led control */
39595 +/* bit 15:12 are reserved */
39596 +#define XWAY_MDIO_LED_LED3_EN          BIT(11) /* Enable the integrated function of LED3 */
39597 +#define XWAY_MDIO_LED_LED2_EN          BIT(10) /* Enable the integrated function of LED2 */
39598 +#define XWAY_MDIO_LED_LED1_EN          BIT(9)  /* Enable the integrated function of LED1 */
39599 +#define XWAY_MDIO_LED_LED0_EN          BIT(8)  /* Enable the integrated function of LED0 */
39600 +/* bit 7:4 are reserved */
39601 +#define XWAY_MDIO_LED_LED3_DA          BIT(3)  /* Direct Access to LED3 */
39602 +#define XWAY_MDIO_LED_LED2_DA          BIT(2)  /* Direct Access to LED2 */
39603 +#define XWAY_MDIO_LED_LED1_DA          BIT(1)  /* Direct Access to LED1 */
39604 +#define XWAY_MDIO_LED_LED0_DA          BIT(0)  /* Direct Access to LED0 */
39606  #define XWAY_MDIO_INIT_WOL             BIT(15) /* Wake-On-LAN */
39607  #define XWAY_MDIO_INIT_MSRE            BIT(14)
39608 @@ -159,6 +171,15 @@ static int xway_gphy_config_init(struct phy_device *phydev)
39609         /* Clear all pending interrupts */
39610         phy_read(phydev, XWAY_MDIO_ISTAT);
39612 +       /* Ensure that integrated led function is enabled for all leds */
39613 +       err = phy_write(phydev, XWAY_MDIO_LED,
39614 +                       XWAY_MDIO_LED_LED0_EN |
39615 +                       XWAY_MDIO_LED_LED1_EN |
39616 +                       XWAY_MDIO_LED_LED2_EN |
39617 +                       XWAY_MDIO_LED_LED3_EN);
39618 +       if (err)
39619 +               return err;
39621         phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH,
39622                       XWAY_MMD_LEDCH_NACS_NONE |
39623                       XWAY_MMD_LEDCH_SBF_F02HZ |
39624 diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
39625 index 8018ddf7f316..f86c9ddc609e 100644
39626 --- a/drivers/net/phy/marvell.c
39627 +++ b/drivers/net/phy/marvell.c
39628 @@ -967,22 +967,28 @@ static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
39630  static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
39632 -       int val;
39633 +       int val, err;
39635         if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
39636                 return -E2BIG;
39638 -       if (!cnt)
39639 -               return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
39640 -                                     MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
39641 +       if (!cnt) {
39642 +               err = phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
39643 +                                    MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
39644 +       } else {
39645 +               val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
39646 +               val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
39648 -       val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
39649 -       val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
39650 +               err = phy_modify(phydev, MII_M1111_PHY_EXT_CR,
39651 +                                MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
39652 +                                MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
39653 +                                val);
39654 +       }
39656 -       return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
39657 -                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
39658 -                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
39659 -                         val);
39660 +       if (err < 0)
39661 +               return err;
39663 +       return genphy_soft_reset(phydev);
39666  static int m88e1111_get_tunable(struct phy_device *phydev,
39667 @@ -1025,22 +1031,28 @@ static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
39669  static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
39671 -       int val;
39672 +       int val, err;
39674         if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
39675                 return -E2BIG;
39677 -       if (!cnt)
39678 -               return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
39679 -                                     MII_M1011_PHY_SCR_DOWNSHIFT_EN);
39680 +       if (!cnt) {
39681 +               err = phy_clear_bits(phydev, MII_M1011_PHY_SCR,
39682 +                                    MII_M1011_PHY_SCR_DOWNSHIFT_EN);
39683 +       } else {
39684 +               val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
39685 +               val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
39687 -       val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
39688 -       val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
39689 +               err = phy_modify(phydev, MII_M1011_PHY_SCR,
39690 +                                MII_M1011_PHY_SCR_DOWNSHIFT_EN |
39691 +                                MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
39692 +                                val);
39693 +       }
39695 -       return phy_modify(phydev, MII_M1011_PHY_SCR,
39696 -                         MII_M1011_PHY_SCR_DOWNSHIFT_EN |
39697 -                         MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
39698 -                         val);
39699 +       if (err < 0)
39700 +               return err;
39702 +       return genphy_soft_reset(phydev);
39705  static int m88e1011_get_tunable(struct phy_device *phydev,
39706 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
39707 index cc38e326405a..af2e1759b523 100644
39708 --- a/drivers/net/phy/phy_device.c
39709 +++ b/drivers/net/phy/phy_device.c
39710 @@ -273,6 +273,9 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
39712         struct phy_device *phydev = to_phy_device(dev);
39714 +       if (phydev->mac_managed_pm)
39715 +               return 0;
39717         /* We must stop the state machine manually, otherwise it stops out of
39718          * control, possibly with the phydev->lock held. Upon resume, netdev
39719          * may call phy routines that try to grab the same lock, and that may
39720 @@ -294,6 +297,9 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
39721         struct phy_device *phydev = to_phy_device(dev);
39722         int ret;
39724 +       if (phydev->mac_managed_pm)
39725 +               return 0;
39727         if (!phydev->suspended_by_mdio_bus)
39728                 goto no_resume;
39730 diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
39731 index ddb78fb4d6dc..d8cac02a79b9 100644
39732 --- a/drivers/net/phy/smsc.c
39733 +++ b/drivers/net/phy/smsc.c
39734 @@ -185,10 +185,13 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
39735         return genphy_config_aneg(phydev);
39738 -static int lan87xx_config_aneg_ext(struct phy_device *phydev)
39739 +static int lan95xx_config_aneg_ext(struct phy_device *phydev)
39741         int rc;
39743 +       if (phydev->phy_id != 0x0007c0f0) /* not (LAN9500A or LAN9505A) */
39744 +               return lan87xx_config_aneg(phydev);
39746         /* Extend Manual AutoMDIX timer */
39747         rc = phy_read(phydev, PHY_EDPD_CONFIG);
39748         if (rc < 0)
39749 @@ -441,7 +444,7 @@ static struct phy_driver smsc_phy_driver[] = {
39750         .read_status    = lan87xx_read_status,
39751         .config_init    = smsc_phy_config_init,
39752         .soft_reset     = smsc_phy_reset,
39753 -       .config_aneg    = lan87xx_config_aneg_ext,
39754 +       .config_aneg    = lan95xx_config_aneg_ext,
39756         /* IRQ related */
39757         .config_intr    = smsc_phy_config_intr,
39758 diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
39759 index d650b39b6e5d..c1316718304d 100644
39760 --- a/drivers/net/usb/ax88179_178a.c
39761 +++ b/drivers/net/usb/ax88179_178a.c
39762 @@ -296,12 +296,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
39763         int ret;
39765         if (2 == size) {
39766 -               u16 buf;
39767 +               u16 buf = 0;
39768                 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
39769                 le16_to_cpus(&buf);
39770                 *((u16 *)data) = buf;
39771         } else if (4 == size) {
39772 -               u32 buf;
39773 +               u32 buf = 0;
39774                 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
39775                 le32_to_cpus(&buf);
39776                 *((u32 *)data) = buf;
39777 @@ -1296,6 +1296,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
39779         u8 mac[ETH_ALEN];
39781 +       memset(mac, 0, sizeof(mac));
39783         /* Maybe the boot loader passed the MAC address via device tree */
39784         if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) {
39785                 netif_dbg(dev, ifup, dev->net,
39786 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39787 index 9bc58e64b5b7..3ef4b2841402 100644
39788 --- a/drivers/net/usb/hso.c
39789 +++ b/drivers/net/usb/hso.c
39790 @@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
39791                         cancel_work_sync(&serial_table[i]->async_put_intf);
39792                         cancel_work_sync(&serial_table[i]->async_get_intf);
39793                         hso_serial_tty_unregister(serial);
39794 -                       kref_put(&serial_table[i]->ref, hso_serial_ref_free);
39795 +                       kref_put(&serial->parent->ref, hso_serial_ref_free);
39796                 }
39797         }
39799 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
39800 index e81c5699c952..d2b360cfb402 100644
39801 --- a/drivers/net/usb/lan78xx.c
39802 +++ b/drivers/net/usb/lan78xx.c
39803 @@ -2655,7 +2655,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
39804         while (!skb_queue_empty(&dev->rxq) &&
39805                !skb_queue_empty(&dev->txq) &&
39806                !skb_queue_empty(&dev->done)) {
39807 -               schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
39808 +               schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
39809                 set_current_state(TASK_UNINTERRUPTIBLE);
39810                 netif_dbg(dev, ifdown, dev->net,
39811                           "waited for %d urb completions\n", temp);
39812 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
39813 index f4f37ecfed58..36647378e016 100644
39814 --- a/drivers/net/usb/usbnet.c
39815 +++ b/drivers/net/usb/usbnet.c
39816 @@ -764,7 +764,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q)
39817         spin_lock_irqsave(&q->lock, flags);
39818         while (!skb_queue_empty(q)) {
39819                 spin_unlock_irqrestore(&q->lock, flags);
39820 -               schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
39821 +               schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
39822                 set_current_state(TASK_UNINTERRUPTIBLE);
39823                 spin_lock_irqsave(&q->lock, flags);
39824         }
39825 diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
39826 index 4d9dc7d15908..0720f5f92caa 100644
39827 --- a/drivers/net/wan/hdlc_fr.c
39828 +++ b/drivers/net/wan/hdlc_fr.c
39829 @@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
39831                 if (pad > 0) { /* Pad the frame with zeros */
39832                         if (__skb_pad(skb, pad, false))
39833 -                               goto out;
39834 +                               goto drop;
39835                         skb_put(skb, pad);
39836                 }
39837         }
39838 @@ -448,9 +448,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
39839         return NETDEV_TX_OK;
39841  drop:
39842 -       kfree_skb(skb);
39843 -out:
39844         dev->stats.tx_dropped++;
39845 +       kfree_skb(skb);
39846         return NETDEV_TX_OK;
39849 diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
39850 index c3372498f4f1..8fda0446ff71 100644
39851 --- a/drivers/net/wan/lapbether.c
39852 +++ b/drivers/net/wan/lapbether.c
39853 @@ -51,6 +51,8 @@ struct lapbethdev {
39854         struct list_head        node;
39855         struct net_device       *ethdev;        /* link to ethernet device */
39856         struct net_device       *axdev;         /* lapbeth device (lapb#) */
39857 +       bool                    up;
39858 +       spinlock_t              up_lock;        /* Protects "up" */
39859  };
39861  static LIST_HEAD(lapbeth_devices);
39862 @@ -101,8 +103,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
39863         rcu_read_lock();
39864         lapbeth = lapbeth_get_x25_dev(dev);
39865         if (!lapbeth)
39866 -               goto drop_unlock;
39867 -       if (!netif_running(lapbeth->axdev))
39868 +               goto drop_unlock_rcu;
39869 +       spin_lock_bh(&lapbeth->up_lock);
39870 +       if (!lapbeth->up)
39871                 goto drop_unlock;
39873         len = skb->data[0] + skb->data[1] * 256;
39874 @@ -117,11 +120,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
39875                 goto drop_unlock;
39876         }
39877  out:
39878 +       spin_unlock_bh(&lapbeth->up_lock);
39879         rcu_read_unlock();
39880         return 0;
39881  drop_unlock:
39882         kfree_skb(skb);
39883         goto out;
39884 +drop_unlock_rcu:
39885 +       rcu_read_unlock();
39886  drop:
39887         kfree_skb(skb);
39888         return 0;
39889 @@ -151,13 +157,11 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
39890  static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
39891                                       struct net_device *dev)
39893 +       struct lapbethdev *lapbeth = netdev_priv(dev);
39894         int err;
39896 -       /*
39897 -        * Just to be *really* sure not to send anything if the interface
39898 -        * is down, the ethernet device may have gone.
39899 -        */
39900 -       if (!netif_running(dev))
39901 +       spin_lock_bh(&lapbeth->up_lock);
39902 +       if (!lapbeth->up)
39903                 goto drop;
39905         /* There should be a pseudo header of 1 byte added by upper layers.
39906 @@ -194,6 +198,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
39907                 goto drop;
39908         }
39909  out:
39910 +       spin_unlock_bh(&lapbeth->up_lock);
39911         return NETDEV_TX_OK;
39912  drop:
39913         kfree_skb(skb);
39914 @@ -285,6 +290,7 @@ static const struct lapb_register_struct lapbeth_callbacks = {
39915   */
39916  static int lapbeth_open(struct net_device *dev)
39918 +       struct lapbethdev *lapbeth = netdev_priv(dev);
39919         int err;
39921         if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
39922 @@ -292,13 +298,22 @@ static int lapbeth_open(struct net_device *dev)
39923                 return -ENODEV;
39924         }
39926 +       spin_lock_bh(&lapbeth->up_lock);
39927 +       lapbeth->up = true;
39928 +       spin_unlock_bh(&lapbeth->up_lock);
39930         return 0;
39933  static int lapbeth_close(struct net_device *dev)
39935 +       struct lapbethdev *lapbeth = netdev_priv(dev);
39936         int err;
39938 +       spin_lock_bh(&lapbeth->up_lock);
39939 +       lapbeth->up = false;
39940 +       spin_unlock_bh(&lapbeth->up_lock);
39942         if ((err = lapb_unregister(dev)) != LAPB_OK)
39943                 pr_err("lapb_unregister error: %d\n", err);
39945 @@ -356,6 +371,9 @@ static int lapbeth_new_device(struct net_device *dev)
39946         dev_hold(dev);
39947         lapbeth->ethdev = dev;
39949 +       lapbeth->up = false;
39950 +       spin_lock_init(&lapbeth->up_lock);
39952         rc = -EIO;
39953         if (register_netdevice(ndev))
39954                 goto fail;
39955 diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
39956 index 0a37be6a7d33..fab398046a3f 100644
39957 --- a/drivers/net/wireless/ath/ath10k/htc.c
39958 +++ b/drivers/net/wireless/ath/ath10k/htc.c
39959 @@ -669,7 +669,7 @@ static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
39961         ath10k_dbg(ar, ATH10K_DBG_HTC,
39962                    "bundle tx status %d eid %d req count %d count %d len %d\n",
39963 -                  ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
39964 +                  ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
39965         return ret;
39968 diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
39969 index d97b33f789e4..7efbe03fbca8 100644
39970 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
39971 +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
39972 @@ -592,6 +592,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
39973                                         GFP_ATOMIC
39974                                         );
39975                 break;
39976 +       default:
39977 +               kfree(tb);
39978 +               return;
39979         }
39981  exit:
39982 diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
39983 index cccfd3bd4d27..ca5cda890d58 100644
39984 --- a/drivers/net/wireless/ath/ath11k/wmi.c
39985 +++ b/drivers/net/wireless/ath/ath11k/wmi.c
39986 @@ -5417,31 +5417,6 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
39987         return 0;
39990 -static int
39991 -ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf,
39992 -                        u32 len, const struct wmi_pdev_temperature_event *ev)
39994 -       const void **tb;
39995 -       int ret;
39997 -       tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
39998 -       if (IS_ERR(tb)) {
39999 -               ret = PTR_ERR(tb);
40000 -               ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
40001 -               return ret;
40002 -       }
40004 -       ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
40005 -       if (!ev) {
40006 -               ath11k_warn(ab, "failed to fetch pdev temp ev");
40007 -               kfree(tb);
40008 -               return -EPROTO;
40009 -       }
40011 -       kfree(tb);
40012 -       return 0;
40015  size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
40017         struct ath11k_fw_stats_vdev *i;
40018 @@ -6849,23 +6824,37 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
40019                                   struct sk_buff *skb)
40021         struct ath11k *ar;
40022 -       struct wmi_pdev_temperature_event ev = {0};
40023 +       const void **tb;
40024 +       const struct wmi_pdev_temperature_event *ev;
40025 +       int ret;
40027 +       tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
40028 +       if (IS_ERR(tb)) {
40029 +               ret = PTR_ERR(tb);
40030 +               ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
40031 +               return;
40032 +       }
40034 -       if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
40035 -               ath11k_warn(ab, "failed to extract pdev temperature event");
40036 +       ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
40037 +       if (!ev) {
40038 +               ath11k_warn(ab, "failed to fetch pdev temp ev");
40039 +               kfree(tb);
40040                 return;
40041         }
40043         ath11k_dbg(ab, ATH11K_DBG_WMI,
40044 -                  "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
40045 +                  "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);
40047 -       ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id);
40048 +       ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
40049         if (!ar) {
40050 -               ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
40051 +               ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
40052 +               kfree(tb);
40053                 return;
40054         }
40056 -       ath11k_thermal_event_temperature(ar, ev.temp);
40057 +       ath11k_thermal_event_temperature(ar, ev->temp);
40059 +       kfree(tb);
40062  static void ath11k_fils_discovery_event(struct ath11k_base *ab,
40063 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
40064 index db0c6fa9c9dc..ff61ae34ecdf 100644
40065 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
40066 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
40067 @@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
40068         if (unlikely(r)) {
40069                 ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
40070                         reg_offset, r);
40071 -               return -EIO;
40072 +               return -1;
40073         }
40075         return be32_to_cpu(val);
40076 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
40077 index 5abc2a5526ec..2ca3b86714a9 100644
40078 --- a/drivers/net/wireless/ath/ath9k/hw.c
40079 +++ b/drivers/net/wireless/ath/ath9k/hw.c
40080 @@ -286,7 +286,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
40082         srev = REG_READ(ah, AR_SREV);
40084 -       if (srev == -EIO) {
40085 +       if (srev == -1) {
40086                 ath_err(ath9k_hw_common(ah),
40087                         "Failed to read SREV register");
40088                 return false;
40089 diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
40090 index 23fbddd0c1f8..534ab3b894e2 100644
40091 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
40092 +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
40093 @@ -815,7 +815,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
40094          * doesn't seem to have as many firmware restart cycles...
40095          *
40096          * As a test, we're sticking in a 1/100s delay here */
40097 -       schedule_timeout_uninterruptible(msecs_to_jiffies(10));
40098 +       schedule_msec_hrtimeout_uninterruptible((10));
40100         return 0;
40102 @@ -1266,7 +1266,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
40103         IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
40104         i = 5000;
40105         do {
40106 -               schedule_timeout_uninterruptible(msecs_to_jiffies(40));
40107 +               schedule_msec_hrtimeout_uninterruptible((40));
40108                 /* Todo... wait for sync command ... */
40110                 read_register(priv->net_dev, IPW_REG_INTA, &inta);
40111 diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
40112 index a0cf78c418ac..903de34028ef 100644
40113 --- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
40114 +++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
40115 @@ -633,8 +633,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
40116         }
40118         if (ext->alg != IW_ENCODE_ALG_NONE) {
40119 -               memcpy(sec.keys[idx], ext->key, ext->key_len);
40120 -               sec.key_sizes[idx] = ext->key_len;
40121 +               int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN);
40123 +               memcpy(sec.keys[idx], ext->key, key_len);
40124 +               sec.key_sizes[idx] = key_len;
40125                 sec.flags |= (1 << idx);
40126                 if (ext->alg == IW_ENCODE_ALG_WEP) {
40127                         sec.encode_alg[idx] = SEC_ALG_WEP;
40128 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
40129 index 579bc81cc0ae..4cd8c39cc3e9 100644
40130 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
40131 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
40132 @@ -1,6 +1,6 @@
40133  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
40134  /*
40135 - * Copyright (C) 2018-2020 Intel Corporation
40136 + * Copyright (C) 2018-2021 Intel Corporation
40137   */
40138  #include <linux/firmware.h>
40139  #include "iwl-drv.h"
40140 @@ -426,7 +426,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
40141         const struct firmware *fw;
40142         int res;
40144 -       if (!iwlwifi_mod_params.enable_ini)
40145 +       if (!iwlwifi_mod_params.enable_ini ||
40146 +           trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
40147                 return;
40149         res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
40150 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
40151 index 60e0db4a5e20..9236f9106826 100644
40152 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
40153 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
40154 @@ -2,7 +2,7 @@
40155  /*
40156   * Copyright (C) 2015 Intel Mobile Communications GmbH
40157   * Copyright (C) 2016-2017 Intel Deutschland GmbH
40158 - * Copyright (C) 2019-2020 Intel Corporation
40159 + * Copyright (C) 2019-2021 Intel Corporation
40160   */
40161  #include <linux/kernel.h>
40162  #include <linux/bsearch.h>
40163 @@ -21,7 +21,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
40164                                   const struct iwl_cfg_trans_params *cfg_trans)
40166         struct iwl_trans *trans;
40167 -       int txcmd_size, txcmd_align;
40168  #ifdef CONFIG_LOCKDEP
40169         static struct lock_class_key __key;
40170  #endif
40171 @@ -31,10 +30,40 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
40172                 return NULL;
40174         trans->trans_cfg = cfg_trans;
40175 -       if (!cfg_trans->gen2) {
40177 +#ifdef CONFIG_LOCKDEP
40178 +       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
40179 +                        &__key, 0);
40180 +#endif
40182 +       trans->dev = dev;
40183 +       trans->ops = ops;
40184 +       trans->num_rx_queues = 1;
40186 +       WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
40188 +       if (trans->trans_cfg->use_tfh) {
40189 +               trans->txqs.tfd.addr_size = 64;
40190 +               trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
40191 +               trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
40192 +       } else {
40193 +               trans->txqs.tfd.addr_size = 36;
40194 +               trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
40195 +               trans->txqs.tfd.size = sizeof(struct iwl_tfd);
40196 +       }
40197 +       trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
40199 +       return trans;
40202 +int iwl_trans_init(struct iwl_trans *trans)
40204 +       int txcmd_size, txcmd_align;
40206 +       if (!trans->trans_cfg->gen2) {
40207                 txcmd_size = sizeof(struct iwl_tx_cmd);
40208                 txcmd_align = sizeof(void *);
40209 -       } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
40210 +       } else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
40211                 txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
40212                 txcmd_align = 64;
40213         } else {
40214 @@ -46,17 +75,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
40215         txcmd_size += 36; /* biggest possible 802.11 header */
40217         /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
40218 -       if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
40219 -               return ERR_PTR(-EINVAL);
40221 -#ifdef CONFIG_LOCKDEP
40222 -       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
40223 -                        &__key, 0);
40224 -#endif
40226 -       trans->dev = dev;
40227 -       trans->ops = ops;
40228 -       trans->num_rx_queues = 1;
40229 +       if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
40230 +               return -EINVAL;
40232         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
40233                 trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
40234 @@ -68,23 +88,16 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
40235          * allocate here.
40236          */
40237         if (trans->trans_cfg->gen2) {
40238 -               trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", dev,
40239 +               trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev,
40240                                                        trans->txqs.bc_tbl_size,
40241                                                        256, 0);
40242                 if (!trans->txqs.bc_pool)
40243 -                       return NULL;
40244 +                       return -ENOMEM;
40245         }
40247 -       if (trans->trans_cfg->use_tfh) {
40248 -               trans->txqs.tfd.addr_size = 64;
40249 -               trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
40250 -               trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
40251 -       } else {
40252 -               trans->txqs.tfd.addr_size = 36;
40253 -               trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
40254 -               trans->txqs.tfd.size = sizeof(struct iwl_tfd);
40255 -       }
40256 -       trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
40257 +       /* Some things must not change even if the config does */
40258 +       WARN_ON(trans->txqs.tfd.addr_size !=
40259 +               (trans->trans_cfg->use_tfh ? 64 : 36));
40261         snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
40262                  "iwl_cmd_pool:%s", dev_name(trans->dev));
40263 @@ -93,35 +106,35 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
40264                                   txcmd_size, txcmd_align,
40265                                   SLAB_HWCACHE_ALIGN, NULL);
40266         if (!trans->dev_cmd_pool)
40267 -               return NULL;
40269 -       WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
40270 +               return -ENOMEM;
40272         trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
40273         if (!trans->txqs.tso_hdr_page) {
40274                 kmem_cache_destroy(trans->dev_cmd_pool);
40275 -               return NULL;
40276 +               return -ENOMEM;
40277         }
40279         /* Initialize the wait queue for commands */
40280         init_waitqueue_head(&trans->wait_command_queue);
40282 -       return trans;
40283 +       return 0;
40286  void iwl_trans_free(struct iwl_trans *trans)
40288         int i;
40290 -       for_each_possible_cpu(i) {
40291 -               struct iwl_tso_hdr_page *p =
40292 -                       per_cpu_ptr(trans->txqs.tso_hdr_page, i);
40293 +       if (trans->txqs.tso_hdr_page) {
40294 +               for_each_possible_cpu(i) {
40295 +                       struct iwl_tso_hdr_page *p =
40296 +                               per_cpu_ptr(trans->txqs.tso_hdr_page, i);
40298 -               if (p->page)
40299 -                       __free_page(p->page);
40300 -       }
40301 +                       if (p && p->page)
40302 +                               __free_page(p->page);
40303 +               }
40305 -       free_percpu(trans->txqs.tso_hdr_page);
40306 +               free_percpu(trans->txqs.tso_hdr_page);
40307 +       }
40309         kmem_cache_destroy(trans->dev_cmd_pool);
40311 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
40312 index 4a5822c1be13..3e0df6fbb642 100644
40313 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
40314 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
40315 @@ -1438,6 +1438,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
40316                           struct device *dev,
40317                           const struct iwl_trans_ops *ops,
40318                           const struct iwl_cfg_trans_params *cfg_trans);
40319 +int iwl_trans_init(struct iwl_trans *trans);
40320  void iwl_trans_free(struct iwl_trans *trans);
40322  /*****************************************************
40323 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
40324 index 8772b65c9dab..2d58cb969918 100644
40325 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
40326 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
40327 @@ -1,7 +1,7 @@
40328  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
40329  /*
40330   * Copyright (C) 2017 Intel Deutschland GmbH
40331 - * Copyright (C) 2018-2020 Intel Corporation
40332 + * Copyright (C) 2018-2021 Intel Corporation
40333   */
40334  #include "rs.h"
40335  #include "fw-api.h"
40336 @@ -72,19 +72,15 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
40337         bool vht_ena = vht_cap->vht_supported;
40338         u16 flags = 0;
40340 +       /* get STBC flags */
40341         if (mvm->cfg->ht_params->stbc &&
40342             (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
40343 -               if (he_cap->has_he) {
40344 -                       if (he_cap->he_cap_elem.phy_cap_info[2] &
40345 -                           IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
40346 -                               flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
40348 -                       if (he_cap->he_cap_elem.phy_cap_info[7] &
40349 -                           IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
40350 -                               flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
40351 -               } else if ((ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) ||
40352 -                          (vht_ena &&
40353 -                           (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))
40354 +               if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
40355 +                                     IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
40356 +                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
40357 +               else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
40358 +                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
40359 +               else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
40360                         flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
40361         }
40363 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
40364 index 558a0b2ef0fc..66faf7914bd8 100644
40365 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
40366 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
40367 @@ -17,10 +17,20 @@
40368  #include "iwl-prph.h"
40369  #include "internal.h"
40371 +#define TRANS_CFG_MARKER BIT(0)
40372 +#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg),  \
40373 +                                                        struct _struct)
40374 +extern int _invalid_type;
40375 +#define _TRANS_CFG_MARKER(cfg)                                         \
40376 +       (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params),        \
40377 +                              TRANS_CFG_MARKER,                        \
40378 +        __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type)))
40379 +#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg))
40381  #define IWL_PCI_DEVICE(dev, subdev, cfg) \
40382         .vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
40383         .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
40384 -       .driver_data = (kernel_ulong_t)&(cfg)
40385 +       .driver_data = _ASSIGN_CFG(cfg)
40387  /* Hardware specific file defines the PCI IDs table for that hardware module */
40388  static const struct pci_device_id iwl_hw_card_ids[] = {
40389 @@ -1075,19 +1085,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
40391  static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40393 -       const struct iwl_cfg_trans_params *trans =
40394 -               (struct iwl_cfg_trans_params *)(ent->driver_data);
40395 +       const struct iwl_cfg_trans_params *trans;
40396         const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
40397         struct iwl_trans *iwl_trans;
40398         struct iwl_trans_pcie *trans_pcie;
40399         int i, ret;
40400 +       const struct iwl_cfg *cfg;
40402 +       trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
40404         /*
40405          * This is needed for backwards compatibility with the old
40406          * tables, so we don't need to change all the config structs
40407          * at the same time.  The cfg is used to compare with the old
40408          * full cfg structs.
40409          */
40410 -       const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
40411 +       cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
40413         /* make sure trans is the first element in iwl_cfg */
40414         BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
40415 @@ -1202,11 +1215,19 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40417  #endif
40418         /*
40419 -        * If we didn't set the cfg yet, assume the trans is actually
40420 -        * a full cfg from the old tables.
40421 +        * If we didn't set the cfg yet, the PCI ID table entry should have
40422 +        * been a full config - if yes, use it, otherwise fail.
40423          */
40424 -       if (!iwl_trans->cfg)
40425 +       if (!iwl_trans->cfg) {
40426 +               if (ent->driver_data & TRANS_CFG_MARKER) {
40427 +                       pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
40428 +                              pdev->device, pdev->subsystem_device,
40429 +                              iwl_trans->hw_rev, iwl_trans->hw_rf_id);
40430 +                       ret = -EINVAL;
40431 +                       goto out_free_trans;
40432 +               }
40433                 iwl_trans->cfg = cfg;
40434 +       }
40436         /* if we don't have a name yet, copy name from the old cfg */
40437         if (!iwl_trans->name)
40438 @@ -1222,6 +1243,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40439                 trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
40440         }
40442 +       ret = iwl_trans_init(iwl_trans);
40443 +       if (ret)
40444 +               goto out_free_trans;
40446         pci_set_drvdata(pdev, iwl_trans);
40447         iwl_trans->drv = iwl_drv_start(iwl_trans);
40449 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
40450 index 94ffc1ae484d..af9412bd697e 100644
40451 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
40452 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
40453 @@ -1,7 +1,7 @@
40454  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
40455  /*
40456   * Copyright (C) 2017 Intel Deutschland GmbH
40457 - * Copyright (C) 2018-2020 Intel Corporation
40458 + * Copyright (C) 2018-2021 Intel Corporation
40459   */
40460  #include "iwl-trans.h"
40461  #include "iwl-prph.h"
40462 @@ -143,7 +143,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
40463         if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
40464                 IWL_DEBUG_INFO(trans,
40465                                "DEVICE_ENABLED bit was set and is now cleared\n");
40466 -               iwl_txq_gen2_tx_stop(trans);
40467 +               iwl_txq_gen2_tx_free(trans);
40468                 iwl_pcie_rx_stop(trans);
40469         }
40471 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
40472 index 4456abb9a074..34bde8c87324 100644
40473 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
40474 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
40475 @@ -40,6 +40,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
40476         const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
40477         u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
40478         struct iwl_tfh_tfd *tfd;
40479 +       unsigned long flags;
40481         copy_size = sizeof(struct iwl_cmd_header_wide);
40482         cmd_size = sizeof(struct iwl_cmd_header_wide);
40483 @@ -108,14 +109,14 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
40484                 goto free_dup_buf;
40485         }
40487 -       spin_lock_bh(&txq->lock);
40488 +       spin_lock_irqsave(&txq->lock, flags);
40490         idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
40491         tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
40492         memset(tfd, 0, sizeof(*tfd));
40494         if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
40495 -               spin_unlock_bh(&txq->lock);
40496 +               spin_unlock_irqrestore(&txq->lock, flags);
40498                 IWL_ERR(trans, "No space in command queue\n");
40499                 iwl_op_mode_cmd_queue_full(trans->op_mode);
40500 @@ -250,7 +251,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
40501         spin_unlock(&trans_pcie->reg_lock);
40503  out:
40504 -       spin_unlock_bh(&txq->lock);
40505 +       spin_unlock_irqrestore(&txq->lock, flags);
40506  free_dup_buf:
40507         if (idx < 0)
40508                 kfree(dup_buf);
40509 diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
40510 index 833f43d1ca7a..810dcb3df242 100644
40511 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
40512 +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
40513 @@ -13,30 +13,6 @@
40514  #include "iwl-scd.h"
40515  #include <linux/dmapool.h>
40518 - * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
40519 - */
40520 -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
40522 -       int txq_id;
40524 -       /*
40525 -        * This function can be called before the op_mode disabled the
40526 -        * queues. This happens when we have an rfkill interrupt.
40527 -        * Since we stop Tx altogether - mark the queues as stopped.
40528 -        */
40529 -       memset(trans->txqs.queue_stopped, 0,
40530 -              sizeof(trans->txqs.queue_stopped));
40531 -       memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
40533 -       /* Unmap DMA from host system and free skb's */
40534 -       for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
40535 -               if (!trans->txqs.txq[txq_id])
40536 -                       continue;
40537 -               iwl_txq_gen2_unmap(trans, txq_id);
40538 -       }
40541  /*
40542   * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
40543   */
40544 @@ -1189,6 +1165,12 @@ static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
40545                 goto error_free_resp;
40546         }
40548 +       if (WARN_ONCE(trans->txqs.txq[qid],
40549 +                     "queue %d already allocated\n", qid)) {
40550 +               ret = -EIO;
40551 +               goto error_free_resp;
40552 +       }
40554         txq->id = qid;
40555         trans->txqs.txq[qid] = txq;
40556         wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
40557 diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
40558 index af1dbdf5617a..20efc62acf13 100644
40559 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
40560 +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
40561 @@ -1,6 +1,6 @@
40562  /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
40563  /*
40564 - * Copyright (C) 2020 Intel Corporation
40565 + * Copyright (C) 2020-2021 Intel Corporation
40566   */
40567  #ifndef __iwl_trans_queue_tx_h__
40568  #define __iwl_trans_queue_tx_h__
40569 @@ -123,7 +123,6 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
40570  void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
40571  void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
40572  void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
40573 -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
40574  void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
40575  int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
40576                  bool cmd_queue);
40577 diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
40578 index c9f8c056aa51..84b32a5f01ee 100644
40579 --- a/drivers/net/wireless/marvell/mwl8k.c
40580 +++ b/drivers/net/wireless/marvell/mwl8k.c
40581 @@ -1473,6 +1473,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
40582         if (txq->skb == NULL) {
40583                 dma_free_coherent(&priv->pdev->dev, size, txq->txd,
40584                                   txq->txd_dma);
40585 +               txq->txd = NULL;
40586                 return -ENOMEM;
40587         }
40589 diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
40590 index 2f27c43ad76d..7196fa9047e6 100644
40591 --- a/drivers/net/wireless/mediatek/mt76/dma.c
40592 +++ b/drivers/net/wireless/mediatek/mt76/dma.c
40593 @@ -309,7 +309,7 @@ static int
40594  mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
40595                           struct sk_buff *skb, u32 tx_info)
40597 -       struct mt76_queue_buf buf;
40598 +       struct mt76_queue_buf buf = {};
40599         dma_addr_t addr;
40601         if (q->queued + 1 >= q->ndesc - 1)
40602 diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
40603 index 8bf45497cfca..36a430f09f64 100644
40604 --- a/drivers/net/wireless/mediatek/mt76/mt76.h
40605 +++ b/drivers/net/wireless/mediatek/mt76/mt76.h
40606 @@ -222,6 +222,7 @@ struct mt76_wcid {
40608         u16 idx;
40609         u8 hw_key_idx;
40610 +       u8 hw_key_idx2;
40612         u8 sta:1;
40613         u8 ext_phy:1;
40614 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
40615 index 2eab23898c77..6dbaaf95ee38 100644
40616 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
40617 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
40618 @@ -86,6 +86,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
40619         switch (val) {
40620         case 0x7615:
40621         case 0x7622:
40622 +       case 0x7663:
40623                 return 0;
40624         default:
40625                 return -EINVAL;
40626 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
40627 index 59fdd0fc2ad4..8dccb589b756 100644
40628 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
40629 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
40630 @@ -690,7 +690,7 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
40632         int i;
40634 -       for (i = 1; i < txp->nbuf; i++)
40635 +       for (i = 0; i < txp->nbuf; i++)
40636                 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
40637                                  le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
40639 @@ -966,6 +966,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
40640         struct mt7615_dev *dev = phy->dev;
40641         struct mt7615_rate_desc rd;
40642         u32 w5, w27, addr;
40643 +       u16 idx = sta->vif->mt76.omac_idx;
40645         if (!mt76_is_mmio(&dev->mt76)) {
40646                 mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
40647 @@ -1017,7 +1018,10 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
40649         mt76_wr(dev, addr + 27 * 4, w27);
40651 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
40652 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
40653 +       addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
40655 +       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
40656         sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
40657         sta->rate_set_tsf |= rd.rateset;
40659 @@ -1033,7 +1037,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
40660  static int
40661  mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
40662                            struct ieee80211_key_conf *key,
40663 -                          enum mt7615_cipher_type cipher,
40664 +                          enum mt7615_cipher_type cipher, u16 cipher_mask,
40665                            enum set_key_cmd cmd)
40667         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
40668 @@ -1050,22 +1054,22 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
40669                         memcpy(data + 16, key->key + 24, 8);
40670                         memcpy(data + 24, key->key + 16, 8);
40671                 } else {
40672 -                       if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
40673 -                               memmove(data + 16, data, 16);
40674 -                       if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
40675 +                       if (cipher_mask == BIT(cipher))
40676                                 memcpy(data, key->key, key->keylen);
40677 -                       else if (cipher == MT_CIPHER_BIP_CMAC_128)
40678 +                       else if (cipher != MT_CIPHER_BIP_CMAC_128)
40679 +                               memcpy(data, key->key, 16);
40680 +                       if (cipher == MT_CIPHER_BIP_CMAC_128)
40681                                 memcpy(data + 16, key->key, 16);
40682                 }
40683         } else {
40684 -               if (wcid->cipher & ~BIT(cipher)) {
40685 -                       if (cipher != MT_CIPHER_BIP_CMAC_128)
40686 -                               memmove(data, data + 16, 16);
40687 +               if (cipher == MT_CIPHER_BIP_CMAC_128)
40688                         memset(data + 16, 0, 16);
40689 -               } else {
40690 +               else if (cipher_mask)
40691 +                       memset(data, 0, 16);
40692 +               if (!cipher_mask)
40693                         memset(data, 0, sizeof(data));
40694 -               }
40695         }
40697         mt76_wr_copy(dev, addr, data, sizeof(data));
40699         return 0;
40700 @@ -1073,7 +1077,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
40702  static int
40703  mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
40704 -                         enum mt7615_cipher_type cipher,
40705 +                         enum mt7615_cipher_type cipher, u16 cipher_mask,
40706                           int keyidx, enum set_key_cmd cmd)
40708         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
40709 @@ -1083,20 +1087,23 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
40711         w0 = mt76_rr(dev, addr);
40712         w1 = mt76_rr(dev, addr + 4);
40713 -       if (cmd == SET_KEY) {
40714 -               w0 |= MT_WTBL_W0_RX_KEY_VALID |
40715 -                     FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
40716 -                                cipher == MT_CIPHER_BIP_CMAC_128);
40717 -               if (cipher != MT_CIPHER_BIP_CMAC_128 ||
40718 -                   !wcid->cipher)
40719 -                       w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
40720 -       }  else {
40721 -               if (!(wcid->cipher & ~BIT(cipher)))
40722 -                       w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
40723 -                               MT_WTBL_W0_KEY_IDX);
40724 -               if (cipher == MT_CIPHER_BIP_CMAC_128)
40725 -                       w0 &= ~MT_WTBL_W0_RX_IK_VALID;
40727 +       if (cipher_mask)
40728 +               w0 |= MT_WTBL_W0_RX_KEY_VALID;
40729 +       else
40730 +               w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX);
40731 +       if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128))
40732 +               w0 |= MT_WTBL_W0_RX_IK_VALID;
40733 +       else
40734 +               w0 &= ~MT_WTBL_W0_RX_IK_VALID;
40736 +       if (cmd == SET_KEY &&
40737 +           (cipher != MT_CIPHER_BIP_CMAC_128 ||
40738 +            cipher_mask == BIT(cipher))) {
40739 +               w0 &= ~MT_WTBL_W0_KEY_IDX;
40740 +               w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
40741         }
40743         mt76_wr(dev, MT_WTBL_RICR0, w0);
40744         mt76_wr(dev, MT_WTBL_RICR1, w1);
40746 @@ -1109,24 +1116,25 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
40748  static void
40749  mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
40750 -                             enum mt7615_cipher_type cipher,
40751 +                             enum mt7615_cipher_type cipher, u16 cipher_mask,
40752                               enum set_key_cmd cmd)
40754         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
40756 -       if (cmd == SET_KEY) {
40757 -               if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
40758 -                       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
40759 -                                FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
40760 -       } else {
40761 -               if (cipher != MT_CIPHER_BIP_CMAC_128 &&
40762 -                   wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
40763 -                       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
40764 -                                FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
40765 -                                           MT_CIPHER_BIP_CMAC_128));
40766 -               else if (!(wcid->cipher & ~BIT(cipher)))
40767 -                       mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
40768 +       if (!cipher_mask) {
40769 +               mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
40770 +               return;
40771         }
40773 +       if (cmd != SET_KEY)
40774 +               return;
40776 +       if (cipher == MT_CIPHER_BIP_CMAC_128 &&
40777 +           cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
40778 +               return;
40780 +       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
40781 +                FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
40784  int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
40785 @@ -1135,25 +1143,30 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
40786                               enum set_key_cmd cmd)
40788         enum mt7615_cipher_type cipher;
40789 +       u16 cipher_mask = wcid->cipher;
40790         int err;
40792         cipher = mt7615_mac_get_cipher(key->cipher);
40793         if (cipher == MT_CIPHER_NONE)
40794                 return -EOPNOTSUPP;
40796 -       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
40797 -       err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
40798 +       if (cmd == SET_KEY)
40799 +               cipher_mask |= BIT(cipher);
40800 +       else
40801 +               cipher_mask &= ~BIT(cipher);
40803 +       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
40804 +       err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
40805 +                                        cmd);
40806         if (err < 0)
40807                 return err;
40809 -       err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd);
40810 +       err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
40811 +                                       key->keyidx, cmd);
40812         if (err < 0)
40813                 return err;
40815 -       if (cmd == SET_KEY)
40816 -               wcid->cipher |= BIT(cipher);
40817 -       else
40818 -               wcid->cipher &= ~BIT(cipher);
40819 +       wcid->cipher = cipher_mask;
40821         return 0;
40823 @@ -1821,10 +1834,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
40824         int i, aggr;
40825         u32 val, val2;
40827 -       memset(mib, 0, sizeof(*mib));
40829 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
40830 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
40831 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
40832 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
40834         val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
40835                              MT_MIB_AMPDU_MPDU_COUNT);
40836 @@ -1837,24 +1848,16 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
40837         aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
40838         for (i = 0; i < 4; i++) {
40839                 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
40841 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
40842 -               if (val2 > mib->ack_fail_cnt)
40843 -                       mib->ack_fail_cnt = val2;
40845 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
40846 -               if (val2 > mib->ba_miss_cnt)
40847 -                       mib->ba_miss_cnt = val2;
40848 +               mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
40849 +               mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
40850 +                                              val);
40852                 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
40853 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
40854 -               if (val2 > mib->rts_retries_cnt) {
40855 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
40856 -                       mib->rts_retries_cnt = val2;
40857 -               }
40858 +               mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
40859 +               mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
40860 +                                                 val);
40862                 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
40864                 dev->mt76.aggr_stats[aggr++] += val & 0xffff;
40865                 dev->mt76.aggr_stats[aggr++] += val >> 16;
40866         }
40867 @@ -1976,15 +1979,17 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
40868         mt76_clear(dev, MT_WPDMA_GLO_CFG,
40869                    MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
40870                    MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
40872         usleep_range(1000, 2000);
40874 -       mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
40875         for (i = 0; i < __MT_TXQ_MAX; i++)
40876                 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
40878 -       mt76_for_each_q_rx(&dev->mt76, i) {
40879 +       for (i = 0; i < __MT_MCUQ_MAX; i++)
40880 +               mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
40882 +       mt76_for_each_q_rx(&dev->mt76, i)
40883                 mt76_queue_rx_reset(dev, i);
40884 -       }
40886         mt76_set(dev, MT_WPDMA_GLO_CFG,
40887                  MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
40888 @@ -2000,8 +2005,12 @@ void mt7615_tx_token_put(struct mt7615_dev *dev)
40889         spin_lock_bh(&dev->token_lock);
40890         idr_for_each_entry(&dev->token, txwi, id) {
40891                 mt7615_txp_skb_unmap(&dev->mt76, txwi);
40892 -               if (txwi->skb)
40893 -                       dev_kfree_skb_any(txwi->skb);
40894 +               if (txwi->skb) {
40895 +                       struct ieee80211_hw *hw;
40897 +                       hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
40898 +                       ieee80211_free_txskb(hw, txwi->skb);
40899 +               }
40900                 mt76_put_txwi(&dev->mt76, txwi);
40901         }
40902         spin_unlock_bh(&dev->token_lock);
40903 @@ -2304,8 +2313,10 @@ void mt7615_coredump_work(struct work_struct *work)
40904                         break;
40906                 skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
40907 -               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
40908 -                       break;
40909 +               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
40910 +                       dev_kfree_skb(skb);
40911 +                       continue;
40912 +               }
40914                 memcpy(data, skb->data, skb->len);
40915                 data += skb->len;
40916 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
40917 index 25faf486d279..d334491667a4 100644
40918 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
40919 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
40920 @@ -217,8 +217,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
40921         ret = mt7615_mcu_add_dev_info(phy, vif, true);
40922         if (ret)
40923                 goto out;
40925 -       mt7615_mac_set_beacon_filter(phy, vif, true);
40926  out:
40927         mt7615_mutex_release(dev);
40929 @@ -244,7 +242,6 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
40931         mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
40933 -       mt7615_mac_set_beacon_filter(phy, vif, false);
40934         mt7615_mcu_add_dev_info(phy, vif, false);
40936         rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
40937 @@ -337,7 +334,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
40938         struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
40939                                   &mvif->sta;
40940         struct mt76_wcid *wcid = &msta->wcid;
40941 -       int idx = key->keyidx, err;
40942 +       int idx = key->keyidx, err = 0;
40943 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
40945         /* The hardware does not support per-STA RX GTK, fallback
40946          * to software mode for these.
40947 @@ -352,6 +350,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
40948         /* fall back to sw encryption for unsupported ciphers */
40949         switch (key->cipher) {
40950         case WLAN_CIPHER_SUITE_AES_CMAC:
40951 +               wcid_keyidx = &wcid->hw_key_idx2;
40952                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
40953                 break;
40954         case WLAN_CIPHER_SUITE_TKIP:
40955 @@ -369,12 +368,13 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
40957         mt7615_mutex_acquire(dev);
40959 -       if (cmd == SET_KEY) {
40960 -               key->hw_key_idx = wcid->idx;
40961 -               wcid->hw_key_idx = idx;
40962 -       } else if (idx == wcid->hw_key_idx) {
40963 -               wcid->hw_key_idx = -1;
40964 -       }
40965 +       if (cmd == SET_KEY)
40966 +               *wcid_keyidx = idx;
40967 +       else if (idx == *wcid_keyidx)
40968 +               *wcid_keyidx = -1;
40969 +       else
40970 +               goto out;
40972         mt76_wcid_key_setup(&dev->mt76, wcid,
40973                             cmd == SET_KEY ? key : NULL);
40975 @@ -383,6 +383,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
40976         else
40977                 err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
40979 +out:
40980         mt7615_mutex_release(dev);
40982         return err;
40983 @@ -544,6 +545,9 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
40984         if (changed & BSS_CHANGED_ARP_FILTER)
40985                 mt7615_mcu_update_arp_filter(hw, vif, info);
40987 +       if (changed & BSS_CHANGED_ASSOC)
40988 +               mt7615_mac_set_beacon_filter(phy, vif, info->assoc);
40990         mt7615_mutex_release(dev);
40993 @@ -803,26 +807,38 @@ mt7615_get_stats(struct ieee80211_hw *hw,
40994         struct mt7615_phy *phy = mt7615_hw_phy(hw);
40995         struct mib_stats *mib = &phy->mib;
40997 +       mt7615_mutex_acquire(phy->dev);
40999         stats->dot11RTSSuccessCount = mib->rts_cnt;
41000         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
41001         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
41002         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
41004 +       memset(mib, 0, sizeof(*mib));
41006 +       mt7615_mutex_release(phy->dev);
41008         return 0;
41011  static u64
41012  mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
41014 +       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
41015         struct mt7615_dev *dev = mt7615_hw_dev(hw);
41016         union {
41017                 u64 t64;
41018                 u32 t32[2];
41019         } tsf;
41020 +       u16 idx = mvif->mt76.omac_idx;
41021 +       u32 reg;
41023 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
41024 +       reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
41026         mt7615_mutex_acquire(dev);
41028 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
41029 +       mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */
41030         tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
41031         tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
41033 @@ -835,18 +851,24 @@ static void
41034  mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
41035                u64 timestamp)
41037 +       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
41038         struct mt7615_dev *dev = mt7615_hw_dev(hw);
41039         union {
41040                 u64 t64;
41041                 u32 t32[2];
41042         } tsf = { .t64 = timestamp, };
41043 +       u16 idx = mvif->mt76.omac_idx;
41044 +       u32 reg;
41046 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
41047 +       reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
41049         mt7615_mutex_acquire(dev);
41051         mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
41052         mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
41053         /* TSF software overwrite */
41054 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE);
41055 +       mt76_set(dev, reg, MT_LPON_TCR_WRITE);
41057         mt7615_mutex_release(dev);
41059 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
41060 index 631596fc2f36..198e9025b681 100644
41061 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
41062 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
41063 @@ -291,12 +291,20 @@ static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
41064         u32 addr;
41065         int err;
41067 -       addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
41068 +       if (is_mt7663(mdev)) {
41069 +               /* Clear firmware own via N9 eint */
41070 +               mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
41071 +               mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
41073 +               addr = MT_CONN_HIF_ON_LPCTL;
41074 +       } else {
41075 +               addr = MT_CFG_LPCR_HOST;
41076 +       }
41078         mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
41080         mt7622_trigger_hif_int(dev, true);
41082 -       addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
41083         err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
41085         mt7622_trigger_hif_int(dev, false);
41086 @@ -1040,6 +1048,9 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev,
41088         wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
41089                                                   WTBL_SET, sta_wtbl, &skb);
41090 +       if (IS_ERR(wtbl_hdr))
41091 +               return PTR_ERR(wtbl_hdr);
41093         mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, tx,
41094                                     sta_wtbl, wtbl_hdr);
41096 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
41097 index 491841bc6291..4bc0c379c579 100644
41098 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
41099 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
41100 @@ -133,11 +133,11 @@ struct mt7615_vif {
41101  };
41103  struct mib_stats {
41104 -       u16 ack_fail_cnt;
41105 -       u16 fcs_err_cnt;
41106 -       u16 rts_cnt;
41107 -       u16 rts_retries_cnt;
41108 -       u16 ba_miss_cnt;
41109 +       u32 ack_fail_cnt;
41110 +       u32 fcs_err_cnt;
41111 +       u32 rts_cnt;
41112 +       u32 rts_retries_cnt;
41113 +       u32 ba_miss_cnt;
41114         unsigned long aggr_per;
41115  };
41117 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
41118 index 72395925ddee..15b417d6d889 100644
41119 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
41120 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
41121 @@ -163,10 +163,9 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
41122         mt76_unregister_device(&dev->mt76);
41123         if (mcu_running)
41124                 mt7615_mcu_exit(dev);
41125 -       mt7615_dma_cleanup(dev);
41127         mt7615_tx_token_put(dev);
41129 +       mt7615_dma_cleanup(dev);
41130         tasklet_disable(&dev->irq_tasklet);
41132         mt76_free_device(&dev->mt76);
41133 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
41134 index 6e5db015b32c..6e4710d3ddd3 100644
41135 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
41136 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
41137 @@ -447,9 +447,10 @@ enum mt7615_reg_base {
41139  #define MT_LPON(_n)                    ((dev)->reg_map[MT_LPON_BASE] + (_n))
41141 -#define MT_LPON_T0CR                   MT_LPON(0x010)
41142 -#define MT_LPON_T0CR_MODE              GENMASK(1, 0)
41143 -#define MT_LPON_T0CR_WRITE             BIT(0)
41144 +#define MT_LPON_TCR0(_n)               MT_LPON(0x010 + ((_n) * 4))
41145 +#define MT_LPON_TCR2(_n)               MT_LPON(0x0f8 + ((_n) - 2) * 4)
41146 +#define MT_LPON_TCR_MODE               GENMASK(1, 0)
41147 +#define MT_LPON_TCR_WRITE              BIT(0)
41149  #define MT_LPON_UTTR0                  MT_LPON(0x018)
41150  #define MT_LPON_UTTR1                  MT_LPON(0x01c)
41151 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
41152 index 9fb506f2ace6..4393dd21ebbb 100644
41153 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
41154 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
41155 @@ -218,12 +218,15 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
41156         int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
41157         bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
41158         struct mt76_sdio *sdio = &dev->sdio;
41159 +       u8 pad;
41161         qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
41162         while (q->first != q->head) {
41163                 struct mt76_queue_entry *e = &q->entry[q->first];
41164                 struct sk_buff *iter;
41166 +               smp_rmb();
41168                 if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
41169                         __skb_put_zero(e->skb, 4);
41170                         err = __mt7663s_xmit_queue(dev, e->skb->data,
41171 @@ -234,7 +237,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
41172                         goto next;
41173                 }
41175 -               if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
41176 +               pad = roundup(e->skb->len, 4) - e->skb->len;
41177 +               if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
41178                         break;
41180                 if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
41181 @@ -252,6 +256,11 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
41182                         len += iter->len;
41183                         nframes++;
41184                 }
41186 +               if (unlikely(pad)) {
41187 +                       memset(sdio->xmit_buf[qid] + len, 0, pad);
41188 +                       len += pad;
41189 +               }
41190  next:
41191                 q->first = (q->first + 1) % q->ndesc;
41192                 e->done = true;
41193 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
41194 index 203256862dfd..f8d3673c2cae 100644
41195 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
41196 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
41197 @@ -67,6 +67,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
41198         struct mt7615_rate_desc *rate = &wrd->rate;
41199         struct mt7615_sta *sta = wrd->sta;
41200         u32 w5, w27, addr, val;
41201 +       u16 idx;
41203         lockdep_assert_held(&dev->mt76.mutex);
41205 @@ -118,7 +119,11 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
41207         sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1;
41209 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
41210 +       idx = sta->vif->mt76.omac_idx;
41211 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
41212 +       addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
41214 +       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
41215         val = mt76_rr(dev, MT_LPON_UTTR0);
41216         sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
41218 diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
41219 index 6cbccfb05f8b..cefd33b74a87 100644
41220 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
41221 +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
41222 @@ -833,6 +833,9 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
41223         wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid,
41224                                                   WTBL_RESET_AND_SET,
41225                                                   sta_wtbl, &skb);
41226 +       if (IS_ERR(wtbl_hdr))
41227 +               return PTR_ERR(wtbl_hdr);
41229         if (enable) {
41230                 mt76_connac_mcu_wtbl_generic_tlv(dev, skb, vif, sta, sta_wtbl,
41231                                                  wtbl_hdr);
41232 @@ -946,6 +949,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
41234         switch (vif->type) {
41235         case NL80211_IFTYPE_MESH_POINT:
41236 +       case NL80211_IFTYPE_MONITOR:
41237         case NL80211_IFTYPE_AP:
41238                 basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
41239                 break;
41240 @@ -1195,6 +1199,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
41241                         .center_chan = ieee80211_frequency_to_channel(freq1),
41242                         .center_chan2 = ieee80211_frequency_to_channel(freq2),
41243                         .tx_streams = hweight8(phy->antenna_mask),
41244 +                       .ht_op_info = 4, /* set HT 40M allowed */
41245                         .rx_streams = phy->chainmask,
41246                         .short_st = true,
41247                 },
41248 @@ -1287,6 +1292,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
41249         case NL80211_CHAN_WIDTH_20:
41250         default:
41251                 rlm_req.rlm.bw = CMD_CBW_20MHZ;
41252 +               rlm_req.rlm.ht_op_info = 0;
41253                 break;
41254         }
41256 @@ -1306,7 +1312,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
41258         struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
41259         struct cfg80211_scan_request *sreq = &scan_req->req;
41260 -       int n_ssids = 0, err, i, duration = MT76_CONNAC_SCAN_CHANNEL_TIME;
41261 +       int n_ssids = 0, err, i, duration;
41262         int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
41263         struct ieee80211_channel **scan_list = sreq->channels;
41264         struct mt76_dev *mdev = phy->dev;
41265 @@ -1343,6 +1349,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
41266         req->ssid_type_ext = n_ssids ? BIT(0) : 0;
41267         req->ssids_num = n_ssids;
41269 +       duration = is_mt7921(phy->dev) ? 0 : MT76_CONNAC_SCAN_CHANNEL_TIME;
41270         /* increase channel time for passive scan */
41271         if (!sreq->n_ssids)
41272                 duration *= 2;
41273 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
41274 index ab671e21f882..02db5d66735d 100644
41275 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
41276 +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
41277 @@ -447,6 +447,10 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
41278             !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
41279                 return -EOPNOTSUPP;
41281 +       /* MT76x0 GTK offloading does not work with more than one VIF */
41282 +       if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
41283 +               return -EOPNOTSUPP;
41285         msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
41286         wcid = msta ? &msta->wcid : &mvif->group_wcid;
41288 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
41289 index 77dcd71e49a5..2f706620686e 100644
41290 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
41291 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
41292 @@ -124,7 +124,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
41293                 range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
41295         for (i = 0; i < ARRAY_SIZE(bound); i++)
41296 -               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
41297 +               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
41299         seq_printf(file, "\nPhy %d\n", ext_phy);
41301 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
41302 index 660398ac53c2..738ecf8f4fa2 100644
41303 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
41304 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
41305 @@ -124,7 +124,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
41306                                    struct ieee80211_channel *chan,
41307                                    u8 chain_idx)
41309 -       int index;
41310 +       int index, target_power;
41311         bool tssi_on;
41313         if (chain_idx > 3)
41314 @@ -133,15 +133,22 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
41315         tssi_on = mt7915_tssi_enabled(dev, chan->band);
41317         if (chan->band == NL80211_BAND_2GHZ) {
41318 -               index = MT_EE_TX0_POWER_2G + chain_idx * 3 + !tssi_on;
41319 +               index = MT_EE_TX0_POWER_2G + chain_idx * 3;
41320 +               target_power = mt7915_eeprom_read(dev, index);
41322 +               if (!tssi_on)
41323 +                       target_power += mt7915_eeprom_read(dev, index + 1);
41324         } else {
41325 -               int group = tssi_on ?
41326 -                           mt7915_get_channel_group(chan->hw_value) : 8;
41327 +               int group = mt7915_get_channel_group(chan->hw_value);
41329 +               index = MT_EE_TX0_POWER_5G + chain_idx * 12;
41330 +               target_power = mt7915_eeprom_read(dev, index + group);
41332 -               index = MT_EE_TX0_POWER_5G + chain_idx * 12 + group;
41333 +               if (!tssi_on)
41334 +                       target_power += mt7915_eeprom_read(dev, index + 8);
41335         }
41337 -       return mt7915_eeprom_read(dev, index);
41338 +       return target_power;
41341  static const u8 sku_cck_delta_map[] = {
41342 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
41343 index ad4e5b95158b..c7d4268d860a 100644
41344 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
41345 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
41346 @@ -4,6 +4,7 @@
41347  #include <linux/etherdevice.h>
41348  #include "mt7915.h"
41349  #include "mac.h"
41350 +#include "mcu.h"
41351  #include "eeprom.h"
41353  #define CCK_RATE(_idx, _rate) {                                                \
41354 @@ -283,9 +284,50 @@ static void mt7915_init_work(struct work_struct *work)
41355         mt7915_register_ext_phy(dev);
41358 +static void mt7915_wfsys_reset(struct mt7915_dev *dev)
41360 +       u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
41361 +       u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
41363 +#define MT_MCU_DUMMY_RANDOM    GENMASK(15, 0)
41364 +#define MT_MCU_DUMMY_DEFAULT   GENMASK(31, 16)
41366 +       mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
41368 +       /* change to software control */
41369 +       val |= MT_TOP_PWR_SW_RST;
41370 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
41372 +       /* reset wfsys */
41373 +       val &= ~MT_TOP_PWR_SW_RST;
41374 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
41376 +       /* release wfsys then mcu re-excutes romcode */
41377 +       val |= MT_TOP_PWR_SW_RST;
41378 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
41380 +       /* switch to hw control */
41381 +       val &= ~MT_TOP_PWR_SW_RST;
41382 +       val |= MT_TOP_PWR_HW_CTRL;
41383 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
41385 +       /* check whether mcu resets to default */
41386 +       if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
41387 +                           MT_MCU_DUMMY_DEFAULT, 1000)) {
41388 +               dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
41389 +               return;
41390 +       }
41392 +       /* wfsys reset won't clear host registers */
41393 +       mt76_clear(dev, reg, MT_TOP_MISC_FW_STATE);
41395 +       msleep(100);
41398  static int mt7915_init_hardware(struct mt7915_dev *dev)
41400         int ret, idx;
41401 +       u32 val;
41403         mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
41405 @@ -295,6 +337,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
41407         dev->dbdc_support = !!(mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5));
41409 +       val = mt76_rr(dev, mt7915_reg_map_l1(dev, MT_TOP_MISC));
41411 +       /* If MCU was already running, it is likely in a bad state */
41412 +       if (FIELD_GET(MT_TOP_MISC_FW_STATE, val) > FW_STATE_FW_DOWNLOAD)
41413 +               mt7915_wfsys_reset(dev);
41415         ret = mt7915_dma_init(dev);
41416         if (ret)
41417                 return ret;
41418 @@ -308,8 +356,14 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
41419         mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
41421         ret = mt7915_mcu_init(dev);
41422 -       if (ret)
41423 -               return ret;
41424 +       if (ret) {
41425 +               /* Reset and try again */
41426 +               mt7915_wfsys_reset(dev);
41428 +               ret = mt7915_mcu_init(dev);
41429 +               if (ret)
41430 +                       return ret;
41431 +       }
41433         ret = mt7915_eeprom_init(dev);
41434         if (ret < 0)
41435 @@ -675,9 +729,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
41436         mt7915_unregister_ext_phy(dev);
41437         mt76_unregister_device(&dev->mt76);
41438         mt7915_mcu_exit(dev);
41439 -       mt7915_dma_cleanup(dev);
41441         mt7915_tx_token_put(dev);
41442 +       mt7915_dma_cleanup(dev);
41444         mt76_free_device(&dev->mt76);
41446 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
41447 index e5a258958ac9..819670767521 100644
41448 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
41449 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
41450 @@ -1091,7 +1091,7 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev,
41451         int i;
41453         txp = mt7915_txwi_to_txp(dev, t);
41454 -       for (i = 1; i < txp->nbuf; i++)
41455 +       for (i = 0; i < txp->nbuf; i++)
41456                 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
41457                                  le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
41459 @@ -1470,9 +1470,8 @@ mt7915_update_beacons(struct mt7915_dev *dev)
41462  static void
41463 -mt7915_dma_reset(struct mt7915_phy *phy)
41464 +mt7915_dma_reset(struct mt7915_dev *dev)
41466 -       struct mt7915_dev *dev = phy->dev;
41467         struct mt76_phy *mphy_ext = dev->mt76.phy2;
41468         u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
41469         int i;
41470 @@ -1489,18 +1488,20 @@ mt7915_dma_reset(struct mt7915_phy *phy)
41471                            (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
41472                             MT_WFDMA1_GLO_CFG_RX_DMA_EN));
41473         }
41475         usleep_range(1000, 2000);
41477 -       mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
41478         for (i = 0; i < __MT_TXQ_MAX; i++) {
41479 -               mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
41480 +               mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
41481                 if (mphy_ext)
41482                         mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
41483         }
41485 -       mt76_for_each_q_rx(&dev->mt76, i) {
41486 +       for (i = 0; i < __MT_MCUQ_MAX; i++)
41487 +               mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
41489 +       mt76_for_each_q_rx(&dev->mt76, i)
41490                 mt76_queue_rx_reset(dev, i);
41491 -       }
41493         /* re-init prefetch settings after reset */
41494         mt7915_dma_prefetch(dev);
41495 @@ -1584,7 +1585,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
41496         idr_init(&dev->token);
41498         if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
41499 -               mt7915_dma_reset(&dev->phy);
41500 +               mt7915_dma_reset(dev);
41502                 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
41503                 mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
41504 @@ -1633,39 +1634,30 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
41505         bool ext_phy = phy != &dev->phy;
41506         int i, aggr0, aggr1;
41508 -       memset(mib, 0, sizeof(*mib));
41510 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
41511 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
41512 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
41513 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
41515         aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
41516         for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
41517 -               u32 val, val2;
41518 +               u32 val;
41520                 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
41522 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
41523 -               if (val2 > mib->ack_fail_cnt)
41524 -                       mib->ack_fail_cnt = val2;
41526 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
41527 -               if (val2 > mib->ba_miss_cnt)
41528 -                       mib->ba_miss_cnt = val2;
41529 +               mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
41530 +               mib->ack_fail_cnt +=
41531 +                       FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
41533                 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
41534 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
41535 -               if (val2 > mib->rts_retries_cnt) {
41536 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
41537 -                       mib->rts_retries_cnt = val2;
41538 -               }
41539 +               mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
41540 +               mib->rts_retries_cnt +=
41541 +                       FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
41543                 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
41544 -               val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
41546                 dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
41547                 dev->mt76.aggr_stats[aggr0++] += val >> 16;
41548 -               dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
41549 -               dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
41551 +               val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
41552 +               dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
41553 +               dev->mt76.aggr_stats[aggr1++] += val >> 16;
41554         }
41557 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
41558 index d4969b2e1ffb..bf032d943f74 100644
41559 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
41560 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
41561 @@ -317,7 +317,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
41562         struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
41563                                   &mvif->sta;
41564         struct mt76_wcid *wcid = &msta->wcid;
41565 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
41566         int idx = key->keyidx;
41567 +       int err = 0;
41569         /* The hardware does not support per-STA RX GTK, fallback
41570          * to software mode for these.
41571 @@ -332,6 +334,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
41572         /* fall back to sw encryption for unsupported ciphers */
41573         switch (key->cipher) {
41574         case WLAN_CIPHER_SUITE_AES_CMAC:
41575 +               wcid_keyidx = &wcid->hw_key_idx2;
41576                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
41577                 break;
41578         case WLAN_CIPHER_SUITE_TKIP:
41579 @@ -347,16 +350,24 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
41580                 return -EOPNOTSUPP;
41581         }
41583 -       if (cmd == SET_KEY) {
41584 -               key->hw_key_idx = wcid->idx;
41585 -               wcid->hw_key_idx = idx;
41586 -       } else if (idx == wcid->hw_key_idx) {
41587 -               wcid->hw_key_idx = -1;
41588 -       }
41589 +       mutex_lock(&dev->mt76.mutex);
41591 +       if (cmd == SET_KEY)
41592 +               *wcid_keyidx = idx;
41593 +       else if (idx == *wcid_keyidx)
41594 +               *wcid_keyidx = -1;
41595 +       else
41596 +               goto out;
41598         mt76_wcid_key_setup(&dev->mt76, wcid,
41599                             cmd == SET_KEY ? key : NULL);
41601 -       return mt7915_mcu_add_key(dev, vif, msta, key, cmd);
41602 +       err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
41604 +out:
41605 +       mutex_unlock(&dev->mt76.mutex);
41607 +       return err;
41610  static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
41611 @@ -717,13 +728,19 @@ mt7915_get_stats(struct ieee80211_hw *hw,
41612                  struct ieee80211_low_level_stats *stats)
41614         struct mt7915_phy *phy = mt7915_hw_phy(hw);
41615 +       struct mt7915_dev *dev = mt7915_hw_dev(hw);
41616         struct mib_stats *mib = &phy->mib;
41618 +       mutex_lock(&dev->mt76.mutex);
41619         stats->dot11RTSSuccessCount = mib->rts_cnt;
41620         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
41621         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
41622         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
41624 +       memset(mib, 0, sizeof(*mib));
41626 +       mutex_unlock(&dev->mt76.mutex);
41628         return 0;
41631 @@ -833,9 +850,12 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
41632         struct mt7915_phy *phy = mt7915_hw_phy(hw);
41633         struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
41634         struct mt7915_sta_stats *stats = &msta->stats;
41635 +       struct rate_info rxrate = {};
41637 -       if (mt7915_mcu_get_rx_rate(phy, vif, sta, &sinfo->rxrate) == 0)
41638 +       if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
41639 +               sinfo->rxrate = rxrate;
41640                 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
41641 +       }
41643         if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
41644                 return;
41645 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
41646 index 195929242b72..f069a5a03e14 100644
41647 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
41648 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
41649 @@ -351,54 +351,62 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
41650         dev->hw_pattern++;
41653 -static void
41654 +static int
41655  mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
41656                          struct rate_info *rate, u16 r)
41658         struct ieee80211_supported_band *sband;
41659         u16 ru_idx = le16_to_cpu(ra->ru_idx);
41660 -       u16 flags = 0;
41661 +       bool cck = false;
41663         rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r);
41664         rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1;
41666         switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) {
41667         case MT_PHY_TYPE_CCK:
41668 +               cck = true;
41669 +               fallthrough;
41670         case MT_PHY_TYPE_OFDM:
41671                 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
41672                         sband = &mphy->sband_5g.sband;
41673                 else
41674                         sband = &mphy->sband_2g.sband;
41676 +               rate->mcs = mt76_get_rate(mphy->dev, sband, rate->mcs, cck);
41677                 rate->legacy = sband->bitrates[rate->mcs].bitrate;
41678                 break;
41679         case MT_PHY_TYPE_HT:
41680         case MT_PHY_TYPE_HT_GF:
41681                 rate->mcs += (rate->nss - 1) * 8;
41682 -               flags |= RATE_INFO_FLAGS_MCS;
41683 +               if (rate->mcs > 31)
41684 +                       return -EINVAL;
41686 +               rate->flags = RATE_INFO_FLAGS_MCS;
41687                 if (ra->gi)
41688 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
41689 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
41690                 break;
41691         case MT_PHY_TYPE_VHT:
41692 -               flags |= RATE_INFO_FLAGS_VHT_MCS;
41693 +               if (rate->mcs > 9)
41694 +                       return -EINVAL;
41696 +               rate->flags = RATE_INFO_FLAGS_VHT_MCS;
41697                 if (ra->gi)
41698 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
41699 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
41700                 break;
41701         case MT_PHY_TYPE_HE_SU:
41702         case MT_PHY_TYPE_HE_EXT_SU:
41703         case MT_PHY_TYPE_HE_TB:
41704         case MT_PHY_TYPE_HE_MU:
41705 +               if (ra->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11)
41706 +                       return -EINVAL;
41708                 rate->he_gi = ra->gi;
41709                 rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
41711 -               flags |= RATE_INFO_FLAGS_HE_MCS;
41712 +               rate->flags = RATE_INFO_FLAGS_HE_MCS;
41713                 break;
41714         default:
41715 -               break;
41716 +               return -EINVAL;
41717         }
41718 -       rate->flags = flags;
41720         if (ru_idx) {
41721                 switch (ru_idx) {
41722 @@ -435,6 +443,8 @@ mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
41723                         break;
41724                 }
41725         }
41727 +       return 0;
41730  static void
41731 @@ -465,12 +475,12 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
41732                 mphy = dev->mt76.phy2;
41734         /* current rate */
41735 -       mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr);
41736 -       stats->tx_rate = rate;
41737 +       if (!mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr))
41738 +               stats->tx_rate = rate;
41740         /* probing rate */
41741 -       mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe);
41742 -       stats->prob_rate = prob_rate;
41743 +       if (!mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe))
41744 +               stats->prob_rate = prob_rate;
41746         if (attempts) {
41747                 u16 success = le16_to_cpu(ra->success);
41748 @@ -1188,6 +1198,9 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
41750         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
41751                                              &skb);
41752 +       if (IS_ERR(wtbl_hdr))
41753 +               return PTR_ERR(wtbl_hdr);
41755         mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
41757         ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
41758 @@ -1704,6 +1717,9 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
41759                 return -ENOMEM;
41761         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
41762 +       if (IS_ERR(wtbl_hdr))
41763 +               return PTR_ERR(wtbl_hdr);
41765         mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
41767         return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE),
41768 @@ -1728,6 +1744,9 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
41770         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
41771                                              &skb);
41772 +       if (IS_ERR(wtbl_hdr))
41773 +               return PTR_ERR(wtbl_hdr);
41775         mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
41777         return mt76_mcu_skb_send_msg(&dev->mt76, skb,
41778 @@ -2253,6 +2272,9 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
41780         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
41781                                              sta_wtbl, &skb);
41782 +       if (IS_ERR(wtbl_hdr))
41783 +               return PTR_ERR(wtbl_hdr);
41785         if (enable) {
41786                 mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
41787                 mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
41788 @@ -2742,21 +2764,8 @@ static int mt7915_load_ram(struct mt7915_dev *dev)
41790  static int mt7915_load_firmware(struct mt7915_dev *dev)
41792 +       u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
41793         int ret;
41794 -       u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
41796 -       val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD);
41798 -       if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, val, 1000)) {
41799 -               /* restart firmware once */
41800 -               __mt76_mcu_restart(&dev->mt76);
41801 -               if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
41802 -                                   val, 1000)) {
41803 -                       dev_err(dev->mt76.dev,
41804 -                               "Firmware is not ready for download\n");
41805 -                       return -EIO;
41806 -               }
41807 -       }
41809         ret = mt7915_load_patch(dev);
41810         if (ret)
41811 @@ -3501,9 +3510,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
41812         struct ieee80211_supported_band *sband;
41813         struct mt7915_mcu_phy_rx_info *res;
41814         struct sk_buff *skb;
41815 -       u16 flags = 0;
41816         int ret;
41817 -       int i;
41818 +       bool cck = false;
41820         ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO),
41821                                         &req, sizeof(req), true, &skb);
41822 @@ -3517,48 +3525,53 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
41824         switch (res->mode) {
41825         case MT_PHY_TYPE_CCK:
41826 +               cck = true;
41827 +               fallthrough;
41828         case MT_PHY_TYPE_OFDM:
41829                 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
41830                         sband = &mphy->sband_5g.sband;
41831                 else
41832                         sband = &mphy->sband_2g.sband;
41834 -               for (i = 0; i < sband->n_bitrates; i++) {
41835 -                       if (rate->mcs != (sband->bitrates[i].hw_value & 0xf))
41836 -                               continue;
41838 -                       rate->legacy = sband->bitrates[i].bitrate;
41839 -                       break;
41840 -               }
41841 +               rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck);
41842 +               rate->legacy = sband->bitrates[rate->mcs].bitrate;
41843                 break;
41844         case MT_PHY_TYPE_HT:
41845         case MT_PHY_TYPE_HT_GF:
41846 -               if (rate->mcs > 31)
41847 -                       return -EINVAL;
41849 -               flags |= RATE_INFO_FLAGS_MCS;
41850 +               if (rate->mcs > 31) {
41851 +                       ret = -EINVAL;
41852 +                       goto out;
41853 +               }
41855 +               rate->flags = RATE_INFO_FLAGS_MCS;
41856                 if (res->gi)
41857 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
41858 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
41859                 break;
41860         case MT_PHY_TYPE_VHT:
41861 -               flags |= RATE_INFO_FLAGS_VHT_MCS;
41862 +               if (rate->mcs > 9) {
41863 +                       ret = -EINVAL;
41864 +                       goto out;
41865 +               }
41867 +               rate->flags = RATE_INFO_FLAGS_VHT_MCS;
41868                 if (res->gi)
41869 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
41870 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
41871                 break;
41872         case MT_PHY_TYPE_HE_SU:
41873         case MT_PHY_TYPE_HE_EXT_SU:
41874         case MT_PHY_TYPE_HE_TB:
41875         case MT_PHY_TYPE_HE_MU:
41876 +               if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) {
41877 +                       ret = -EINVAL;
41878 +                       goto out;
41879 +               }
41880                 rate->he_gi = res->gi;
41882 -               flags |= RATE_INFO_FLAGS_HE_MCS;
41883 +               rate->flags = RATE_INFO_FLAGS_HE_MCS;
41884                 break;
41885         default:
41886 -               break;
41887 +               ret = -EINVAL;
41888 +               goto out;
41889         }
41890 -       rate->flags = flags;
41892         switch (res->bw) {
41893         case IEEE80211_STA_RX_BW_160:
41894 @@ -3575,7 +3588,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
41895                 break;
41896         }
41898 +out:
41899         dev_kfree_skb(skb);
41901 -       return 0;
41902 +       return ret;
41904 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
41905 index 5c7eefdf2013..1160d1bf8a7c 100644
41906 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
41907 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
41908 @@ -108,11 +108,11 @@ struct mt7915_vif {
41909  };
41911  struct mib_stats {
41912 -       u16 ack_fail_cnt;
41913 -       u16 fcs_err_cnt;
41914 -       u16 rts_cnt;
41915 -       u16 rts_retries_cnt;
41916 -       u16 ba_miss_cnt;
41917 +       u32 ack_fail_cnt;
41918 +       u32 fcs_err_cnt;
41919 +       u32 rts_cnt;
41920 +       u32 rts_retries_cnt;
41921 +       u32 ba_miss_cnt;
41922  };
41924  struct mt7915_hif {
41925 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
41926 index ed0c9a24bb53..dfb8880657bf 100644
41927 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
41928 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
41929 @@ -4,6 +4,11 @@
41930  #ifndef __MT7915_REGS_H
41931  #define __MT7915_REGS_H
41933 +/* MCU WFDMA0 */
41934 +#define MT_MCU_WFDMA0_BASE             0x2000
41935 +#define MT_MCU_WFDMA0(ofs)             (MT_MCU_WFDMA0_BASE + (ofs))
41936 +#define MT_MCU_WFDMA0_DUMMY_CR         MT_MCU_WFDMA0(0x120)
41938  /* MCU WFDMA1 */
41939  #define MT_MCU_WFDMA1_BASE             0x3000
41940  #define MT_MCU_WFDMA1(ofs)             (MT_MCU_WFDMA1_BASE + (ofs))
41941 @@ -396,6 +401,14 @@
41942  #define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1      BIT(1)
41943  #define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO       BIT(2)
41945 +#define MT_TOP_RGU_BASE                                0xf0000
41946 +#define MT_TOP_PWR_CTRL                                (MT_TOP_RGU_BASE + (0x0))
41947 +#define MT_TOP_PWR_KEY                         (0x5746 << 16)
41948 +#define MT_TOP_PWR_SW_RST                      BIT(0)
41949 +#define MT_TOP_PWR_SW_PWR_ON                   GENMASK(3, 2)
41950 +#define MT_TOP_PWR_HW_CTRL                     BIT(4)
41951 +#define MT_TOP_PWR_PWR_ON                      BIT(7)
41953  #define MT_INFRA_CFG_BASE              0xf1000
41954  #define MT_INFRA(ofs)                  (MT_INFRA_CFG_BASE + (ofs))
41956 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
41957 index 0dc8e25e18e4..87a7ea12f3b3 100644
41958 --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
41959 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
41960 @@ -9,10 +9,13 @@ mt7921_fw_debug_set(void *data, u64 val)
41962         struct mt7921_dev *dev = data;
41964 -       dev->fw_debug = (u8)val;
41965 +       mt7921_mutex_acquire(dev);
41967 +       dev->fw_debug = (u8)val;
41968         mt7921_mcu_fw_log_2_host(dev, dev->fw_debug);
41970 +       mt7921_mutex_release(dev);
41972         return 0;
41975 @@ -44,14 +47,13 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy,
41976                 range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i));
41978         for (i = 0; i < ARRAY_SIZE(bound); i++)
41979 -               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
41980 +               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
41982         seq_printf(file, "\nPhy0\n");
41984         seq_printf(file, "Length: %8d | ", bound[0]);
41985         for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
41986 -               seq_printf(file, "%3d -%3d | ",
41987 -                          bound[i] + 1, bound[i + 1]);
41988 +               seq_printf(file, "%3d  %3d | ", bound[i] + 1, bound[i + 1]);
41990         seq_puts(file, "\nCount:  ");
41991         for (i = 0; i < ARRAY_SIZE(bound); i++)
41992 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
41993 index 3f9097481a5e..a6d2a25b3495 100644
41994 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
41995 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
41996 @@ -400,7 +400,9 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
41998         /* RXD Group 3 - P-RXV */
41999         if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
42000 -               u32 v0, v1, v2;
42001 +               u8 stbc, gi;
42002 +               u32 v0, v1;
42003 +               bool cck;
42005                 rxv = rxd;
42006                 rxd += 2;
42007 @@ -409,7 +411,6 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
42009                 v0 = le32_to_cpu(rxv[0]);
42010                 v1 = le32_to_cpu(rxv[1]);
42011 -               v2 = le32_to_cpu(rxv[2]);
42013                 if (v0 & MT_PRXV_HT_AD_CODE)
42014                         status->enc_flags |= RX_ENC_FLAG_LDPC;
42015 @@ -429,87 +430,87 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
42016                                              status->chain_signal[i]);
42017                 }
42019 -               /* RXD Group 5 - C-RXV */
42020 -               if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
42021 -                       u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
42022 -                       u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
42023 -                       bool cck = false;
42024 +               stbc = FIELD_GET(MT_PRXV_STBC, v0);
42025 +               gi = FIELD_GET(MT_PRXV_SGI, v0);
42026 +               cck = false;
42028 -                       rxd += 18;
42029 -                       if ((u8 *)rxd - skb->data >= skb->len)
42030 -                               return -EINVAL;
42031 +               idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
42032 +               mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
42034 -                       idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
42035 -                       mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
42037 -                       switch (mode) {
42038 -                       case MT_PHY_TYPE_CCK:
42039 -                               cck = true;
42040 -                               fallthrough;
42041 -                       case MT_PHY_TYPE_OFDM:
42042 -                               i = mt76_get_rate(&dev->mt76, sband, i, cck);
42043 -                               break;
42044 -                       case MT_PHY_TYPE_HT_GF:
42045 -                       case MT_PHY_TYPE_HT:
42046 -                               status->encoding = RX_ENC_HT;
42047 -                               if (i > 31)
42048 -                                       return -EINVAL;
42049 -                               break;
42050 -                       case MT_PHY_TYPE_VHT:
42051 -                               status->nss =
42052 -                                       FIELD_GET(MT_PRXV_NSTS, v0) + 1;
42053 -                               status->encoding = RX_ENC_VHT;
42054 -                               if (i > 9)
42055 -                                       return -EINVAL;
42056 -                               break;
42057 -                       case MT_PHY_TYPE_HE_MU:
42058 -                               status->flag |= RX_FLAG_RADIOTAP_HE_MU;
42059 -                               fallthrough;
42060 -                       case MT_PHY_TYPE_HE_SU:
42061 -                       case MT_PHY_TYPE_HE_EXT_SU:
42062 -                       case MT_PHY_TYPE_HE_TB:
42063 -                               status->nss =
42064 -                                       FIELD_GET(MT_PRXV_NSTS, v0) + 1;
42065 -                               status->encoding = RX_ENC_HE;
42066 -                               status->flag |= RX_FLAG_RADIOTAP_HE;
42067 -                               i &= GENMASK(3, 0);
42069 -                               if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
42070 -                                       status->he_gi = gi;
42072 -                               status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
42073 -                               break;
42074 -                       default:
42075 +               switch (mode) {
42076 +               case MT_PHY_TYPE_CCK:
42077 +                       cck = true;
42078 +                       fallthrough;
42079 +               case MT_PHY_TYPE_OFDM:
42080 +                       i = mt76_get_rate(&dev->mt76, sband, i, cck);
42081 +                       break;
42082 +               case MT_PHY_TYPE_HT_GF:
42083 +               case MT_PHY_TYPE_HT:
42084 +                       status->encoding = RX_ENC_HT;
42085 +                       if (i > 31)
42086                                 return -EINVAL;
42087 -                       }
42088 -                       status->rate_idx = i;
42090 -                       switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
42091 -                       case IEEE80211_STA_RX_BW_20:
42092 -                               break;
42093 -                       case IEEE80211_STA_RX_BW_40:
42094 -                               if (mode & MT_PHY_TYPE_HE_EXT_SU &&
42095 -                                   (idx & MT_PRXV_TX_ER_SU_106T)) {
42096 -                                       status->bw = RATE_INFO_BW_HE_RU;
42097 -                                       status->he_ru =
42098 -                                               NL80211_RATE_INFO_HE_RU_ALLOC_106;
42099 -                               } else {
42100 -                                       status->bw = RATE_INFO_BW_40;
42101 -                               }
42102 -                               break;
42103 -                       case IEEE80211_STA_RX_BW_80:
42104 -                               status->bw = RATE_INFO_BW_80;
42105 -                               break;
42106 -                       case IEEE80211_STA_RX_BW_160:
42107 -                               status->bw = RATE_INFO_BW_160;
42108 -                               break;
42109 -                       default:
42110 +                       break;
42111 +               case MT_PHY_TYPE_VHT:
42112 +                       status->nss =
42113 +                               FIELD_GET(MT_PRXV_NSTS, v0) + 1;
42114 +                       status->encoding = RX_ENC_VHT;
42115 +                       if (i > 9)
42116                                 return -EINVAL;
42117 +                       break;
42118 +               case MT_PHY_TYPE_HE_MU:
42119 +                       status->flag |= RX_FLAG_RADIOTAP_HE_MU;
42120 +                       fallthrough;
42121 +               case MT_PHY_TYPE_HE_SU:
42122 +               case MT_PHY_TYPE_HE_EXT_SU:
42123 +               case MT_PHY_TYPE_HE_TB:
42124 +                       status->nss =
42125 +                               FIELD_GET(MT_PRXV_NSTS, v0) + 1;
42126 +                       status->encoding = RX_ENC_HE;
42127 +                       status->flag |= RX_FLAG_RADIOTAP_HE;
42128 +                       i &= GENMASK(3, 0);
42130 +                       if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
42131 +                               status->he_gi = gi;
42133 +                       status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
42134 +                       break;
42135 +               default:
42136 +                       return -EINVAL;
42137 +               }
42139 +               status->rate_idx = i;
42141 +               switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
42142 +               case IEEE80211_STA_RX_BW_20:
42143 +                       break;
42144 +               case IEEE80211_STA_RX_BW_40:
42145 +                       if (mode & MT_PHY_TYPE_HE_EXT_SU &&
42146 +                           (idx & MT_PRXV_TX_ER_SU_106T)) {
42147 +                               status->bw = RATE_INFO_BW_HE_RU;
42148 +                               status->he_ru =
42149 +                                       NL80211_RATE_INFO_HE_RU_ALLOC_106;
42150 +                       } else {
42151 +                               status->bw = RATE_INFO_BW_40;
42152                         }
42153 +                       break;
42154 +               case IEEE80211_STA_RX_BW_80:
42155 +                       status->bw = RATE_INFO_BW_80;
42156 +                       break;
42157 +               case IEEE80211_STA_RX_BW_160:
42158 +                       status->bw = RATE_INFO_BW_160;
42159 +                       break;
42160 +               default:
42161 +                       return -EINVAL;
42162 +               }
42164 -                       status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
42165 -                       if (mode < MT_PHY_TYPE_HE_SU && gi)
42166 -                               status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
42167 +               status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
42168 +               if (mode < MT_PHY_TYPE_HE_SU && gi)
42169 +                       status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
42171 +               if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
42172 +                       rxd += 18;
42173 +                       if ((u8 *)rxd - skb->data >= skb->len)
42174 +                               return -EINVAL;
42175                 }
42176         }
42178 @@ -1317,31 +1318,20 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
42179         struct mib_stats *mib = &phy->mib;
42180         int i, aggr0 = 0, aggr1;
42182 -       memset(mib, 0, sizeof(*mib));
42184 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(0),
42185 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
42186 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
42187 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
42188 +       mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
42189 +                                           MT_MIB_ACK_FAIL_COUNT_MASK);
42190 +       mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
42191 +                                          MT_MIB_BA_FAIL_COUNT_MASK);
42192 +       mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
42193 +                                      MT_MIB_RTS_COUNT_MASK);
42194 +       mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
42195 +                                              MT_MIB_RTS_FAIL_COUNT_MASK);
42197         for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
42198                 u32 val, val2;
42200 -               val = mt76_rr(dev, MT_MIB_MB_SDR1(0, i));
42202 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
42203 -               if (val2 > mib->ack_fail_cnt)
42204 -                       mib->ack_fail_cnt = val2;
42206 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
42207 -               if (val2 > mib->ba_miss_cnt)
42208 -                       mib->ba_miss_cnt = val2;
42210 -               val = mt76_rr(dev, MT_MIB_MB_SDR0(0, i));
42211 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
42212 -               if (val2 > mib->rts_retries_cnt) {
42213 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
42214 -                       mib->rts_retries_cnt = val2;
42215 -               }
42217                 val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
42218                 val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
42220 @@ -1503,8 +1493,10 @@ void mt7921_coredump_work(struct work_struct *work)
42221                         break;
42223                 skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
42224 -               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
42225 -                       break;
42226 +               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
42227 +                       dev_kfree_skb(skb);
42228 +                       continue;
42229 +               }
42231                 memcpy(data, skb->data, skb->len);
42232                 data += skb->len;
42233 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
42234 index a0c1fa0f20e4..109c8849d106 100644
42235 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
42236 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
42237 @@ -97,18 +97,24 @@ enum rx_pkt_type {
42238  #define MT_RXD3_NORMAL_PF_MODE         BIT(29)
42239  #define MT_RXD3_NORMAL_PF_STS          GENMASK(31, 30)
42241 -/* P-RXV */
42242 +/* P-RXV DW0 */
42243  #define MT_PRXV_TX_RATE                        GENMASK(6, 0)
42244  #define MT_PRXV_TX_DCM                 BIT(4)
42245  #define MT_PRXV_TX_ER_SU_106T          BIT(5)
42246  #define MT_PRXV_NSTS                   GENMASK(9, 7)
42247  #define MT_PRXV_HT_AD_CODE             BIT(11)
42248 +#define MT_PRXV_FRAME_MODE             GENMASK(14, 12)
42249 +#define MT_PRXV_SGI                    GENMASK(16, 15)
42250 +#define MT_PRXV_STBC                   GENMASK(23, 22)
42251 +#define MT_PRXV_TX_MODE                        GENMASK(27, 24)
42252  #define MT_PRXV_HE_RU_ALLOC_L          GENMASK(31, 28)
42253 -#define MT_PRXV_HE_RU_ALLOC_H          GENMASK(3, 0)
42255 +/* P-RXV DW1 */
42256  #define MT_PRXV_RCPI3                  GENMASK(31, 24)
42257  #define MT_PRXV_RCPI2                  GENMASK(23, 16)
42258  #define MT_PRXV_RCPI1                  GENMASK(15, 8)
42259  #define MT_PRXV_RCPI0                  GENMASK(7, 0)
42260 +#define MT_PRXV_HE_RU_ALLOC_H          GENMASK(3, 0)
42262  /* C-RXV */
42263  #define MT_CRXV_HT_STBC                        GENMASK(1, 0)
42264 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
42265 index 729f6c42cdde..ada943c7a950 100644
42266 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
42267 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
42268 @@ -348,6 +348,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
42269         if (vif == phy->monitor_vif)
42270                 phy->monitor_vif = NULL;
42272 +       mt7921_mutex_acquire(dev);
42273         mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
42275         if (dev->pm.enable) {
42276 @@ -360,7 +361,6 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
42278         rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
42280 -       mt7921_mutex_acquire(dev);
42281         dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
42282         phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
42283         mt7921_mutex_release(dev);
42284 @@ -413,7 +413,8 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
42285         struct mt7921_sta *msta = sta ? (struct mt7921_sta *)sta->drv_priv :
42286                                   &mvif->sta;
42287         struct mt76_wcid *wcid = &msta->wcid;
42288 -       int idx = key->keyidx;
42289 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
42290 +       int idx = key->keyidx, err = 0;
42292         /* The hardware does not support per-STA RX GTK, fallback
42293          * to software mode for these.
42294 @@ -429,6 +430,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
42295         switch (key->cipher) {
42296         case WLAN_CIPHER_SUITE_AES_CMAC:
42297                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
42298 +               wcid_keyidx = &wcid->hw_key_idx2;
42299                 break;
42300         case WLAN_CIPHER_SUITE_TKIP:
42301         case WLAN_CIPHER_SUITE_CCMP:
42302 @@ -443,16 +445,23 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
42303                 return -EOPNOTSUPP;
42304         }
42306 -       if (cmd == SET_KEY) {
42307 -               key->hw_key_idx = wcid->idx;
42308 -               wcid->hw_key_idx = idx;
42309 -       } else if (idx == wcid->hw_key_idx) {
42310 -               wcid->hw_key_idx = -1;
42311 -       }
42312 +       mt7921_mutex_acquire(dev);
42314 +       if (cmd == SET_KEY)
42315 +               *wcid_keyidx = idx;
42316 +       else if (idx == *wcid_keyidx)
42317 +               *wcid_keyidx = -1;
42318 +       else
42319 +               goto out;
42321         mt76_wcid_key_setup(&dev->mt76, wcid,
42322                             cmd == SET_KEY ? key : NULL);
42324 -       return mt7921_mcu_add_key(dev, vif, msta, key, cmd);
42325 +       err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
42326 +out:
42327 +       mt7921_mutex_release(dev);
42329 +       return err;
42332  static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
42333 @@ -587,6 +596,9 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
42334         if (changed & BSS_CHANGED_PS)
42335                 mt7921_mcu_uni_bss_ps(dev, vif);
42337 +       if (changed & BSS_CHANGED_ARP_FILTER)
42338 +               mt7921_mcu_update_arp_filter(hw, vif, info);
42340         mt7921_mutex_release(dev);
42343 @@ -814,11 +826,17 @@ mt7921_get_stats(struct ieee80211_hw *hw,
42344         struct mt7921_phy *phy = mt7921_hw_phy(hw);
42345         struct mib_stats *mib = &phy->mib;
42347 +       mt7921_mutex_acquire(phy->dev);
42349         stats->dot11RTSSuccessCount = mib->rts_cnt;
42350         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
42351         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
42352         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
42354 +       memset(mib, 0, sizeof(*mib));
42356 +       mt7921_mutex_release(phy->dev);
42358         return 0;
42361 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
42362 index b5cc72e7e81c..62afbad77596 100644
42363 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
42364 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
42365 @@ -1304,3 +1304,47 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
42366                 mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
42367         }
42370 +int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
42371 +                                struct ieee80211_vif *vif,
42372 +                                struct ieee80211_bss_conf *info)
42374 +       struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
42375 +       struct mt7921_dev *dev = mt7921_hw_dev(hw);
42376 +       struct sk_buff *skb;
42377 +       int i, len = min_t(int, info->arp_addr_cnt,
42378 +                          IEEE80211_BSS_ARP_ADDR_LIST_LEN);
42379 +       struct {
42380 +               struct {
42381 +                       u8 bss_idx;
42382 +                       u8 pad[3];
42383 +               } __packed hdr;
42384 +               struct mt76_connac_arpns_tlv arp;
42385 +       } req_hdr = {
42386 +               .hdr = {
42387 +                       .bss_idx = mvif->mt76.idx,
42388 +               },
42389 +               .arp = {
42390 +                       .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
42391 +                       .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
42392 +                       .ips_num = len,
42393 +                       .mode = 2,  /* update */
42394 +                       .option = 1,
42395 +               },
42396 +       };
42398 +       skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
42399 +                                sizeof(req_hdr) + len * sizeof(__be32));
42400 +       if (!skb)
42401 +               return -ENOMEM;
42403 +       skb_put_data(skb, &req_hdr, sizeof(req_hdr));
42404 +       for (i = 0; i < len; i++) {
42405 +               u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
42407 +               memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
42408 +       }
42410 +       return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD,
42411 +                                    true);
42413 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
42414 index 46e6aeec35ae..25a1a6acb6ba 100644
42415 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
42416 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
42417 @@ -102,11 +102,11 @@ struct mt7921_vif {
42418  };
42420  struct mib_stats {
42421 -       u16 ack_fail_cnt;
42422 -       u16 fcs_err_cnt;
42423 -       u16 rts_cnt;
42424 -       u16 rts_retries_cnt;
42425 -       u16 ba_miss_cnt;
42426 +       u32 ack_fail_cnt;
42427 +       u32 fcs_err_cnt;
42428 +       u32 rts_cnt;
42429 +       u32 rts_retries_cnt;
42430 +       u32 ba_miss_cnt;
42431  };
42433  struct mt7921_phy {
42434 @@ -339,4 +339,7 @@ int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
42435                                  bool enable);
42436  void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
42437  void mt7921_coredump_work(struct work_struct *work);
42438 +int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
42439 +                                struct ieee80211_vif *vif,
42440 +                                struct ieee80211_bss_conf *info);
42441  #endif
42442 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
42443 index 5570b4a50531..80f6f29892a4 100644
42444 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
42445 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
42446 @@ -137,7 +137,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
42448         mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
42450 -       mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
42451 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
42453         ret = devm_request_irq(mdev->dev, pdev->irq, mt7921_irq_handler,
42454                                IRQF_SHARED, KBUILD_MODNAME, dev);
42455 @@ -146,10 +146,12 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
42457         ret = mt7921_register_device(dev);
42458         if (ret)
42459 -               goto err_free_dev;
42460 +               goto err_free_irq;
42462         return 0;
42464 +err_free_irq:
42465 +       devm_free_irq(&pdev->dev, pdev->irq, dev);
42466  err_free_dev:
42467         mt76_free_device(&dev->mt76);
42468  err_free_pci_vec:
42469 @@ -193,7 +195,6 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
42470         mt76_for_each_q_rx(mdev, i) {
42471                 napi_disable(&mdev->napi[i]);
42472         }
42473 -       tasklet_kill(&dev->irq_tasklet);
42475         pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
42477 @@ -208,13 +209,16 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
42479         /* disable interrupt */
42480         mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
42481 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
42482 +       synchronize_irq(pdev->irq);
42483 +       tasklet_kill(&dev->irq_tasklet);
42485 -       pci_save_state(pdev);
42486 -       err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
42487 +       err = mt7921_mcu_fw_pmctrl(dev);
42488         if (err)
42489                 goto restore;
42491 -       err = mt7921_mcu_drv_pmctrl(dev);
42492 +       pci_save_state(pdev);
42493 +       err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
42494         if (err)
42495                 goto restore;
42497 @@ -237,18 +241,18 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
42498         struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
42499         int i, err;
42501 -       err = mt7921_mcu_fw_pmctrl(dev);
42502 -       if (err < 0)
42503 -               return err;
42505         err = pci_set_power_state(pdev, PCI_D0);
42506         if (err)
42507                 return err;
42509         pci_restore_state(pdev);
42511 +       err = mt7921_mcu_drv_pmctrl(dev);
42512 +       if (err < 0)
42513 +               return err;
42515         /* enable interrupt */
42516 -       mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
42517 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
42518         mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
42519                           MT_INT_MCU_CMD);
42521 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
42522 index 6dad7f6ab09d..73878d3e2495 100644
42523 --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
42524 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
42525 @@ -96,8 +96,8 @@
42526  #define MT_WF_MIB_BASE(_band)          ((_band) ? 0xa4800 : 0x24800)
42527  #define MT_WF_MIB(_band, ofs)          (MT_WF_MIB_BASE(_band) + (ofs))
42529 -#define MT_MIB_SDR3(_band)             MT_WF_MIB(_band, 0x014)
42530 -#define MT_MIB_SDR3_FCS_ERR_MASK       GENMASK(15, 0)
42531 +#define MT_MIB_SDR3(_band)             MT_WF_MIB(_band, 0x698)
42532 +#define MT_MIB_SDR3_FCS_ERR_MASK       GENMASK(31, 16)
42534  #define MT_MIB_SDR9(_band)             MT_WF_MIB(_band, 0x02c)
42535  #define MT_MIB_SDR9_BUSY_MASK          GENMASK(23, 0)
42536 @@ -121,16 +121,21 @@
42537  #define MT_MIB_RTS_RETRIES_COUNT_MASK  GENMASK(31, 16)
42538  #define MT_MIB_RTS_COUNT_MASK          GENMASK(15, 0)
42540 -#define MT_MIB_MB_SDR1(_band, n)       MT_WF_MIB(_band, 0x104 + ((n) << 4))
42541 -#define MT_MIB_BA_MISS_COUNT_MASK      GENMASK(15, 0)
42542 -#define MT_MIB_ACK_FAIL_COUNT_MASK     GENMASK(31, 16)
42543 +#define MT_MIB_MB_BSDR0(_band)         MT_WF_MIB(_band, 0x688)
42544 +#define MT_MIB_RTS_COUNT_MASK          GENMASK(15, 0)
42545 +#define MT_MIB_MB_BSDR1(_band)         MT_WF_MIB(_band, 0x690)
42546 +#define MT_MIB_RTS_FAIL_COUNT_MASK     GENMASK(15, 0)
42547 +#define MT_MIB_MB_BSDR2(_band)         MT_WF_MIB(_band, 0x518)
42548 +#define MT_MIB_BA_FAIL_COUNT_MASK      GENMASK(15, 0)
42549 +#define MT_MIB_MB_BSDR3(_band)         MT_WF_MIB(_band, 0x520)
42550 +#define MT_MIB_ACK_FAIL_COUNT_MASK     GENMASK(15, 0)
42552  #define MT_MIB_MB_SDR2(_band, n)       MT_WF_MIB(_band, 0x108 + ((n) << 4))
42553  #define MT_MIB_FRAME_RETRIES_COUNT_MASK        GENMASK(15, 0)
42555 -#define MT_TX_AGG_CNT(_band, n)                MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
42556 -#define MT_TX_AGG_CNT2(_band, n)       MT_WF_MIB(_band, 0x164 + ((n) << 2))
42557 -#define MT_MIB_ARNG(_band, n)          MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
42558 +#define MT_TX_AGG_CNT(_band, n)                MT_WF_MIB(_band, 0x7dc + ((n) << 2))
42559 +#define MT_TX_AGG_CNT2(_band, n)       MT_WF_MIB(_band, 0x7ec + ((n) << 2))
42560 +#define MT_MIB_ARNG(_band, n)          MT_WF_MIB(_band, 0x0b0 + ((n) << 2))
42561  #define MT_MIB_ARNCR_RANGE(val, n)     (((val) >> ((n) << 3)) & GENMASK(7, 0))
42563  #define MT_WTBLON_TOP_BASE             0x34000
42564 @@ -357,11 +362,11 @@
42565  #define MT_INFRA_CFG_BASE              0xfe000
42566  #define MT_INFRA(ofs)                  (MT_INFRA_CFG_BASE + (ofs))
42568 -#define MT_HIF_REMAP_L1                        MT_INFRA(0x260)
42569 +#define MT_HIF_REMAP_L1                        MT_INFRA(0x24c)
42570  #define MT_HIF_REMAP_L1_MASK           GENMASK(15, 0)
42571  #define MT_HIF_REMAP_L1_OFFSET         GENMASK(15, 0)
42572  #define MT_HIF_REMAP_L1_BASE           GENMASK(31, 16)
42573 -#define MT_HIF_REMAP_BASE_L1           0xe0000
42574 +#define MT_HIF_REMAP_BASE_L1           0x40000
42576  #define MT_SWDEF_BASE                  0x41f200
42577  #define MT_SWDEF(ofs)                  (MT_SWDEF_BASE + (ofs))
42578 @@ -384,7 +389,7 @@
42579  #define MT_HW_CHIPID                   0x70010200
42580  #define MT_HW_REV                      0x70010204
42582 -#define MT_PCIE_MAC_BASE               0x74030000
42583 +#define MT_PCIE_MAC_BASE               0x10000
42584  #define MT_PCIE_MAC(ofs)               (MT_PCIE_MAC_BASE + (ofs))
42585  #define MT_PCIE_MAC_INT_ENABLE         MT_PCIE_MAC(0x188)
42587 diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
42588 index 0b6facb17ff7..a18d2896ee1f 100644
42589 --- a/drivers/net/wireless/mediatek/mt76/sdio.c
42590 +++ b/drivers/net/wireless/mediatek/mt76/sdio.c
42591 @@ -256,6 +256,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
42593         q->entry[q->head].skb = tx_info.skb;
42594         q->entry[q->head].buf_sz = len;
42596 +       smp_wmb();
42598         q->head = (q->head + 1) % q->ndesc;
42599         q->queued++;
42601 diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
42602 index b8fe8adc43a3..451ed60c6296 100644
42603 --- a/drivers/net/wireless/mediatek/mt76/tx.c
42604 +++ b/drivers/net/wireless/mediatek/mt76/tx.c
42605 @@ -461,11 +461,11 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
42606         int ret = 0;
42608         while (1) {
42609 +               int n_frames = 0;
42611                 if (test_bit(MT76_STATE_PM, &phy->state) ||
42612 -                   test_bit(MT76_RESET, &phy->state)) {
42613 -                       ret = -EBUSY;
42614 -                       break;
42615 -               }
42616 +                   test_bit(MT76_RESET, &phy->state))
42617 +                       return -EBUSY;
42619                 if (dev->queue_ops->tx_cleanup &&
42620                     q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
42621 @@ -497,11 +497,16 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
42622                 }
42624                 if (!mt76_txq_stopped(q))
42625 -                       ret += mt76_txq_send_burst(phy, q, mtxq);
42626 +                       n_frames = mt76_txq_send_burst(phy, q, mtxq);
42628                 spin_unlock_bh(&q->lock);
42630                 ieee80211_return_txq(phy->hw, txq, false);
42632 +               if (unlikely(n_frames < 0))
42633 +                       return n_frames;
42635 +               ret += n_frames;
42636         }
42638         return ret;
42639 diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
42640 index c868582c5d22..aa3b64902cf9 100644
42641 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
42642 +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
42643 @@ -99,7 +99,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
42645         u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
42647 -       return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
42648 +       return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
42651  static void
42652 diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
42653 index 1b205e7d97a8..37f40039e4ca 100644
42654 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c
42655 +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
42656 @@ -575,7 +575,6 @@ static int wilc_mac_open(struct net_device *ndev)
42658         struct wilc_vif *vif = netdev_priv(ndev);
42659         struct wilc *wl = vif->wilc;
42660 -       unsigned char mac_add[ETH_ALEN] = {0};
42661         int ret = 0;
42662         struct mgmt_frame_regs mgmt_regs = {};
42664 @@ -598,9 +597,12 @@ static int wilc_mac_open(struct net_device *ndev)
42666         wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
42667                                 vif->idx);
42668 -       wilc_get_mac_address(vif, mac_add);
42669 -       netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
42670 -       ether_addr_copy(ndev->dev_addr, mac_add);
42672 +       if (is_valid_ether_addr(ndev->dev_addr))
42673 +               wilc_set_mac_address(vif, ndev->dev_addr);
42674 +       else
42675 +               wilc_get_mac_address(vif, ndev->dev_addr);
42676 +       netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr);
42678         if (!is_valid_ether_addr(ndev->dev_addr)) {
42679                 netdev_err(ndev, "Wrong MAC address\n");
42680 @@ -639,7 +641,14 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
42681         int srcu_idx;
42683         if (!is_valid_ether_addr(addr->sa_data))
42684 -               return -EINVAL;
42685 +               return -EADDRNOTAVAIL;
42687 +       if (!vif->mac_opened) {
42688 +               eth_commit_mac_addr_change(dev, p);
42689 +               return 0;
42690 +       }
42692 +       /* Verify MAC Address is not already in use: */
42694         srcu_idx = srcu_read_lock(&wilc->srcu);
42695         list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
42696 @@ -647,7 +656,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
42697                 if (ether_addr_equal(addr->sa_data, mac_addr)) {
42698                         if (vif != tmp_vif) {
42699                                 srcu_read_unlock(&wilc->srcu, srcu_idx);
42700 -                               return -EINVAL;
42701 +                               return -EADDRNOTAVAIL;
42702                         }
42703                         srcu_read_unlock(&wilc->srcu, srcu_idx);
42704                         return 0;
42705 @@ -659,9 +668,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
42706         if (result)
42707                 return result;
42709 -       ether_addr_copy(vif->bssid, addr->sa_data);
42710 -       ether_addr_copy(vif->ndev->dev_addr, addr->sa_data);
42712 +       eth_commit_mac_addr_change(dev, p);
42713         return result;
42716 diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
42717 index 351ff909ab1c..e14b9fc2c67a 100644
42718 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c
42719 +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
42720 @@ -947,7 +947,7 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
42721                         for (i = 0; (i < 3) && (nint > 0); i++, nint--)
42722                                 reg |= BIT(i);
42724 -                       ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
42725 +                       ret = wilc_sdio_write_reg(wilc, WILC_INTR2_ENABLE, reg);
42726                         if (ret) {
42727                                 dev_err(&func->dev,
42728                                         "Failed write reg (%08x)...\n",
42729 diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
42730 index c775c177933b..8dc80574d08d 100644
42731 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c
42732 +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
42733 @@ -570,8 +570,10 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
42734                 return 0;
42736         if (ev->ssid_len) {
42737 -               memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len);
42738 -               auth.ssid.ssid_len = ev->ssid_len;
42739 +               int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN);
42741 +               memcpy(auth.ssid.ssid, ev->ssid, len);
42742 +               auth.ssid.ssid_len = len;
42743         }
42745         auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite);
42746 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
42747 index 27c8a5d96520..fcaaf664cbec 100644
42748 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
42749 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
42750 @@ -249,7 +249,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
42751         0x824, 0x00030FE0,
42752         0x828, 0x00000000,
42753         0x82C, 0x002081DD,
42754 -       0x830, 0x2AAA8E24,
42755 +       0x830, 0x2AAAEEC8,
42756         0x834, 0x0037A706,
42757         0x838, 0x06489B44,
42758         0x83C, 0x0000095B,
42759 @@ -324,10 +324,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
42760         0x9D8, 0x00000000,
42761         0x9DC, 0x00000000,
42762         0x9E0, 0x00005D00,
42763 -       0x9E4, 0x00000002,
42764 +       0x9E4, 0x00000003,
42765         0x9E8, 0x00000001,
42766         0xA00, 0x00D047C8,
42767 -       0xA04, 0x01FF000C,
42768 +       0xA04, 0x01FF800C,
42769         0xA08, 0x8C8A8300,
42770         0xA0C, 0x2E68000F,
42771         0xA10, 0x9500BB78,
42772 @@ -1320,7 +1320,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
42773                 0x083, 0x00021800,
42774                 0x084, 0x00028000,
42775                 0x085, 0x00048000,
42776 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
42777 +               0x086, 0x0009483A,
42778 +       0xA0000000,     0x00000000,
42779                 0x086, 0x00094838,
42780 +       0xB0000000,     0x00000000,
42781                 0x087, 0x00044980,
42782                 0x088, 0x00048000,
42783                 0x089, 0x0000D480,
42784 @@ -1409,36 +1413,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
42785                 0x03C, 0x000CA000,
42786                 0x0EF, 0x00000000,
42787                 0x0EF, 0x00001100,
42788 -       0xFF0F0104, 0xABCD,
42789 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
42790                 0x034, 0x0004ADF3,
42791                 0x034, 0x00049DF0,
42792 -       0xFF0F0204, 0xCDEF,
42793 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
42794                 0x034, 0x0004ADF3,
42795                 0x034, 0x00049DF0,
42796 -       0xFF0F0404, 0xCDEF,
42797 -               0x034, 0x0004ADF3,
42798 -               0x034, 0x00049DF0,
42799 -       0xFF0F0200, 0xCDEF,
42800 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
42801                 0x034, 0x0004ADF5,
42802                 0x034, 0x00049DF2,
42803 -       0xFF0F02C0, 0xCDEF,
42804 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
42805 +               0x034, 0x0004A0F3,
42806 +               0x034, 0x000490B1,
42807 +               0x9000040c,     0x00000000,     0x40000000,     0x00000000,
42808                 0x034, 0x0004A0F3,
42809                 0x034, 0x000490B1,
42810 -       0xCDCDCDCD, 0xCDCD,
42811 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
42812 +               0x034, 0x0004ADF5,
42813 +               0x034, 0x00049DF2,
42814 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
42815 +               0x034, 0x0004ADF3,
42816 +               0x034, 0x00049DF0,
42817 +       0xA0000000,     0x00000000,
42818                 0x034, 0x0004ADF7,
42819                 0x034, 0x00049DF3,
42820 -       0xFF0F0104, 0xDEAD,
42821 -       0xFF0F0104, 0xABCD,
42822 -               0x034, 0x00048DED,
42823 -               0x034, 0x00047DEA,
42824 -               0x034, 0x00046DE7,
42825 -               0x034, 0x00045CE9,
42826 -               0x034, 0x00044CE6,
42827 -               0x034, 0x000438C6,
42828 -               0x034, 0x00042886,
42829 -               0x034, 0x00041486,
42830 -               0x034, 0x00040447,
42831 -       0xFF0F0204, 0xCDEF,
42832 +       0xB0000000,     0x00000000,
42833 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
42834                 0x034, 0x00048DED,
42835                 0x034, 0x00047DEA,
42836                 0x034, 0x00046DE7,
42837 @@ -1448,7 +1448,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
42838                 0x034, 0x00042886,
42839                 0x034, 0x00041486,
42840                 0x034, 0x00040447,
42841 -       0xFF0F0404, 0xCDEF,
42842 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
42843                 0x034, 0x00048DED,
42844                 0x034, 0x00047DEA,
42845                 0x034, 0x00046DE7,
42846 @@ -1458,7 +1458,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
42847                 0x034, 0x00042886,
42848                 0x034, 0x00041486,
42849                 0x034, 0x00040447,
42850 -       0xFF0F02C0, 0xCDEF,
42851 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
42852 +               0x034, 0x000480AE,
42853 +               0x034, 0x000470AB,
42854 +               0x034, 0x0004608B,
42855 +               0x034, 0x00045069,
42856 +               0x034, 0x00044048,
42857 +               0x034, 0x00043045,
42858 +               0x034, 0x00042026,
42859 +               0x034, 0x00041023,
42860 +               0x034, 0x00040002,
42861 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
42862                 0x034, 0x000480AE,
42863                 0x034, 0x000470AB,
42864                 0x034, 0x0004608B,
42865 @@ -1468,7 +1478,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
42866                 0x034, 0x00042026,
42867                 0x034, 0x00041023,
42868                 0x034, 0x00040002,
42869 -       0xCDCDCDCD, 0xCDCD,
42870 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
42871 +               0x034, 0x00048DED,
42872 +               0x034, 0x00047DEA,
42873 +               0x034, 0x00046DE7,
42874 +               0x034, 0x00045CE9,
42875 +               0x034, 0x00044CE6,
42876 +               0x034, 0x000438C6,
42877 +               0x034, 0x00042886,
42878 +               0x034, 0x00041486,
42879 +               0x034, 0x00040447,
42880 +       0xA0000000,     0x00000000,
42881                 0x034, 0x00048DEF,
42882                 0x034, 0x00047DEC,
42883                 0x034, 0x00046DE9,
42884 @@ -1478,38 +1498,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
42885                 0x034, 0x0004248A,
42886                 0x034, 0x0004108D,
42887                 0x034, 0x0004008A,
42888 -       0xFF0F0104, 0xDEAD,
42889 -       0xFF0F0200, 0xABCD,
42890 +       0xB0000000,     0x00000000,
42891 +       0x80000210,     0x00000000,     0x40000000,     0x00000000,
42892                 0x034, 0x0002ADF4,
42893 -       0xFF0F02C0, 0xCDEF,
42894 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
42895 +               0x034, 0x0002A0F3,
42896 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
42897                 0x034, 0x0002A0F3,
42898 -       0xCDCDCDCD, 0xCDCD,
42899 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
42900 +               0x034, 0x0002ADF4,
42901 +       0xA0000000,     0x00000000,
42902                 0x034, 0x0002ADF7,
42903 -       0xFF0F0200, 0xDEAD,
42904 -       0xFF0F0104, 0xABCD,
42905 -               0x034, 0x00029DF4,
42906 -       0xFF0F0204, 0xCDEF,
42907 +       0xB0000000,     0x00000000,
42908 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
42909                 0x034, 0x00029DF4,
42910 -       0xFF0F0404, 0xCDEF,
42911 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
42912                 0x034, 0x00029DF4,
42913 -       0xFF0F0200, 0xCDEF,
42914 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
42915                 0x034, 0x00029DF1,
42916 -       0xFF0F02C0, 0xCDEF,
42917 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
42918 +               0x034, 0x000290F0,
42919 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
42920                 0x034, 0x000290F0,
42921 -       0xCDCDCDCD, 0xCDCD,
42922 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
42923 +               0x034, 0x00029DF1,
42924 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
42925 +               0x034, 0x00029DF4,
42926 +       0xA0000000,     0x00000000,
42927                 0x034, 0x00029DF2,
42928 -       0xFF0F0104, 0xDEAD,
42929 -       0xFF0F0104, 0xABCD,
42930 -               0x034, 0x00028DF1,
42931 -               0x034, 0x00027DEE,
42932 -               0x034, 0x00026DEB,
42933 -               0x034, 0x00025CEC,
42934 -               0x034, 0x00024CE9,
42935 -               0x034, 0x000238CA,
42936 -               0x034, 0x00022889,
42937 -               0x034, 0x00021489,
42938 -               0x034, 0x0002044A,
42939 -       0xFF0F0204, 0xCDEF,
42940 +       0xB0000000,     0x00000000,
42941 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
42942                 0x034, 0x00028DF1,
42943                 0x034, 0x00027DEE,
42944                 0x034, 0x00026DEB,
42945 @@ -1519,7 +1537,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
42946                 0x034, 0x00022889,
42947                 0x034, 0x00021489,
42948                 0x034, 0x0002044A,
42949 -       0xFF0F0404, 0xCDEF,
42950 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
42951                 0x034, 0x00028DF1,
42952                 0x034, 0x00027DEE,
42953                 0x034, 0x00026DEB,
42954 @@ -1529,7 +1547,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
42955                 0x034, 0x00022889,
42956                 0x034, 0x00021489,
42957                 0x034, 0x0002044A,
42958 -       0xFF0F02C0, 0xCDEF,
42959 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
42960                 0x034, 0x000280AF,
42961                 0x034, 0x000270AC,
42962                 0x034, 0x0002608B,
42963 @@ -1539,7 +1557,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
42964                 0x034, 0x00022026,
42965                 0x034, 0x00021023,
42966                 0x034, 0x00020002,
42967 -       0xCDCDCDCD, 0xCDCD,
42968 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
42969 +               0x034, 0x000280AF,
42970 +               0x034, 0x000270AC,
42971 +               0x034, 0x0002608B,
42972 +               0x034, 0x00025069,
42973 +               0x034, 0x00024048,
42974 +               0x034, 0x00023045,
42975 +               0x034, 0x00022026,
42976 +               0x034, 0x00021023,
42977 +               0x034, 0x00020002,
42978 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
42979 +               0x034, 0x00028DF1,
42980 +               0x034, 0x00027DEE,
42981 +               0x034, 0x00026DEB,
42982 +               0x034, 0x00025CEC,
42983 +               0x034, 0x00024CE9,
42984 +               0x034, 0x000238CA,
42985 +               0x034, 0x00022889,
42986 +               0x034, 0x00021489,
42987 +               0x034, 0x0002044A,
42988 +       0xA0000000,     0x00000000,
42989                 0x034, 0x00028DEE,
42990                 0x034, 0x00027DEB,
42991                 0x034, 0x00026CCD,
42992 @@ -1549,27 +1587,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
42993                 0x034, 0x00022849,
42994                 0x034, 0x00021449,
42995                 0x034, 0x0002004D,
42996 -       0xFF0F0104, 0xDEAD,
42997 -       0xFF0F02C0, 0xABCD,
42998 +       0xB0000000,     0x00000000,
42999 +       0x8000020c,     0x00000000,     0x40000000,     0x00000000,
43000 +               0x034, 0x0000A0D7,
43001 +               0x034, 0x000090D3,
43002 +               0x034, 0x000080B1,
43003 +               0x034, 0x000070AE,
43004 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43005                 0x034, 0x0000A0D7,
43006                 0x034, 0x000090D3,
43007                 0x034, 0x000080B1,
43008                 0x034, 0x000070AE,
43009 -       0xCDCDCDCD, 0xCDCD,
43010 +       0xA0000000,     0x00000000,
43011                 0x034, 0x0000ADF7,
43012                 0x034, 0x00009DF4,
43013                 0x034, 0x00008DF1,
43014                 0x034, 0x00007DEE,
43015 -       0xFF0F02C0, 0xDEAD,
43016 -       0xFF0F0104, 0xABCD,
43017 -               0x034, 0x00006DEB,
43018 -               0x034, 0x00005CEC,
43019 -               0x034, 0x00004CE9,
43020 -               0x034, 0x000038CA,
43021 -               0x034, 0x00002889,
43022 -               0x034, 0x00001489,
43023 -               0x034, 0x0000044A,
43024 -       0xFF0F0204, 0xCDEF,
43025 +       0xB0000000,     0x00000000,
43026 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43027                 0x034, 0x00006DEB,
43028                 0x034, 0x00005CEC,
43029                 0x034, 0x00004CE9,
43030 @@ -1577,7 +1612,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43031                 0x034, 0x00002889,
43032                 0x034, 0x00001489,
43033                 0x034, 0x0000044A,
43034 -       0xFF0F0404, 0xCDEF,
43035 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43036                 0x034, 0x00006DEB,
43037                 0x034, 0x00005CEC,
43038                 0x034, 0x00004CE9,
43039 @@ -1585,7 +1620,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43040                 0x034, 0x00002889,
43041                 0x034, 0x00001489,
43042                 0x034, 0x0000044A,
43043 -       0xFF0F02C0, 0xCDEF,
43044 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
43045                 0x034, 0x0000608D,
43046                 0x034, 0x0000506B,
43047                 0x034, 0x0000404A,
43048 @@ -1593,7 +1628,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43049                 0x034, 0x00002044,
43050                 0x034, 0x00001025,
43051                 0x034, 0x00000004,
43052 -       0xCDCDCDCD, 0xCDCD,
43053 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43054 +               0x034, 0x0000608D,
43055 +               0x034, 0x0000506B,
43056 +               0x034, 0x0000404A,
43057 +               0x034, 0x00003047,
43058 +               0x034, 0x00002044,
43059 +               0x034, 0x00001025,
43060 +               0x034, 0x00000004,
43061 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43062 +               0x034, 0x00006DEB,
43063 +               0x034, 0x00005CEC,
43064 +               0x034, 0x00004CE9,
43065 +               0x034, 0x000038CA,
43066 +               0x034, 0x00002889,
43067 +               0x034, 0x00001489,
43068 +               0x034, 0x0000044A,
43069 +       0xA0000000,     0x00000000,
43070                 0x034, 0x00006DCD,
43071                 0x034, 0x00005CCD,
43072                 0x034, 0x00004CCA,
43073 @@ -1601,11 +1652,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43074                 0x034, 0x00002888,
43075                 0x034, 0x00001488,
43076                 0x034, 0x00000486,
43077 -       0xFF0F0104, 0xDEAD,
43078 +       0xB0000000,     0x00000000,
43079                 0x0EF, 0x00000000,
43080                 0x018, 0x0001712A,
43081                 0x0EF, 0x00000040,
43082 -       0xFF0F0104, 0xABCD,
43083 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43084                 0x035, 0x00000187,
43085                 0x035, 0x00008187,
43086                 0x035, 0x00010187,
43087 @@ -1615,7 +1666,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43088                 0x035, 0x00040188,
43089                 0x035, 0x00048188,
43090                 0x035, 0x00050188,
43091 -       0xFF0F0204, 0xCDEF,
43092 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43093                 0x035, 0x00000187,
43094                 0x035, 0x00008187,
43095                 0x035, 0x00010187,
43096 @@ -1625,7 +1676,37 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43097                 0x035, 0x00040188,
43098                 0x035, 0x00048188,
43099                 0x035, 0x00050188,
43100 -       0xFF0F0404, 0xCDEF,
43101 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
43102 +               0x035, 0x00000128,
43103 +               0x035, 0x00008128,
43104 +               0x035, 0x00010128,
43105 +               0x035, 0x000201C8,
43106 +               0x035, 0x000281C8,
43107 +               0x035, 0x000301C8,
43108 +               0x035, 0x000401C8,
43109 +               0x035, 0x000481C8,
43110 +               0x035, 0x000501C8,
43111 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43112 +               0x035, 0x00000145,
43113 +               0x035, 0x00008145,
43114 +               0x035, 0x00010145,
43115 +               0x035, 0x00020196,
43116 +               0x035, 0x00028196,
43117 +               0x035, 0x00030196,
43118 +               0x035, 0x000401C7,
43119 +               0x035, 0x000481C7,
43120 +               0x035, 0x000501C7,
43121 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
43122 +               0x035, 0x00000128,
43123 +               0x035, 0x00008128,
43124 +               0x035, 0x00010128,
43125 +               0x035, 0x000201C8,
43126 +               0x035, 0x000281C8,
43127 +               0x035, 0x000301C8,
43128 +               0x035, 0x000401C8,
43129 +               0x035, 0x000481C8,
43130 +               0x035, 0x000501C8,
43131 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43132                 0x035, 0x00000187,
43133                 0x035, 0x00008187,
43134                 0x035, 0x00010187,
43135 @@ -1635,7 +1716,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43136                 0x035, 0x00040188,
43137                 0x035, 0x00048188,
43138                 0x035, 0x00050188,
43139 -       0xCDCDCDCD, 0xCDCD,
43140 +       0xA0000000,     0x00000000,
43141                 0x035, 0x00000145,
43142                 0x035, 0x00008145,
43143                 0x035, 0x00010145,
43144 @@ -1645,11 +1726,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43145                 0x035, 0x000401C7,
43146                 0x035, 0x000481C7,
43147                 0x035, 0x000501C7,
43148 -       0xFF0F0104, 0xDEAD,
43149 +       0xB0000000,     0x00000000,
43150                 0x0EF, 0x00000000,
43151                 0x018, 0x0001712A,
43152                 0x0EF, 0x00000010,
43153 -       0xFF0F0104, 0xABCD,
43154 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43155                 0x036, 0x00085733,
43156                 0x036, 0x0008D733,
43157                 0x036, 0x00095733,
43158 @@ -1662,7 +1743,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43159                 0x036, 0x000CE4B4,
43160                 0x036, 0x000D64B4,
43161                 0x036, 0x000DE4B4,
43162 -       0xFF0F0204, 0xCDEF,
43163 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43164                 0x036, 0x00085733,
43165                 0x036, 0x0008D733,
43166                 0x036, 0x00095733,
43167 @@ -1675,7 +1756,46 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43168                 0x036, 0x000CE4B4,
43169                 0x036, 0x000D64B4,
43170                 0x036, 0x000DE4B4,
43171 -       0xFF0F0404, 0xCDEF,
43172 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
43173 +               0x036, 0x000063B5,
43174 +               0x036, 0x0000E3B5,
43175 +               0x036, 0x000163B5,
43176 +               0x036, 0x0001E3B5,
43177 +               0x036, 0x000263B5,
43178 +               0x036, 0x0002E3B5,
43179 +               0x036, 0x000363B5,
43180 +               0x036, 0x0003E3B5,
43181 +               0x036, 0x000463B5,
43182 +               0x036, 0x0004E3B5,
43183 +               0x036, 0x000563B5,
43184 +               0x036, 0x0005E3B5,
43185 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43186 +               0x036, 0x000056B3,
43187 +               0x036, 0x0000D6B3,
43188 +               0x036, 0x000156B3,
43189 +               0x036, 0x0001D6B3,
43190 +               0x036, 0x00026634,
43191 +               0x036, 0x0002E634,
43192 +               0x036, 0x00036634,
43193 +               0x036, 0x0003E634,
43194 +               0x036, 0x000467B4,
43195 +               0x036, 0x0004E7B4,
43196 +               0x036, 0x000567B4,
43197 +               0x036, 0x0005E7B4,
43198 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
43199 +               0x036, 0x000063B5,
43200 +               0x036, 0x0000E3B5,
43201 +               0x036, 0x000163B5,
43202 +               0x036, 0x0001E3B5,
43203 +               0x036, 0x000263B5,
43204 +               0x036, 0x0002E3B5,
43205 +               0x036, 0x000363B5,
43206 +               0x036, 0x0003E3B5,
43207 +               0x036, 0x000463B5,
43208 +               0x036, 0x0004E3B5,
43209 +               0x036, 0x000563B5,
43210 +               0x036, 0x0005E3B5,
43211 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43212                 0x036, 0x00085733,
43213                 0x036, 0x0008D733,
43214                 0x036, 0x00095733,
43215 @@ -1688,7 +1808,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43216                 0x036, 0x000CE4B4,
43217                 0x036, 0x000D64B4,
43218                 0x036, 0x000DE4B4,
43219 -       0xCDCDCDCD, 0xCDCD,
43220 +       0xA0000000,     0x00000000,
43221                 0x036, 0x000056B3,
43222                 0x036, 0x0000D6B3,
43223                 0x036, 0x000156B3,
43224 @@ -1701,103 +1821,162 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43225                 0x036, 0x0004E7B4,
43226                 0x036, 0x000567B4,
43227                 0x036, 0x0005E7B4,
43228 -       0xFF0F0104, 0xDEAD,
43229 +       0xB0000000,     0x00000000,
43230                 0x0EF, 0x00000000,
43231                 0x0EF, 0x00000008,
43232 -       0xFF0F0104, 0xABCD,
43233 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43234                 0x03C, 0x000001C8,
43235                 0x03C, 0x00000492,
43236 -       0xFF0F0204, 0xCDEF,
43237 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43238                 0x03C, 0x000001C8,
43239                 0x03C, 0x00000492,
43240 -       0xFF0F0404, 0xCDEF,
43241 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
43242 +               0x03C, 0x000001B6,
43243 +               0x03C, 0x00000492,
43244 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43245 +               0x03C, 0x0000022A,
43246 +               0x03C, 0x00000594,
43247 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
43248 +               0x03C, 0x000001B6,
43249 +               0x03C, 0x00000492,
43250 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43251                 0x03C, 0x000001C8,
43252                 0x03C, 0x00000492,
43253 -       0xCDCDCDCD, 0xCDCD,
43254 +       0xA0000000,     0x00000000,
43255                 0x03C, 0x0000022A,
43256                 0x03C, 0x00000594,
43257 -       0xFF0F0104, 0xDEAD,
43258 -       0xFF0F0104, 0xABCD,
43259 +       0xB0000000,     0x00000000,
43260 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43261                 0x03C, 0x00000800,
43262 -       0xFF0F0204, 0xCDEF,
43263 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43264                 0x03C, 0x00000800,
43265 -       0xFF0F0404, 0xCDEF,
43266 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
43267                 0x03C, 0x00000800,
43268 -       0xFF0F02C0, 0xCDEF,
43269 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
43270                 0x03C, 0x00000820,
43271 -       0xCDCDCDCD, 0xCDCD,
43272 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43273 +               0x03C, 0x00000820,
43274 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
43275 +               0x03C, 0x00000800,
43276 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43277 +               0x03C, 0x00000800,
43278 +       0xA0000000,     0x00000000,
43279                 0x03C, 0x00000900,
43280 -       0xFF0F0104, 0xDEAD,
43281 +       0xB0000000,     0x00000000,
43282                 0x0EF, 0x00000000,
43283                 0x018, 0x0001712A,
43284                 0x0EF, 0x00000002,
43285 -       0xFF0F0104, 0xABCD,
43286 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43287                 0x008, 0x0004E400,
43288 -       0xFF0F0204, 0xCDEF,
43289 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43290                 0x008, 0x0004E400,
43291 -       0xFF0F0404, 0xCDEF,
43292 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
43293 +               0x008, 0x00002000,
43294 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
43295 +               0x008, 0x00002000,
43296 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43297 +               0x008, 0x00002000,
43298 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
43299 +               0x008, 0x00002000,
43300 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43301                 0x008, 0x0004E400,
43302 -       0xCDCDCDCD, 0xCDCD,
43303 +       0xA0000000,     0x00000000,
43304                 0x008, 0x00002000,
43305 -       0xFF0F0104, 0xDEAD,
43306 +       0xB0000000,     0x00000000,
43307                 0x0EF, 0x00000000,
43308                 0x0DF, 0x000000C0,
43309 -               0x01F, 0x00040064,
43310 -       0xFF0F0104, 0xABCD,
43311 +               0x01F, 0x00000064,
43312 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43313                 0x058, 0x000A7284,
43314                 0x059, 0x000600EC,
43315 -       0xFF0F0204, 0xCDEF,
43316 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43317                 0x058, 0x000A7284,
43318                 0x059, 0x000600EC,
43319 -       0xFF0F0404, 0xCDEF,
43320 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
43321 +               0x058, 0x00081184,
43322 +               0x059, 0x0006016C,
43323 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43324 +               0x058, 0x00081184,
43325 +               0x059, 0x0006016C,
43326 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
43327 +               0x058, 0x00081184,
43328 +               0x059, 0x0006016C,
43329 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43330                 0x058, 0x000A7284,
43331                 0x059, 0x000600EC,
43332 -       0xCDCDCDCD, 0xCDCD,
43333 +       0xA0000000,     0x00000000,
43334                 0x058, 0x00081184,
43335                 0x059, 0x0006016C,
43336 -       0xFF0F0104, 0xDEAD,
43337 -       0xFF0F0104, 0xABCD,
43338 +       0xB0000000,     0x00000000,
43339 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43340                 0x061, 0x000E8D73,
43341                 0x062, 0x00093FC5,
43342 -       0xFF0F0204, 0xCDEF,
43343 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43344                 0x061, 0x000E8D73,
43345                 0x062, 0x00093FC5,
43346 -       0xFF0F0404, 0xCDEF,
43347 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
43348 +               0x061, 0x000EFD83,
43349 +               0x062, 0x00093FCC,
43350 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43351 +               0x061, 0x000EAD53,
43352 +               0x062, 0x00093BC4,
43353 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
43354 +               0x061, 0x000EFD83,
43355 +               0x062, 0x00093FCC,
43356 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43357                 0x061, 0x000E8D73,
43358                 0x062, 0x00093FC5,
43359 -       0xCDCDCDCD, 0xCDCD,
43360 +       0xA0000000,     0x00000000,
43361                 0x061, 0x000EAD53,
43362                 0x062, 0x00093BC4,
43363 -       0xFF0F0104, 0xDEAD,
43364 -       0xFF0F0104, 0xABCD,
43365 +       0xB0000000,     0x00000000,
43366 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43367                 0x063, 0x000110E9,
43368 -       0xFF0F0204, 0xCDEF,
43369 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43370                 0x063, 0x000110E9,
43371 -       0xFF0F0404, 0xCDEF,
43372 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
43373 +               0x063, 0x000110EB,
43374 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
43375                 0x063, 0x000110E9,
43376 -       0xFF0F0200, 0xCDEF,
43377 -               0x063, 0x000710E9,
43378 -       0xFF0F02C0, 0xCDEF,
43379 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43380                 0x063, 0x000110E9,
43381 -       0xCDCDCDCD, 0xCDCD,
43382 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
43383 +               0x063, 0x000110EB,
43384 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43385 +               0x063, 0x000110E9,
43386 +       0xA0000000,     0x00000000,
43387                 0x063, 0x000714E9,
43388 -       0xFF0F0104, 0xDEAD,
43389 -       0xFF0F0104, 0xABCD,
43390 +       0xB0000000,     0x00000000,
43391 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43392 +               0x064, 0x0001C27C,
43393 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43394 +               0x064, 0x0001C27C,
43395 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
43396                 0x064, 0x0001C27C,
43397 -       0xFF0F0204, 0xCDEF,
43398 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43399 +               0x064, 0x0001C67C,
43400 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
43401                 0x064, 0x0001C27C,
43402 -       0xFF0F0404, 0xCDEF,
43403 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43404                 0x064, 0x0001C27C,
43405 -       0xCDCDCDCD, 0xCDCD,
43406 +       0xA0000000,     0x00000000,
43407                 0x064, 0x0001C67C,
43408 -       0xFF0F0104, 0xDEAD,
43409 -       0xFF0F0200, 0xABCD,
43410 +       0xB0000000,     0x00000000,
43411 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43412 +               0x065, 0x00091016,
43413 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43414 +               0x065, 0x00091016,
43415 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
43416                 0x065, 0x00093016,
43417 -       0xFF0F02C0, 0xCDEF,
43418 +               0x9000020c,     0x00000000,     0x40000000,     0x00000000,
43419                 0x065, 0x00093015,
43420 -       0xCDCDCDCD, 0xCDCD,
43421 +               0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43422 +               0x065, 0x00093015,
43423 +               0x90000200,     0x00000000,     0x40000000,     0x00000000,
43424 +               0x065, 0x00093016,
43425 +               0xA0000000,     0x00000000,
43426                 0x065, 0x00091016,
43427 -       0xFF0F0200, 0xDEAD,
43428 +               0xB0000000,     0x00000000,
43429                 0x018, 0x00000006,
43430                 0x0EF, 0x00002000,
43431                 0x03B, 0x0003824B,
43432 @@ -1895,9 +2074,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43433                 0x0B4, 0x0001214C,
43434                 0x0B7, 0x0003000C,
43435                 0x01C, 0x000539D2,
43436 +               0x0C4, 0x000AFE00,
43437                 0x018, 0x0001F12A,
43438 -               0x0FE, 0x00000000,
43439 -               0x0FE, 0x00000000,
43440 +               0xFFE, 0x00000000,
43441 +               0xFFE, 0x00000000,
43442                 0x018, 0x0001712A,
43444  };
43445 @@ -2017,6 +2197,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
43446  u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
43448  u32 RTL8821AE_MAC_REG_ARRAY[] = {
43449 +               0x421, 0x0000000F,
43450                 0x428, 0x0000000A,
43451                 0x429, 0x00000010,
43452                 0x430, 0x00000000,
43453 @@ -2485,7 +2666,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
43454                 0x81C, 0xA6360001,
43455                 0x81C, 0xA5380001,
43456                 0x81C, 0xA43A0001,
43457 -               0x81C, 0xA33C0001,
43458 +               0x81C, 0x683C0001,
43459                 0x81C, 0x673E0001,
43460                 0x81C, 0x66400001,
43461                 0x81C, 0x65420001,
43462 @@ -2519,7 +2700,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
43463                 0x81C, 0x017A0001,
43464                 0x81C, 0x017C0001,
43465                 0x81C, 0x017E0001,
43466 -       0xFF0F02C0, 0xABCD,
43467 +       0x8000020c,     0x00000000,     0x40000000,     0x00000000,
43468 +               0x81C, 0xFB000101,
43469 +               0x81C, 0xFA020101,
43470 +               0x81C, 0xF9040101,
43471 +               0x81C, 0xF8060101,
43472 +               0x81C, 0xF7080101,
43473 +               0x81C, 0xF60A0101,
43474 +               0x81C, 0xF50C0101,
43475 +               0x81C, 0xF40E0101,
43476 +               0x81C, 0xF3100101,
43477 +               0x81C, 0xF2120101,
43478 +               0x81C, 0xF1140101,
43479 +               0x81C, 0xF0160101,
43480 +               0x81C, 0xEF180101,
43481 +               0x81C, 0xEE1A0101,
43482 +               0x81C, 0xED1C0101,
43483 +               0x81C, 0xEC1E0101,
43484 +               0x81C, 0xEB200101,
43485 +               0x81C, 0xEA220101,
43486 +               0x81C, 0xE9240101,
43487 +               0x81C, 0xE8260101,
43488 +               0x81C, 0xE7280101,
43489 +               0x81C, 0xE62A0101,
43490 +               0x81C, 0xE52C0101,
43491 +               0x81C, 0xE42E0101,
43492 +               0x81C, 0xE3300101,
43493 +               0x81C, 0xA5320101,
43494 +               0x81C, 0xA4340101,
43495 +               0x81C, 0xA3360101,
43496 +               0x81C, 0x87380101,
43497 +               0x81C, 0x863A0101,
43498 +               0x81C, 0x853C0101,
43499 +               0x81C, 0x843E0101,
43500 +               0x81C, 0x69400101,
43501 +               0x81C, 0x68420101,
43502 +               0x81C, 0x67440101,
43503 +               0x81C, 0x66460101,
43504 +               0x81C, 0x49480101,
43505 +               0x81C, 0x484A0101,
43506 +               0x81C, 0x474C0101,
43507 +               0x81C, 0x2A4E0101,
43508 +               0x81C, 0x29500101,
43509 +               0x81C, 0x28520101,
43510 +               0x81C, 0x27540101,
43511 +               0x81C, 0x26560101,
43512 +               0x81C, 0x25580101,
43513 +               0x81C, 0x245A0101,
43514 +               0x81C, 0x235C0101,
43515 +               0x81C, 0x055E0101,
43516 +               0x81C, 0x04600101,
43517 +               0x81C, 0x03620101,
43518 +               0x81C, 0x02640101,
43519 +               0x81C, 0x01660101,
43520 +               0x81C, 0x01680101,
43521 +               0x81C, 0x016A0101,
43522 +               0x81C, 0x016C0101,
43523 +               0x81C, 0x016E0101,
43524 +               0x81C, 0x01700101,
43525 +               0x81C, 0x01720101,
43526 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43527                 0x81C, 0xFB000101,
43528                 0x81C, 0xFA020101,
43529                 0x81C, 0xF9040101,
43530 @@ -2578,7 +2818,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
43531                 0x81C, 0x016E0101,
43532                 0x81C, 0x01700101,
43533                 0x81C, 0x01720101,
43534 -       0xCDCDCDCD, 0xCDCD,
43535 +       0xA0000000,     0x00000000,
43536                 0x81C, 0xFF000101,
43537                 0x81C, 0xFF020101,
43538                 0x81C, 0xFE040101,
43539 @@ -2637,7 +2877,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
43540                 0x81C, 0x046E0101,
43541                 0x81C, 0x03700101,
43542                 0x81C, 0x02720101,
43543 -       0xFF0F02C0, 0xDEAD,
43544 +       0xB0000000,     0x00000000,
43545                 0x81C, 0x01740101,
43546                 0x81C, 0x01760101,
43547                 0x81C, 0x01780101,
43548 diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
43549 index 948cb79050ea..e7d51ac9b689 100644
43550 --- a/drivers/net/wireless/realtek/rtw88/debug.c
43551 +++ b/drivers/net/wireless/realtek/rtw88/debug.c
43552 @@ -270,7 +270,7 @@ static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
43554         if (num != 2) {
43555                 rtw_warn(rtwdev, "invalid arguments\n");
43556 -               return num;
43557 +               return -EINVAL;
43558         }
43560         debugfs_priv->rsvd_page.page_offset = offset;
43561 diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
43562 index 35afea91fd29..92b9cf1f9525 100644
43563 --- a/drivers/net/wireless/realtek/rtw88/main.h
43564 +++ b/drivers/net/wireless/realtek/rtw88/main.h
43565 @@ -1166,6 +1166,7 @@ struct rtw_chip_info {
43566         bool en_dis_dpd;
43567         u16 dpd_ratemask;
43568         u8 iqk_threshold;
43569 +       u8 lck_threshold;
43570         const struct rtw_pwr_track_tbl *pwr_track_tbl;
43572         u8 bfer_su_max_num;
43573 @@ -1534,6 +1535,7 @@ struct rtw_dm_info {
43574         u32 rrsr_mask_min;
43575         u8 thermal_avg[RTW_RF_PATH_MAX];
43576         u8 thermal_meter_k;
43577 +       u8 thermal_meter_lck;
43578         s8 delta_power_index[RTW_RF_PATH_MAX];
43579         s8 delta_power_index_last[RTW_RF_PATH_MAX];
43580         u8 default_ofdm_index;
43581 diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
43582 index 786a48649946..6b5c885798a4 100644
43583 --- a/drivers/net/wireless/realtek/rtw88/pci.c
43584 +++ b/drivers/net/wireless/realtek/rtw88/pci.c
43585 @@ -581,23 +581,30 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
43587         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
43589 +       rtw_pci_napi_start(rtwdev);
43591         spin_lock_bh(&rtwpci->irq_lock);
43592 +       rtwpci->running = true;
43593         rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
43594         spin_unlock_bh(&rtwpci->irq_lock);
43596 -       rtw_pci_napi_start(rtwdev);
43598         return 0;
43601  static void rtw_pci_stop(struct rtw_dev *rtwdev)
43603         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
43604 +       struct pci_dev *pdev = rtwpci->pdev;
43606 +       spin_lock_bh(&rtwpci->irq_lock);
43607 +       rtwpci->running = false;
43608 +       rtw_pci_disable_interrupt(rtwdev, rtwpci);
43609 +       spin_unlock_bh(&rtwpci->irq_lock);
43611 +       synchronize_irq(pdev->irq);
43612         rtw_pci_napi_stop(rtwdev);
43614         spin_lock_bh(&rtwpci->irq_lock);
43615 -       rtw_pci_disable_interrupt(rtwdev, rtwpci);
43616         rtw_pci_dma_release(rtwdev, rtwpci);
43617         spin_unlock_bh(&rtwpci->irq_lock);
43619 @@ -1138,7 +1145,8 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
43620                 rtw_fw_c2h_cmd_isr(rtwdev);
43622         /* all of the jobs for this interrupt have been done */
43623 -       rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
43624 +       if (rtwpci->running)
43625 +               rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
43626         spin_unlock_bh(&rtwpci->irq_lock);
43628         return IRQ_HANDLED;
43629 @@ -1558,7 +1566,8 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
43630         if (work_done < budget) {
43631                 napi_complete_done(napi, work_done);
43632                 spin_lock_bh(&rtwpci->irq_lock);
43633 -               rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
43634 +               if (rtwpci->running)
43635 +                       rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
43636                 spin_unlock_bh(&rtwpci->irq_lock);
43637                 /* When ISR happens during polling and before napi_complete
43638                  * while no further data is received. Data on the dma_ring will
43639 diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
43640 index e76fc549a788..0ffae887527a 100644
43641 --- a/drivers/net/wireless/realtek/rtw88/pci.h
43642 +++ b/drivers/net/wireless/realtek/rtw88/pci.h
43643 @@ -211,6 +211,7 @@ struct rtw_pci {
43644         spinlock_t irq_lock;
43645         u32 irq_mask[4];
43646         bool irq_enabled;
43647 +       bool running;
43649         /* napi structure */
43650         struct net_device netdev;
43651 diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
43652 index e114ddecac09..21e77fcfa4d5 100644
43653 --- a/drivers/net/wireless/realtek/rtw88/phy.c
43654 +++ b/drivers/net/wireless/realtek/rtw88/phy.c
43655 @@ -1584,7 +1584,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
43657  EXPORT_SYMBOL(rtw_phy_load_tables);
43659 -static u8 rtw_get_channel_group(u8 channel)
43660 +static u8 rtw_get_channel_group(u8 channel, u8 rate)
43662         switch (channel) {
43663         default:
43664 @@ -1628,6 +1628,7 @@ static u8 rtw_get_channel_group(u8 channel)
43665         case 106:
43666                 return 4;
43667         case 14:
43668 +               return rate <= DESC_RATE11M ? 5 : 4;
43669         case 108:
43670         case 110:
43671         case 112:
43672 @@ -1879,7 +1880,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
43673         s8 *remnant = &pwr_param->pwr_remnant;
43675         pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
43676 -       group = rtw_get_channel_group(ch);
43677 +       group = rtw_get_channel_group(ch, rate);
43679         /* base power index for 2.4G/5G */
43680         if (IS_CH_2G_BAND(ch)) {
43681 @@ -2219,6 +2220,20 @@ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
43683  EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx);
43685 +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev)
43687 +       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
43688 +       u8 delta_lck;
43690 +       delta_lck = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_lck);
43691 +       if (delta_lck >= rtwdev->chip->lck_threshold) {
43692 +               dm_info->thermal_meter_lck = dm_info->thermal_avg[0];
43693 +               return true;
43694 +       }
43695 +       return false;
43697 +EXPORT_SYMBOL(rtw_phy_pwrtrack_need_lck);
43699  bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
43701         struct rtw_dm_info *dm_info = &rtwdev->dm_info;
43702 diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
43703 index a4fcfb878550..a0742a69446d 100644
43704 --- a/drivers/net/wireless/realtek/rtw88/phy.h
43705 +++ b/drivers/net/wireless/realtek/rtw88/phy.h
43706 @@ -55,6 +55,7 @@ u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path);
43707  s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
43708                                struct rtw_swing_table *swing_table,
43709                                u8 tbl_path, u8 therm_path, u8 delta);
43710 +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev);
43711  bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
43712  void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
43713                                 struct rtw_swing_table *swing_table);
43714 diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
43715 index ea518aa78552..819af34dac34 100644
43716 --- a/drivers/net/wireless/realtek/rtw88/reg.h
43717 +++ b/drivers/net/wireless/realtek/rtw88/reg.h
43718 @@ -652,8 +652,13 @@
43719  #define RF_TXATANK     0x64
43720  #define RF_TRXIQ       0x66
43721  #define RF_RXIQGEN     0x8d
43722 +#define RF_SYN_PFD     0xb0
43723  #define RF_XTALX2      0xb8
43724 +#define RF_SYN_CTRL    0xbb
43725  #define RF_MALSEL      0xbe
43726 +#define RF_SYN_AAC     0xc9
43727 +#define RF_AAC_CTRL    0xca
43728 +#define RF_FAST_LCK    0xcc
43729  #define RF_RCKD                0xde
43730  #define RF_TXADBG      0xde
43731  #define RF_LUTDBG      0xdf
43732 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
43733 index dd560c28abb2..448922cb2e63 100644
43734 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
43735 +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
43736 @@ -1126,6 +1126,7 @@ static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
43738         dm_info->pwr_trk_triggered = false;
43739         dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
43740 +       dm_info->thermal_meter_lck = rtwdev->efuse.thermal_meter_k;
43743  static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
43744 @@ -2108,6 +2109,26 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
43745         rtw_write32_set(rtwdev, REG_RX_BREAK, BIT_COM_RX_GCK_EN);
43748 +static void rtw8822c_do_lck(struct rtw_dev *rtwdev)
43750 +       u32 val;
43752 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_CTRL, RFREG_MASK, 0x80010);
43753 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0FA);
43754 +       fsleep(1);
43755 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_AAC_CTRL, RFREG_MASK, 0x80000);
43756 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_AAC, RFREG_MASK, 0x80001);
43757 +       read_poll_timeout(rtw_read_rf, val, val != 0x1, 1000, 100000,
43758 +                         true, rtwdev, RF_PATH_A, RF_AAC_CTRL, 0x1000);
43759 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0F8);
43760 +       rtw_write_rf(rtwdev, RF_PATH_B, RF_SYN_CTRL, RFREG_MASK, 0x80010);
43762 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
43763 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x4f000);
43764 +       fsleep(1);
43765 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
43768  static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
43770         struct rtw_iqk_para para = {0};
43771 @@ -3538,11 +3559,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
43773         rtw_phy_config_swing_table(rtwdev, &swing_table);
43775 +       if (rtw_phy_pwrtrack_need_lck(rtwdev))
43776 +               rtw8822c_do_lck(rtwdev);
43778         for (i = 0; i < rtwdev->hal.rf_path_num; i++)
43779                 rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
43781 -       if (rtw_phy_pwrtrack_need_iqk(rtwdev))
43782 -               rtw8822c_do_iqk(rtwdev);
43785  static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
43786 @@ -4351,6 +4373,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
43787         .dpd_ratemask = DIS_DPD_RATEALL,
43788         .pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl,
43789         .iqk_threshold = 8,
43790 +       .lck_threshold = 8,
43791         .bfer_su_max_num = 2,
43792         .bfer_mu_max_num = 1,
43793         .rx_ldpc = true,
43794 diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
43795 index fe0287b22a25..e0c502bc4270 100644
43796 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
43797 +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
43798 @@ -1513,7 +1513,7 @@ static int rsi_restore(struct device *dev)
43800  static const struct dev_pm_ops rsi_pm_ops = {
43801         .suspend = rsi_suspend,
43802 -       .resume = rsi_resume,
43803 +       .resume_noirq = rsi_resume,
43804         .freeze = rsi_freeze,
43805         .thaw = rsi_thaw,
43806         .restore = rsi_restore,
43807 diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
43808 index e14d88e558f0..85abd0a2d1c9 100644
43809 --- a/drivers/net/wireless/ti/wlcore/boot.c
43810 +++ b/drivers/net/wireless/ti/wlcore/boot.c
43811 @@ -72,6 +72,7 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
43812         unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
43813                 wl->min_mr_fw_ver : wl->min_sr_fw_ver;
43814         char min_fw_str[32] = "";
43815 +       int off = 0;
43816         int i;
43818         /* the chip must be exactly equal */
43819 @@ -105,13 +106,15 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
43820         return 0;
43822  fail:
43823 -       for (i = 0; i < NUM_FW_VER; i++)
43824 +       for (i = 0; i < NUM_FW_VER && off < sizeof(min_fw_str); i++)
43825                 if (min_ver[i] == WLCORE_FW_VER_IGNORE)
43826 -                       snprintf(min_fw_str, sizeof(min_fw_str),
43827 -                                 "%s*.", min_fw_str);
43828 +                       off += snprintf(min_fw_str + off,
43829 +                                       sizeof(min_fw_str) - off,
43830 +                                       "*.");
43831                 else
43832 -                       snprintf(min_fw_str, sizeof(min_fw_str),
43833 -                                 "%s%u.", min_fw_str, min_ver[i]);
43834 +                       off += snprintf(min_fw_str + off,
43835 +                                       sizeof(min_fw_str) - off,
43836 +                                       "%u.", min_ver[i]);
43838         wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
43839                      "Please use at least FW %s\n"
43840 diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
43841 index b143293e694f..a9e13e6d65c5 100644
43842 --- a/drivers/net/wireless/ti/wlcore/debugfs.h
43843 +++ b/drivers/net/wireless/ti/wlcore/debugfs.h
43844 @@ -78,13 +78,14 @@ static ssize_t sub## _ ##name## _read(struct file *file,            \
43845         struct wl1271 *wl = file->private_data;                         \
43846         struct struct_type *stats = wl->stats.fw_stats;                 \
43847         char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = "";                      \
43848 +       int pos = 0;                                                    \
43849         int i;                                                          \
43850                                                                         \
43851         wl1271_debugfs_update_stats(wl);                                \
43852                                                                         \
43853 -       for (i = 0; i < len; i++)                                       \
43854 -               snprintf(buf, sizeof(buf), "%s[%d] = %d\n",             \
43855 -                        buf, i, stats->sub.name[i]);                   \
43856 +       for (i = 0; i < len && pos < sizeof(buf); i++)                  \
43857 +               pos += snprintf(buf + pos, sizeof(buf) - pos,           \
43858 +                        "[%d] = %d\n", i, stats->sub.name[i]);         \
43859                                                                         \
43860         return wl1271_format_buffer(userbuf, count, ppos, "%s", buf);   \
43861  }                                                                      \
43862 diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
43863 index e98e04ee9a2c..59b7b93c5963 100644
43864 --- a/drivers/net/wireless/wl3501.h
43865 +++ b/drivers/net/wireless/wl3501.h
43866 @@ -379,16 +379,7 @@ struct wl3501_get_confirm {
43867         u8      mib_value[100];
43868  };
43870 -struct wl3501_join_req {
43871 -       u16                         next_blk;
43872 -       u8                          sig_id;
43873 -       u8                          reserved;
43874 -       struct iw_mgmt_data_rset    operational_rset;
43875 -       u16                         reserved2;
43876 -       u16                         timeout;
43877 -       u16                         probe_delay;
43878 -       u8                          timestamp[8];
43879 -       u8                          local_time[8];
43880 +struct wl3501_req {
43881         u16                         beacon_period;
43882         u16                         dtim_period;
43883         u16                         cap_info;
43884 @@ -401,6 +392,19 @@ struct wl3501_join_req {
43885         struct iw_mgmt_data_rset    bss_basic_rset;
43886  };
43888 +struct wl3501_join_req {
43889 +       u16                         next_blk;
43890 +       u8                          sig_id;
43891 +       u8                          reserved;
43892 +       struct iw_mgmt_data_rset    operational_rset;
43893 +       u16                         reserved2;
43894 +       u16                         timeout;
43895 +       u16                         probe_delay;
43896 +       u8                          timestamp[8];
43897 +       u8                          local_time[8];
43898 +       struct wl3501_req           req;
43901  struct wl3501_join_confirm {
43902         u16     next_blk;
43903         u8      sig_id;
43904 @@ -443,16 +447,7 @@ struct wl3501_scan_confirm {
43905         u16                         status;
43906         char                        timestamp[8];
43907         char                        localtime[8];
43908 -       u16                         beacon_period;
43909 -       u16                         dtim_period;
43910 -       u16                         cap_info;
43911 -       u8                          bss_type;
43912 -       u8                          bssid[ETH_ALEN];
43913 -       struct iw_mgmt_essid_pset   ssid;
43914 -       struct iw_mgmt_ds_pset      ds_pset;
43915 -       struct iw_mgmt_cf_pset      cf_pset;
43916 -       struct iw_mgmt_ibss_pset    ibss_pset;
43917 -       struct iw_mgmt_data_rset    bss_basic_rset;
43918 +       struct wl3501_req           req;
43919         u8                          rssi;
43920  };
43922 @@ -471,8 +466,10 @@ struct wl3501_md_req {
43923         u16     size;
43924         u8      pri;
43925         u8      service_class;
43926 -       u8      daddr[ETH_ALEN];
43927 -       u8      saddr[ETH_ALEN];
43928 +       struct {
43929 +               u8      daddr[ETH_ALEN];
43930 +               u8      saddr[ETH_ALEN];
43931 +       } addr;
43932  };
43934  struct wl3501_md_ind {
43935 @@ -484,8 +481,10 @@ struct wl3501_md_ind {
43936         u8      reception;
43937         u8      pri;
43938         u8      service_class;
43939 -       u8      daddr[ETH_ALEN];
43940 -       u8      saddr[ETH_ALEN];
43941 +       struct {
43942 +               u8      daddr[ETH_ALEN];
43943 +               u8      saddr[ETH_ALEN];
43944 +       } addr;
43945  };
43947  struct wl3501_md_confirm {
43948 diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
43949 index 8ca5789c7b37..672f5d5f3f2c 100644
43950 --- a/drivers/net/wireless/wl3501_cs.c
43951 +++ b/drivers/net/wireless/wl3501_cs.c
43952 @@ -469,6 +469,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
43953         struct wl3501_md_req sig = {
43954                 .sig_id = WL3501_SIG_MD_REQ,
43955         };
43956 +       size_t sig_addr_len = sizeof(sig.addr);
43957         u8 *pdata = (char *)data;
43958         int rc = -EIO;
43960 @@ -484,9 +485,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
43961                         goto out;
43962                 }
43963                 rc = 0;
43964 -               memcpy(&sig.daddr[0], pdata, 12);
43965 -               pktlen = len - 12;
43966 -               pdata += 12;
43967 +               memcpy(&sig.addr, pdata, sig_addr_len);
43968 +               pktlen = len - sig_addr_len;
43969 +               pdata += sig_addr_len;
43970                 sig.data = bf;
43971                 if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
43972                         u8 addr4[ETH_ALEN] = {
43973 @@ -589,7 +590,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
43974         struct wl3501_join_req sig = {
43975                 .sig_id           = WL3501_SIG_JOIN_REQ,
43976                 .timeout          = 10,
43977 -               .ds_pset = {
43978 +               .req.ds_pset = {
43979                         .el = {
43980                                 .id  = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
43981                                 .len = 1,
43982 @@ -598,7 +599,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
43983                 },
43984         };
43986 -       memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
43987 +       memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));
43988         return wl3501_esbq_exec(this, &sig, sizeof(sig));
43991 @@ -666,35 +667,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
43992         if (sig.status == WL3501_STATUS_SUCCESS) {
43993                 pr_debug("success");
43994                 if ((this->net_type == IW_MODE_INFRA &&
43995 -                    (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
43996 +                    (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
43997                     (this->net_type == IW_MODE_ADHOC &&
43998 -                    (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
43999 +                    (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
44000                     this->net_type == IW_MODE_AUTO) {
44001                         if (!this->essid.el.len)
44002                                 matchflag = 1;
44003                         else if (this->essid.el.len == 3 &&
44004                                  !memcmp(this->essid.essid, "ANY", 3))
44005                                 matchflag = 1;
44006 -                       else if (this->essid.el.len != sig.ssid.el.len)
44007 +                       else if (this->essid.el.len != sig.req.ssid.el.len)
44008                                 matchflag = 0;
44009 -                       else if (memcmp(this->essid.essid, sig.ssid.essid,
44010 +                       else if (memcmp(this->essid.essid, sig.req.ssid.essid,
44011                                         this->essid.el.len))
44012                                 matchflag = 0;
44013                         else
44014                                 matchflag = 1;
44015                         if (matchflag) {
44016                                 for (i = 0; i < this->bss_cnt; i++) {
44017 -                                       if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
44018 +                                       if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid,
44019 +                                                                      sig.req.bssid)) {
44020                                                 matchflag = 0;
44021                                                 break;
44022                                         }
44023                                 }
44024                         }
44025                         if (matchflag && (i < 20)) {
44026 -                               memcpy(&this->bss_set[i].beacon_period,
44027 -                                      &sig.beacon_period, 73);
44028 +                               memcpy(&this->bss_set[i].req,
44029 +                                      &sig.req, sizeof(sig.req));
44030                                 this->bss_cnt++;
44031                                 this->rssi = sig.rssi;
44032 +                               this->bss_set[i].rssi = sig.rssi;
44033                         }
44034                 }
44035         } else if (sig.status == WL3501_STATUS_TIMEOUT) {
44036 @@ -886,19 +889,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
44037                         if (this->join_sta_bss < this->bss_cnt) {
44038                                 const int i = this->join_sta_bss;
44039                                 memcpy(this->bssid,
44040 -                                      this->bss_set[i].bssid, ETH_ALEN);
44041 -                               this->chan = this->bss_set[i].ds_pset.chan;
44042 +                                      this->bss_set[i].req.bssid, ETH_ALEN);
44043 +                               this->chan = this->bss_set[i].req.ds_pset.chan;
44044                                 iw_copy_mgmt_info_element(&this->keep_essid.el,
44045 -                                                    &this->bss_set[i].ssid.el);
44046 +                                                    &this->bss_set[i].req.ssid.el);
44047                                 wl3501_mgmt_auth(this);
44048                         }
44049                 } else {
44050                         const int i = this->join_sta_bss;
44052 -                       memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
44053 -                       this->chan = this->bss_set[i].ds_pset.chan;
44054 +                       memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN);
44055 +                       this->chan = this->bss_set[i].req.ds_pset.chan;
44056                         iw_copy_mgmt_info_element(&this->keep_essid.el,
44057 -                                                 &this->bss_set[i].ssid.el);
44058 +                                                 &this->bss_set[i].req.ssid.el);
44059                         wl3501_online(dev);
44060                 }
44061         } else {
44062 @@ -980,7 +983,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
44063         } else {
44064                 skb->dev = dev;
44065                 skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
44066 -               skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
44067 +               skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr,
44068 +                                       sizeof(sig.addr));
44069                 wl3501_receive(this, skb->data, pkt_len);
44070                 skb_put(skb, pkt_len);
44071                 skb->protocol   = eth_type_trans(skb, dev);
44072 @@ -1571,30 +1575,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
44073         for (i = 0; i < this->bss_cnt; ++i) {
44074                 iwe.cmd                 = SIOCGIWAP;
44075                 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
44076 -               memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN);
44077 +               memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN);
44078                 current_ev = iwe_stream_add_event(info, current_ev,
44079                                                   extra + IW_SCAN_MAX_DATA,
44080                                                   &iwe, IW_EV_ADDR_LEN);
44081                 iwe.cmd           = SIOCGIWESSID;
44082                 iwe.u.data.flags  = 1;
44083 -               iwe.u.data.length = this->bss_set[i].ssid.el.len;
44084 +               iwe.u.data.length = this->bss_set[i].req.ssid.el.len;
44085                 current_ev = iwe_stream_add_point(info, current_ev,
44086                                                   extra + IW_SCAN_MAX_DATA,
44087                                                   &iwe,
44088 -                                                 this->bss_set[i].ssid.essid);
44089 +                                                 this->bss_set[i].req.ssid.essid);
44090                 iwe.cmd    = SIOCGIWMODE;
44091 -               iwe.u.mode = this->bss_set[i].bss_type;
44092 +               iwe.u.mode = this->bss_set[i].req.bss_type;
44093                 current_ev = iwe_stream_add_event(info, current_ev,
44094                                                   extra + IW_SCAN_MAX_DATA,
44095                                                   &iwe, IW_EV_UINT_LEN);
44096                 iwe.cmd = SIOCGIWFREQ;
44097 -               iwe.u.freq.m = this->bss_set[i].ds_pset.chan;
44098 +               iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan;
44099                 iwe.u.freq.e = 0;
44100                 current_ev = iwe_stream_add_event(info, current_ev,
44101                                                   extra + IW_SCAN_MAX_DATA,
44102                                                   &iwe, IW_EV_FREQ_LEN);
44103                 iwe.cmd = SIOCGIWENCODE;
44104 -               if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
44105 +               if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
44106                         iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
44107                 else
44108                         iwe.u.data.flags = IW_ENCODE_DISABLED;
44109 diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
44110 index f1469ac8ff42..3fe5b81eda2d 100644
44111 --- a/drivers/nfc/pn533/pn533.c
44112 +++ b/drivers/nfc/pn533/pn533.c
44113 @@ -706,6 +706,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
44114         if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
44115                 return false;
44117 +       if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE)
44118 +               return false;
44120         return true;
44123 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
44124 index 0896e21642be..d5d7e0cdd78d 100644
44125 --- a/drivers/nvme/host/core.c
44126 +++ b/drivers/nvme/host/core.c
44127 @@ -2681,7 +2681,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
44129         if (ctrl->ps_max_latency_us != latency) {
44130                 ctrl->ps_max_latency_us = latency;
44131 -               nvme_configure_apst(ctrl);
44132 +               if (ctrl->state == NVME_CTRL_LIVE)
44133 +                       nvme_configure_apst(ctrl);
44134         }
44137 diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
44138 index a1d476e1ac02..ec1e454848e5 100644
44139 --- a/drivers/nvme/host/multipath.c
44140 +++ b/drivers/nvme/host/multipath.c
44141 @@ -668,6 +668,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
44142                 if (desc.state) {
44143                         /* found the group desc: update */
44144                         nvme_update_ns_ana_state(&desc, ns);
44145 +               } else {
44146 +                       /* group desc not found: trigger a re-read */
44147 +                       set_bit(NVME_NS_ANA_PENDING, &ns->flags);
44148 +                       queue_work(nvme_wq, &ns->ctrl->ana_work);
44149                 }
44150         } else {
44151                 ns->ana_state = NVME_ANA_OPTIMIZED; 
44152 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
44153 index 7249ae74f71f..c92a15c3fbc5 100644
44154 --- a/drivers/nvme/host/pci.c
44155 +++ b/drivers/nvme/host/pci.c
44156 @@ -852,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
44157                                 return nvme_setup_prp_simple(dev, req,
44158                                                              &cmnd->rw, &bv);
44160 -                       if (iod->nvmeq->qid &&
44161 +                       if (iod->nvmeq->qid && sgl_threshold &&
44162                             dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
44163                                 return nvme_setup_sgl_simple(dev, req,
44164                                                              &cmnd->rw, &bv);
44165 diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
44166 index a0f00cb8f9f3..d7d7c81d0701 100644
44167 --- a/drivers/nvme/host/tcp.c
44168 +++ b/drivers/nvme/host/tcp.c
44169 @@ -874,7 +874,7 @@ static void nvme_tcp_state_change(struct sock *sk)
44171         struct nvme_tcp_queue *queue;
44173 -       read_lock(&sk->sk_callback_lock);
44174 +       read_lock_bh(&sk->sk_callback_lock);
44175         queue = sk->sk_user_data;
44176         if (!queue)
44177                 goto done;
44178 @@ -895,7 +895,7 @@ static void nvme_tcp_state_change(struct sock *sk)
44180         queue->state_change(sk);
44181  done:
44182 -       read_unlock(&sk->sk_callback_lock);
44183 +       read_unlock_bh(&sk->sk_callback_lock);
44186  static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
44187 diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
44188 index fe6b8aa90b53..81224447605b 100644
44189 --- a/drivers/nvme/target/admin-cmd.c
44190 +++ b/drivers/nvme/target/admin-cmd.c
44191 @@ -919,15 +919,21 @@ void nvmet_execute_async_event(struct nvmet_req *req)
44192  void nvmet_execute_keep_alive(struct nvmet_req *req)
44194         struct nvmet_ctrl *ctrl = req->sq->ctrl;
44195 +       u16 status = 0;
44197         if (!nvmet_check_transfer_len(req, 0))
44198                 return;
44200 +       if (!ctrl->kato) {
44201 +               status = NVME_SC_KA_TIMEOUT_INVALID;
44202 +               goto out;
44203 +       }
44205         pr_debug("ctrl %d update keep-alive timer for %d secs\n",
44206                 ctrl->cntlid, ctrl->kato);
44208         mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
44209 -       nvmet_req_complete(req, 0);
44210 +out:
44211 +       nvmet_req_complete(req, status);
44214  u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
44215 diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
44216 index 682854e0e079..4845d12e374a 100644
44217 --- a/drivers/nvme/target/discovery.c
44218 +++ b/drivers/nvme/target/discovery.c
44219 @@ -178,12 +178,14 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
44220         if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
44221                 req->error_loc =
44222                         offsetof(struct nvme_get_log_page_command, lid);
44223 -               status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
44224 +               status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
44225                 goto out;
44226         }
44228         /* Spec requires dword aligned offsets */
44229         if (offset & 0x3) {
44230 +               req->error_loc =
44231 +                       offsetof(struct nvme_get_log_page_command, lpo);
44232                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
44233                 goto out;
44234         }
44235 @@ -250,7 +252,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
44237         if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
44238                 req->error_loc = offsetof(struct nvme_identify, cns);
44239 -               status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
44240 +               status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
44241                 goto out;
44242         }
44244 diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
44245 index 9a8b3726a37c..429263ca9b97 100644
44246 --- a/drivers/nvme/target/io-cmd-bdev.c
44247 +++ b/drivers/nvme/target/io-cmd-bdev.c
44248 @@ -258,7 +258,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
44250         sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
44252 -       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
44253 +       if (nvmet_use_inline_bvec(req)) {
44254                 bio = &req->b.inline_bio;
44255                 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
44256         } else {
44257 diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
44258 index 4b84edb49f22..5aad34b106dc 100644
44259 --- a/drivers/nvme/target/nvmet.h
44260 +++ b/drivers/nvme/target/nvmet.h
44261 @@ -614,4 +614,10 @@ static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
44262         return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
44265 +static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
44267 +       return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
44268 +              req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
44271  #endif /* _NVMET_H */
44272 diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
44273 index 2798944899b7..39b1473f7204 100644
44274 --- a/drivers/nvme/target/passthru.c
44275 +++ b/drivers/nvme/target/passthru.c
44276 @@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
44277         if (req->sg_cnt > BIO_MAX_VECS)
44278                 return -EINVAL;
44280 -       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
44281 +       if (nvmet_use_inline_bvec(req)) {
44282                 bio = &req->p.inline_bio;
44283                 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
44284         } else {
44285 diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
44286 index 6c1f3ab7649c..7d607f435e36 100644
44287 --- a/drivers/nvme/target/rdma.c
44288 +++ b/drivers/nvme/target/rdma.c
44289 @@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
44291         struct nvmet_rdma_rsp *rsp =
44292                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
44293 -       struct nvmet_rdma_queue *queue = cq->cq_context;
44294 +       struct nvmet_rdma_queue *queue = wc->qp->qp_context;
44296         nvmet_rdma_release_rsp(rsp);
44298 @@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
44300         struct nvmet_rdma_rsp *rsp =
44301                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
44302 -       struct nvmet_rdma_queue *queue = cq->cq_context;
44303 +       struct nvmet_rdma_queue *queue = wc->qp->qp_context;
44304         struct rdma_cm_id *cm_id = rsp->queue->cm_id;
44305         u16 status;
44307 diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
44308 index d658c6e8263a..d958b5da9b88 100644
44309 --- a/drivers/nvme/target/tcp.c
44310 +++ b/drivers/nvme/target/tcp.c
44311 @@ -525,11 +525,36 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
44312         struct nvmet_tcp_cmd *cmd =
44313                 container_of(req, struct nvmet_tcp_cmd, req);
44314         struct nvmet_tcp_queue  *queue = cmd->queue;
44315 +       struct nvme_sgl_desc *sgl;
44316 +       u32 len;
44318 +       if (unlikely(cmd == queue->cmd)) {
44319 +               sgl = &cmd->req.cmd->common.dptr.sgl;
44320 +               len = le32_to_cpu(sgl->length);
44322 +               /*
44323 +                * Wait for inline data before processing the response.
44324 +                * Avoid using helpers, this might happen before
44325 +                * nvmet_req_init is completed.
44326 +                */
44327 +               if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
44328 +                   len && len < cmd->req.port->inline_data_size &&
44329 +                   nvme_is_write(cmd->req.cmd))
44330 +                       return;
44331 +       }
44333         llist_add(&cmd->lentry, &queue->resp_list);
44334         queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
44337 +static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
44339 +       if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
44340 +               nvmet_tcp_queue_response(&cmd->req);
44341 +       else
44342 +               cmd->req.execute(&cmd->req);
44345  static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
44347         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
44348 @@ -961,7 +986,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
44349                         le32_to_cpu(req->cmd->common.dptr.sgl.length));
44351                 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
44352 -               return -EAGAIN;
44353 +               return 0;
44354         }
44356         ret = nvmet_tcp_map_data(queue->cmd);
44357 @@ -1104,10 +1129,8 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
44358                 return 0;
44359         }
44361 -       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
44362 -           cmd->rbytes_done == cmd->req.transfer_len) {
44363 -               cmd->req.execute(&cmd->req);
44364 -       }
44365 +       if (cmd->rbytes_done == cmd->req.transfer_len)
44366 +               nvmet_tcp_execute_request(cmd);
44368         nvmet_prepare_receive_pdu(queue);
44369         return 0;
44370 @@ -1144,9 +1167,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
44371                 goto out;
44372         }
44374 -       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
44375 -           cmd->rbytes_done == cmd->req.transfer_len)
44376 -               cmd->req.execute(&cmd->req);
44377 +       if (cmd->rbytes_done == cmd->req.transfer_len)
44378 +               nvmet_tcp_execute_request(cmd);
44380         ret = 0;
44381  out:
44382         nvmet_prepare_receive_pdu(queue);
44383 @@ -1434,7 +1457,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
44385         struct nvmet_tcp_queue *queue;
44387 -       write_lock_bh(&sk->sk_callback_lock);
44388 +       read_lock_bh(&sk->sk_callback_lock);
44389         queue = sk->sk_user_data;
44390         if (!queue)
44391                 goto done;
44392 @@ -1452,7 +1475,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
44393                         queue->idx, sk->sk_state);
44394         }
44395  done:
44396 -       write_unlock_bh(&sk->sk_callback_lock);
44397 +       read_unlock_bh(&sk->sk_callback_lock);
44400  static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
44401 diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
44402 index 75d2594c16e1..267a0d9e99ba 100644
44403 --- a/drivers/nvmem/Kconfig
44404 +++ b/drivers/nvmem/Kconfig
44405 @@ -272,6 +272,7 @@ config SPRD_EFUSE
44407  config NVMEM_RMEM
44408         tristate "Reserved Memory Based Driver Support"
44409 +       depends on HAS_IOMEM
44410         help
44411           This driver maps reserved memory into an nvmem device. It might be
44412           useful to expose information left by firmware in memory.
44413 diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
44414 index 6cace24dfbf7..100d69d8f2e1 100644
44415 --- a/drivers/nvmem/qfprom.c
44416 +++ b/drivers/nvmem/qfprom.c
44417 @@ -127,6 +127,16 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
44419         int ret;
44421 +       /*
44422 +        * This may be a shared rail and may be able to run at a lower rate
44423 +        * when we're not blowing fuses.  At the moment, the regulator framework
44424 +        * applies voltage constraints even on disabled rails, so remove our
44425 +        * constraints and allow the rail to be adjusted by other users.
44426 +        */
44427 +       ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
44428 +       if (ret)
44429 +               dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
44431         ret = regulator_disable(priv->vcc);
44432         if (ret)
44433                 dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
44434 @@ -172,6 +182,17 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
44435                 goto err_clk_prepared;
44436         }
44438 +       /*
44439 +        * Hardware requires 1.8V min for fuse blowing; this may be
44440 +        * a rail shared do don't specify a max--regulator constraints
44441 +        * will handle.
44442 +        */
44443 +       ret = regulator_set_voltage(priv->vcc, 1800000, INT_MAX);
44444 +       if (ret) {
44445 +               dev_err(priv->dev, "Failed to set 1.8 voltage\n");
44446 +               goto err_clk_rate_set;
44447 +       }
44449         ret = regulator_enable(priv->vcc);
44450         if (ret) {
44451                 dev_err(priv->dev, "Failed to enable regulator\n");
44452 diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
44453 index 23effe5e50ec..2d132949572d 100644
44454 --- a/drivers/of/overlay.c
44455 +++ b/drivers/of/overlay.c
44456 @@ -796,6 +796,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
44457                 if (!fragment->target) {
44458                         of_node_put(fragment->overlay);
44459                         ret = -EINVAL;
44460 +                       of_node_put(node);
44461                         goto err_free_fragments;
44462                 }
44464 diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
44465 index 4547ac44c8d4..8fa1a7fdf12c 100644
44466 --- a/drivers/parport/ieee1284.c
44467 +++ b/drivers/parport/ieee1284.c
44468 @@ -202,7 +202,7 @@ int parport_wait_peripheral(struct parport *port,
44469                         /* parport_wait_event didn't time out, but the
44470                          * peripheral wasn't actually ready either.
44471                          * Wait for another 10ms. */
44472 -                       schedule_timeout_interruptible(msecs_to_jiffies(10));
44473 +                       schedule_msec_hrtimeout_interruptible((10));
44474                 }
44475         }
44477 diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
44478 index 2c11bd3fe1fd..8cb6b61c0880 100644
44479 --- a/drivers/parport/ieee1284_ops.c
44480 +++ b/drivers/parport/ieee1284_ops.c
44481 @@ -520,7 +520,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
44482                         /* Yield the port for a while. */
44483                         if (count && dev->port->irq != PARPORT_IRQ_NONE) {
44484                                 parport_release (dev);
44485 -                               schedule_timeout_interruptible(msecs_to_jiffies(40));
44486 +                               schedule_msec_hrtimeout_interruptible((40));
44487                                 parport_claim_or_block (dev);
44488                         }
44489                         else
44490 diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
44491 index 53aa35cb3a49..a59ecbec601f 100644
44492 --- a/drivers/pci/controller/dwc/pci-keystone.c
44493 +++ b/drivers/pci/controller/dwc/pci-keystone.c
44494 @@ -798,7 +798,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
44495         int ret;
44497         pp->bridge->ops = &ks_pcie_ops;
44498 -       pp->bridge->child_ops = &ks_child_pcie_ops;
44499 +       if (!ks_pcie->is_am6)
44500 +               pp->bridge->child_ops = &ks_child_pcie_ops;
44502         ret = ks_pcie_config_legacy_irq(ks_pcie);
44503         if (ret)
44504 diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
44505 index 1c25d8337151..8d028a88b375 100644
44506 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c
44507 +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
44508 @@ -705,6 +705,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
44509                 }
44510         }
44512 +       dw_pcie_iatu_detect(pci);
44514         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
44515         if (!res)
44516                 return -EINVAL;
44517 diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
44518 index 7e55b2b66182..24192b40e3a2 100644
44519 --- a/drivers/pci/controller/dwc/pcie-designware-host.c
44520 +++ b/drivers/pci/controller/dwc/pcie-designware-host.c
44521 @@ -398,6 +398,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
44522                 if (ret)
44523                         goto err_free_msi;
44524         }
44525 +       dw_pcie_iatu_detect(pci);
44527         dw_pcie_setup_rc(pp);
44528         dw_pcie_msi_init(pp);
44529 diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
44530 index 004cb860e266..a945f0c0e73d 100644
44531 --- a/drivers/pci/controller/dwc/pcie-designware.c
44532 +++ b/drivers/pci/controller/dwc/pcie-designware.c
44533 @@ -660,11 +660,9 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
44534         pci->num_ob_windows = ob;
44537 -void dw_pcie_setup(struct dw_pcie *pci)
44538 +void dw_pcie_iatu_detect(struct dw_pcie *pci)
44540 -       u32 val;
44541         struct device *dev = pci->dev;
44542 -       struct device_node *np = dev->of_node;
44543         struct platform_device *pdev = to_platform_device(dev);
44545         if (pci->version >= 0x480A || (!pci->version &&
44546 @@ -693,6 +691,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
44548         dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
44549                  pci->num_ob_windows, pci->num_ib_windows);
44552 +void dw_pcie_setup(struct dw_pcie *pci)
44554 +       u32 val;
44555 +       struct device *dev = pci->dev;
44556 +       struct device_node *np = dev->of_node;
44558         if (pci->link_gen > 0)
44559                 dw_pcie_link_set_max_speed(pci, pci->link_gen);
44560 diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
44561 index 7247c8b01f04..7d6e9b7576be 100644
44562 --- a/drivers/pci/controller/dwc/pcie-designware.h
44563 +++ b/drivers/pci/controller/dwc/pcie-designware.h
44564 @@ -306,6 +306,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
44565  void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
44566                          enum dw_pcie_region_type type);
44567  void dw_pcie_setup(struct dw_pcie *pci);
44568 +void dw_pcie_iatu_detect(struct dw_pcie *pci);
44570  static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
44572 diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
44573 index 2afdc865253e..7f503dd4ff81 100644
44574 --- a/drivers/pci/controller/pci-xgene.c
44575 +++ b/drivers/pci/controller/pci-xgene.c
44576 @@ -354,7 +354,8 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
44577         if (IS_ERR(port->csr_base))
44578                 return PTR_ERR(port->csr_base);
44580 -       port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
44581 +       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
44582 +       port->cfg_base = devm_ioremap_resource(dev, res);
44583         if (IS_ERR(port->cfg_base))
44584                 return PTR_ERR(port->cfg_base);
44585         port->cfg_addr = res->start;
44586 diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
44587 index e330e6811f0b..08bc788d9422 100644
44588 --- a/drivers/pci/controller/pcie-brcmstb.c
44589 +++ b/drivers/pci/controller/pcie-brcmstb.c
44590 @@ -1148,6 +1148,7 @@ static int brcm_pcie_suspend(struct device *dev)
44592         brcm_pcie_turn_off(pcie);
44593         ret = brcm_phy_stop(pcie);
44594 +       reset_control_rearm(pcie->rescal);
44595         clk_disable_unprepare(pcie->clk);
44597         return ret;
44598 @@ -1163,9 +1164,13 @@ static int brcm_pcie_resume(struct device *dev)
44599         base = pcie->base;
44600         clk_prepare_enable(pcie->clk);
44602 +       ret = reset_control_reset(pcie->rescal);
44603 +       if (ret)
44604 +               goto err_disable_clk;
44606         ret = brcm_phy_start(pcie);
44607         if (ret)
44608 -               goto err;
44609 +               goto err_reset;
44611         /* Take bridge out of reset so we can access the SERDES reg */
44612         pcie->bridge_sw_init_set(pcie, 0);
44613 @@ -1180,14 +1185,16 @@ static int brcm_pcie_resume(struct device *dev)
44615         ret = brcm_pcie_setup(pcie);
44616         if (ret)
44617 -               goto err;
44618 +               goto err_reset;
44620         if (pcie->msi)
44621                 brcm_msi_set_regs(pcie->msi);
44623         return 0;
44625 -err:
44626 +err_reset:
44627 +       reset_control_rearm(pcie->rescal);
44628 +err_disable_clk:
44629         clk_disable_unprepare(pcie->clk);
44630         return ret;
44632 @@ -1197,7 +1204,7 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
44633         brcm_msi_remove(pcie);
44634         brcm_pcie_turn_off(pcie);
44635         brcm_phy_stop(pcie);
44636 -       reset_control_assert(pcie->rescal);
44637 +       reset_control_rearm(pcie->rescal);
44638         clk_disable_unprepare(pcie->clk);
44641 @@ -1278,13 +1285,13 @@ static int brcm_pcie_probe(struct platform_device *pdev)
44642                 return PTR_ERR(pcie->perst_reset);
44643         }
44645 -       ret = reset_control_deassert(pcie->rescal);
44646 +       ret = reset_control_reset(pcie->rescal);
44647         if (ret)
44648                 dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
44650         ret = brcm_phy_start(pcie);
44651         if (ret) {
44652 -               reset_control_assert(pcie->rescal);
44653 +               reset_control_rearm(pcie->rescal);
44654                 clk_disable_unprepare(pcie->clk);
44655                 return ret;
44656         }
44657 @@ -1296,6 +1303,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
44658         pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
44659         if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
44660                 dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
44661 +               ret = -ENODEV;
44662                 goto fail;
44663         }
44665 diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
44666 index 908475d27e0e..eede4e8f3f75 100644
44667 --- a/drivers/pci/controller/pcie-iproc-msi.c
44668 +++ b/drivers/pci/controller/pcie-iproc-msi.c
44669 @@ -271,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
44670                                     NULL, NULL);
44671         }
44673 -       return hwirq;
44674 +       return 0;
44677  static void iproc_msi_irq_domain_free(struct irq_domain *domain,
44678 diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
44679 index c0ac4e9cbe72..f9760e73d568 100644
44680 --- a/drivers/pci/endpoint/functions/pci-epf-test.c
44681 +++ b/drivers/pci/endpoint/functions/pci-epf-test.c
44682 @@ -833,15 +833,18 @@ static int pci_epf_test_bind(struct pci_epf *epf)
44683                 return -EINVAL;
44685         epc_features = pci_epc_get_features(epc, epf->func_no);
44686 -       if (epc_features) {
44687 -               linkup_notifier = epc_features->linkup_notifier;
44688 -               core_init_notifier = epc_features->core_init_notifier;
44689 -               test_reg_bar = pci_epc_get_first_free_bar(epc_features);
44690 -               if (test_reg_bar < 0)
44691 -                       return -EINVAL;
44692 -               pci_epf_configure_bar(epf, epc_features);
44693 +       if (!epc_features) {
44694 +               dev_err(&epf->dev, "epc_features not implemented\n");
44695 +               return -EOPNOTSUPP;
44696         }
44698 +       linkup_notifier = epc_features->linkup_notifier;
44699 +       core_init_notifier = epc_features->core_init_notifier;
44700 +       test_reg_bar = pci_epc_get_first_free_bar(epc_features);
44701 +       if (test_reg_bar < 0)
44702 +               return -EINVAL;
44703 +       pci_epf_configure_bar(epf, epc_features);
44705         epf_test->test_reg_bar = test_reg_bar;
44706         epf_test->epc_features = epc_features;
44708 @@ -922,6 +925,7 @@ static int __init pci_epf_test_init(void)
44710         ret = pci_epf_register_driver(&test_driver);
44711         if (ret) {
44712 +               destroy_workqueue(kpcitest_workqueue);
44713                 pr_err("Failed to register pci epf test driver --> %d\n", ret);
44714                 return ret;
44715         }
44716 @@ -932,6 +936,8 @@ module_init(pci_epf_test_init);
44718  static void __exit pci_epf_test_exit(void)
44720 +       if (kpcitest_workqueue)
44721 +               destroy_workqueue(kpcitest_workqueue);
44722         pci_epf_unregister_driver(&test_driver);
44724  module_exit(pci_epf_test_exit);
44725 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
44726 index 16a17215f633..e4d4e399004b 100644
44727 --- a/drivers/pci/pci.c
44728 +++ b/drivers/pci/pci.c
44729 @@ -1870,20 +1870,10 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
44730         int err;
44731         int i, bars = 0;
44733 -       /*
44734 -        * Power state could be unknown at this point, either due to a fresh
44735 -        * boot or a device removal call.  So get the current power state
44736 -        * so that things like MSI message writing will behave as expected
44737 -        * (e.g. if the device really is in D0 at enable time).
44738 -        */
44739 -       if (dev->pm_cap) {
44740 -               u16 pmcsr;
44741 -               pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
44742 -               dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
44743 -       }
44745 -       if (atomic_inc_return(&dev->enable_cnt) > 1)
44746 +       if (atomic_inc_return(&dev->enable_cnt) > 1) {
44747 +               pci_update_current_state(dev, dev->current_state);
44748                 return 0;               /* already enabled */
44749 +       }
44751         bridge = pci_upstream_bridge(dev);
44752         if (bridge)
44753 diff --git a/drivers/pci/pcie/rcec.c b/drivers/pci/pcie/rcec.c
44754 index 2c5c552994e4..d0bcd141ac9c 100644
44755 --- a/drivers/pci/pcie/rcec.c
44756 +++ b/drivers/pci/pcie/rcec.c
44757 @@ -32,7 +32,7 @@ static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep)
44759         /* Same bus, so check bitmap */
44760         for_each_set_bit(devn, &bitmap, 32)
44761 -               if (devn == rciep->devfn)
44762 +               if (devn == PCI_SLOT(rciep->devfn))
44763                         return true;
44765         return false;
44766 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
44767 index 953f15abc850..be51670572fa 100644
44768 --- a/drivers/pci/probe.c
44769 +++ b/drivers/pci/probe.c
44770 @@ -2353,6 +2353,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
44771         pci_set_of_node(dev);
44773         if (pci_setup_device(dev)) {
44774 +               pci_release_of_node(dev);
44775                 pci_bus_put(dev->bus);
44776                 kfree(dev);
44777                 return NULL;
44778 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
44779 index 653660e3ba9e..c87fd7a275e4 100644
44780 --- a/drivers/pci/quirks.c
44781 +++ b/drivers/pci/quirks.c
44782 @@ -3558,6 +3558,106 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
44783         dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
44786 +static bool acs_on_downstream;
44787 +static bool acs_on_multifunction;
44789 +#define NUM_ACS_IDS 16
44790 +struct acs_on_id {
44791 +       unsigned short vendor;
44792 +       unsigned short device;
44794 +static struct acs_on_id acs_on_ids[NUM_ACS_IDS];
44795 +static u8 max_acs_id;
44797 +static __init int pcie_acs_override_setup(char *p)
44799 +       if (!p)
44800 +               return -EINVAL;
44802 +       while (*p) {
44803 +               if (!strncmp(p, "downstream", 10))
44804 +                       acs_on_downstream = true;
44805 +               if (!strncmp(p, "multifunction", 13))
44806 +                       acs_on_multifunction = true;
44807 +               if (!strncmp(p, "id:", 3)) {
44808 +                       char opt[5];
44809 +                       int ret;
44810 +                       long val;
44812 +                       if (max_acs_id >= NUM_ACS_IDS - 1) {
44813 +                               pr_warn("Out of PCIe ACS override slots (%d)\n",
44814 +                                               NUM_ACS_IDS);
44815 +                               goto next;
44816 +                       }
44818 +                       p += 3;
44819 +                       snprintf(opt, 5, "%s", p);
44820 +                       ret = kstrtol(opt, 16, &val);
44821 +                       if (ret) {
44822 +                               pr_warn("PCIe ACS ID parse error %d\n", ret);
44823 +                               goto next;
44824 +                       }
44825 +                       acs_on_ids[max_acs_id].vendor = val;
44827 +                       p += strcspn(p, ":");
44828 +                       if (*p != ':') {
44829 +                               pr_warn("PCIe ACS invalid ID\n");
44830 +                               goto next;
44831 +                       }
44833 +                       p++;
44834 +                       snprintf(opt, 5, "%s", p);
44835 +                       ret = kstrtol(opt, 16, &val);
44836 +                       if (ret) {
44837 +                               pr_warn("PCIe ACS ID parse error %d\n", ret);
44838 +                               goto next;
44839 +                       }
44840 +                       acs_on_ids[max_acs_id].device = val;
44841 +                       max_acs_id++;
44842 +               }
44843 +next:
44844 +               p += strcspn(p, ",");
44845 +               if (*p == ',')
44846 +                       p++;
44847 +       }
44849 +       if (acs_on_downstream || acs_on_multifunction || max_acs_id)
44850 +               pr_warn("Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA\n");
44852 +       return 0;
44854 +early_param("pcie_acs_override", pcie_acs_override_setup);
44856 +static int pcie_acs_overrides(struct pci_dev *dev, u16 acs_flags)
44858 +       int i;
44860 +       /* Never override ACS for legacy devices or devices with ACS caps */
44861 +       if (!pci_is_pcie(dev) ||
44862 +               pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS))
44863 +                       return -ENOTTY;
44865 +       for (i = 0; i < max_acs_id; i++)
44866 +               if (acs_on_ids[i].vendor == dev->vendor &&
44867 +                       acs_on_ids[i].device == dev->device)
44868 +                               return 1;
44870 +       switch (pci_pcie_type(dev)) {
44871 +       case PCI_EXP_TYPE_DOWNSTREAM:
44872 +       case PCI_EXP_TYPE_ROOT_PORT:
44873 +               if (acs_on_downstream)
44874 +                       return 1;
44875 +               break;
44876 +       case PCI_EXP_TYPE_ENDPOINT:
44877 +       case PCI_EXP_TYPE_UPSTREAM:
44878 +       case PCI_EXP_TYPE_LEG_END:
44879 +       case PCI_EXP_TYPE_RC_END:
44880 +               if (acs_on_multifunction && dev->multifunction)
44881 +                       return 1;
44882 +       }
44884 +       return -ENOTTY;
44886  /*
44887   * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
44888   * The device will throw a Link Down error on AER-capable systems and
44889 @@ -4773,6 +4873,7 @@ static const struct pci_dev_acs_enabled {
44890         { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
44891         /* Zhaoxin Root/Downstream Ports */
44892         { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
44893 +       { PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides },
44894         { 0 }
44895  };
44897 diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
44898 index 7915d10f9aa1..bd549070c011 100644
44899 --- a/drivers/pci/vpd.c
44900 +++ b/drivers/pci/vpd.c
44901 @@ -570,7 +570,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
44902  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
44903  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
44904                 quirk_blacklist_vpd);
44905 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
44906  /*
44907   * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
44908   * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
44909 diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
44910 index 933bd8410fc2..ef9676418c9f 100644
44911 --- a/drivers/perf/arm_pmu_platform.c
44912 +++ b/drivers/perf/arm_pmu_platform.c
44913 @@ -6,6 +6,7 @@
44914   * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
44915   */
44916  #define pr_fmt(fmt) "hw perfevents: " fmt
44917 +#define dev_fmt pr_fmt
44919  #include <linux/bug.h>
44920  #include <linux/cpumask.h>
44921 @@ -100,10 +101,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
44922         struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
44924         num_irqs = platform_irq_count(pdev);
44925 -       if (num_irqs < 0) {
44926 -               pr_err("unable to count PMU IRQs\n");
44927 -               return num_irqs;
44928 -       }
44929 +       if (num_irqs < 0)
44930 +               return dev_err_probe(&pdev->dev, num_irqs, "unable to count PMU IRQs\n");
44932         /*
44933          * In this case we have no idea which CPUs are covered by the PMU.
44934 @@ -236,7 +235,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
44936         ret = armpmu_register(pmu);
44937         if (ret)
44938 -               goto out_free;
44939 +               goto out_free_irqs;
44941         return 0;
44943 diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
44944 index 26a0badabe38..19f32ae877b9 100644
44945 --- a/drivers/phy/cadence/phy-cadence-sierra.c
44946 +++ b/drivers/phy/cadence/phy-cadence-sierra.c
44947 @@ -319,6 +319,12 @@ static int cdns_sierra_phy_on(struct phy *gphy)
44948         u32 val;
44949         int ret;
44951 +       ret = reset_control_deassert(sp->phy_rst);
44952 +       if (ret) {
44953 +               dev_err(dev, "Failed to take the PHY out of reset\n");
44954 +               return ret;
44955 +       }
44957         /* Take the PHY lane group out of reset */
44958         ret = reset_control_deassert(ins->lnk_rst);
44959         if (ret) {
44960 @@ -616,7 +622,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
44962         pm_runtime_enable(dev);
44963         phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
44964 -       reset_control_deassert(sp->phy_rst);
44965         return PTR_ERR_OR_ZERO(phy_provider);
44967  put_child:
44968 diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
44969 index ea127b177f46..28c28d816484 100644
44970 --- a/drivers/phy/ingenic/phy-ingenic-usb.c
44971 +++ b/drivers/phy/ingenic/phy-ingenic-usb.c
44972 @@ -352,8 +352,8 @@ static int ingenic_usb_phy_probe(struct platform_device *pdev)
44973         }
44975         priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
44976 -       if (IS_ERR(priv))
44977 -               return PTR_ERR(priv);
44978 +       if (IS_ERR(priv->phy))
44979 +               return PTR_ERR(priv->phy);
44981         phy_set_drvdata(priv->phy, priv);
44983 diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
44984 index 6c96f2bf5266..c8ee23fc3a83 100644
44985 --- a/drivers/phy/marvell/Kconfig
44986 +++ b/drivers/phy/marvell/Kconfig
44987 @@ -3,8 +3,8 @@
44988  # Phy drivers for Marvell platforms
44990  config ARMADA375_USBCLUSTER_PHY
44991 -       def_bool y
44992 -       depends on MACH_ARMADA_375 || COMPILE_TEST
44993 +       bool "Armada 375 USB cluster PHY support" if COMPILE_TEST
44994 +       default y if MACH_ARMADA_375
44995         depends on OF && HAS_IOMEM
44996         select GENERIC_PHY
44998 diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
44999 index 9a610b414b1f..753cb5bab930 100644
45000 --- a/drivers/phy/ralink/phy-mt7621-pci.c
45001 +++ b/drivers/phy/ralink/phy-mt7621-pci.c
45002 @@ -62,7 +62,7 @@
45004  #define RG_PE1_FRC_MSTCKDIV                    BIT(5)
45006 -#define XTAL_MASK                              GENMASK(7, 6)
45007 +#define XTAL_MASK                              GENMASK(8, 6)
45009  #define MAX_PHYS       2
45011 @@ -319,9 +319,9 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
45012                 return PTR_ERR(phy->regmap);
45014         phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
45015 -       if (IS_ERR(phy)) {
45016 +       if (IS_ERR(phy->phy)) {
45017                 dev_err(dev, "failed to create phy\n");
45018 -               return PTR_ERR(phy);
45019 +               return PTR_ERR(phy->phy);
45020         }
45022         phy_set_drvdata(phy->phy, phy);
45023 diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
45024 index c9cfafe89cbf..e28e25f98708 100644
45025 --- a/drivers/phy/ti/phy-j721e-wiz.c
45026 +++ b/drivers/phy/ti/phy-j721e-wiz.c
45027 @@ -615,6 +615,12 @@ static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
45028                 of_clk_del_provider(clk_node);
45029                 of_node_put(clk_node);
45030         }
45032 +       for (i = 0; i < wiz->clk_div_sel_num; i++) {
45033 +               clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
45034 +               of_clk_del_provider(clk_node);
45035 +               of_node_put(clk_node);
45036 +       }
45039  static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
45040 @@ -947,27 +953,24 @@ static int wiz_probe(struct platform_device *pdev)
45041                 goto err_get_sync;
45042         }
45044 +       ret = wiz_init(wiz);
45045 +       if (ret) {
45046 +               dev_err(dev, "WIZ initialization failed\n");
45047 +               goto err_wiz_init;
45048 +       }
45050         serdes_pdev = of_platform_device_create(child_node, NULL, dev);
45051         if (!serdes_pdev) {
45052                 dev_WARN(dev, "Unable to create SERDES platform device\n");
45053                 ret = -ENOMEM;
45054 -               goto err_pdev_create;
45055 -       }
45056 -       wiz->serdes_pdev = serdes_pdev;
45058 -       ret = wiz_init(wiz);
45059 -       if (ret) {
45060 -               dev_err(dev, "WIZ initialization failed\n");
45061                 goto err_wiz_init;
45062         }
45063 +       wiz->serdes_pdev = serdes_pdev;
45065         of_node_put(child_node);
45066         return 0;
45068  err_wiz_init:
45069 -       of_platform_device_destroy(&serdes_pdev->dev, NULL);
45071 -err_pdev_create:
45072         wiz_clock_cleanup(wiz, node);
45074  err_get_sync:
45075 diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
45076 index 9887f908f540..812e5409d359 100644
45077 --- a/drivers/phy/ti/phy-twl4030-usb.c
45078 +++ b/drivers/phy/ti/phy-twl4030-usb.c
45079 @@ -779,7 +779,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
45081         usb_remove_phy(&twl->phy);
45082         pm_runtime_get_sync(twl->dev);
45083 -       cancel_delayed_work(&twl->id_workaround_work);
45084 +       cancel_delayed_work_sync(&twl->id_workaround_work);
45085         device_remove_file(twl->dev, &dev_attr_vbus);
45087         /* set transceiver mode to power on defaults */
45088 diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
45089 index e71ebccc479c..03c32b2c5d30 100644
45090 --- a/drivers/pinctrl/pinctrl-at91-pio4.c
45091 +++ b/drivers/pinctrl/pinctrl-at91-pio4.c
45092 @@ -801,6 +801,10 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
45094         conf = atmel_pin_config_read(pctldev, pin_id);
45096 +       /* Keep slew rate enabled by default. */
45097 +       if (atmel_pioctrl->slew_rate_support)
45098 +               conf |= ATMEL_PIO_SR_MASK;
45100         for (i = 0; i < num_configs; i++) {
45101                 unsigned int param = pinconf_to_config_param(configs[i]);
45102                 unsigned int arg = pinconf_to_config_argument(configs[i]);
45103 @@ -808,10 +812,6 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
45104                 dev_dbg(pctldev->dev, "%s: pin=%u, config=0x%lx\n",
45105                         __func__, pin_id, configs[i]);
45107 -               /* Keep slew rate enabled by default. */
45108 -               if (atmel_pioctrl->slew_rate_support)
45109 -                       conf |= ATMEL_PIO_SR_MASK;
45111                 switch (param) {
45112                 case PIN_CONFIG_BIAS_DISABLE:
45113                         conf &= (~ATMEL_PIO_PUEN_MASK);
45114 diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
45115 index f2746125b077..3de0f767b7d1 100644
45116 --- a/drivers/pinctrl/pinctrl-ingenic.c
45117 +++ b/drivers/pinctrl/pinctrl-ingenic.c
45118 @@ -667,7 +667,9 @@ static int jz4770_pwm_pwm7_pins[] = { 0x6b, };
45119  static int jz4770_mac_rmii_pins[] = {
45120         0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
45121  };
45122 -static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
45123 +static int jz4770_mac_mii_pins[] = {
45124 +       0x7b, 0x7a, 0x7d, 0x7c, 0xa7, 0x24, 0xaf,
45127  static const struct group_desc jz4770_groups[] = {
45128         INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data, 0),
45129 @@ -2107,26 +2109,48 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
45130         enum pin_config_param param = pinconf_to_config_param(*config);
45131         unsigned int idx = pin % PINS_PER_GPIO_CHIP;
45132         unsigned int offt = pin / PINS_PER_GPIO_CHIP;
45133 -       bool pull;
45134 +       unsigned int bias;
45135 +       bool pull, pullup, pulldown;
45137 -       if (jzpc->info->version >= ID_JZ4770)
45138 -               pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
45139 -       else
45140 -               pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
45141 +       if (jzpc->info->version >= ID_X1830) {
45142 +               unsigned int half = PINS_PER_GPIO_CHIP / 2;
45143 +               unsigned int idxh = (pin % half) * 2;
45145 +               if (idx < half)
45146 +                       regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
45147 +                                       X1830_GPIO_PEL, &bias);
45148 +               else
45149 +                       regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
45150 +                                       X1830_GPIO_PEH, &bias);
45152 +               bias = (bias >> idxh) & (GPIO_PULL_UP | GPIO_PULL_DOWN);
45154 +               pullup = (bias == GPIO_PULL_UP) && (jzpc->info->pull_ups[offt] & BIT(idx));
45155 +               pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx));
45157 +       } else {
45158 +               if (jzpc->info->version >= ID_JZ4770)
45159 +                       pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
45160 +               else
45161 +                       pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
45163 +               pullup = pull && (jzpc->info->pull_ups[offt] & BIT(idx));
45164 +               pulldown = pull && (jzpc->info->pull_downs[offt] & BIT(idx));
45165 +       }
45167         switch (param) {
45168         case PIN_CONFIG_BIAS_DISABLE:
45169 -               if (pull)
45170 +               if (pullup || pulldown)
45171                         return -EINVAL;
45172                 break;
45174         case PIN_CONFIG_BIAS_PULL_UP:
45175 -               if (!pull || !(jzpc->info->pull_ups[offt] & BIT(idx)))
45176 +               if (!pullup)
45177                         return -EINVAL;
45178                 break;
45180         case PIN_CONFIG_BIAS_PULL_DOWN:
45181 -               if (!pull || !(jzpc->info->pull_downs[offt] & BIT(idx)))
45182 +               if (!pulldown)
45183                         return -EINVAL;
45184                 break;
45186 @@ -2144,7 +2168,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
45187         if (jzpc->info->version >= ID_X1830) {
45188                 unsigned int idx = pin % PINS_PER_GPIO_CHIP;
45189                 unsigned int half = PINS_PER_GPIO_CHIP / 2;
45190 -               unsigned int idxh = pin % half * 2;
45191 +               unsigned int idxh = (pin % half) * 2;
45192                 unsigned int offt = pin / PINS_PER_GPIO_CHIP;
45194                 if (idx < half) {
45195 diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
45196 index 7771316dfffa..10890fde9a75 100644
45197 --- a/drivers/pinctrl/pinctrl-single.c
45198 +++ b/drivers/pinctrl/pinctrl-single.c
45199 @@ -270,20 +270,44 @@ static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
45200         writel(val, reg);
45203 +static unsigned int pcs_pin_reg_offset_get(struct pcs_device *pcs,
45204 +                                          unsigned int pin)
45206 +       unsigned int mux_bytes = pcs->width / BITS_PER_BYTE;
45208 +       if (pcs->bits_per_mux) {
45209 +               unsigned int pin_offset_bytes;
45211 +               pin_offset_bytes = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
45212 +               return (pin_offset_bytes / mux_bytes) * mux_bytes;
45213 +       }
45215 +       return pin * mux_bytes;
45218 +static unsigned int pcs_pin_shift_reg_get(struct pcs_device *pcs,
45219 +                                         unsigned int pin)
45221 +       return (pin % (pcs->width / pcs->bits_per_pin)) * pcs->bits_per_pin;
45224  static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
45225                                         struct seq_file *s,
45226                                         unsigned pin)
45228         struct pcs_device *pcs;
45229 -       unsigned val, mux_bytes;
45230 +       unsigned int val;
45231         unsigned long offset;
45232         size_t pa;
45234         pcs = pinctrl_dev_get_drvdata(pctldev);
45236 -       mux_bytes = pcs->width / BITS_PER_BYTE;
45237 -       offset = pin * mux_bytes;
45238 +       offset = pcs_pin_reg_offset_get(pcs, pin);
45239         val = pcs->read(pcs->base + offset);
45241 +       if (pcs->bits_per_mux)
45242 +               val &= pcs->fmask << pcs_pin_shift_reg_get(pcs, pin);
45244         pa = pcs->res->start + offset;
45246         seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME);
45247 @@ -384,7 +408,6 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
45248         struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
45249         struct pcs_gpiofunc_range *frange = NULL;
45250         struct list_head *pos, *tmp;
45251 -       int mux_bytes = 0;
45252         unsigned data;
45254         /* If function mask is null, return directly. */
45255 @@ -392,29 +415,27 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
45256                 return -ENOTSUPP;
45258         list_for_each_safe(pos, tmp, &pcs->gpiofuncs) {
45259 +               u32 offset;
45261                 frange = list_entry(pos, struct pcs_gpiofunc_range, node);
45262                 if (pin >= frange->offset + frange->npins
45263                         || pin < frange->offset)
45264                         continue;
45265 -               mux_bytes = pcs->width / BITS_PER_BYTE;
45267 -               if (pcs->bits_per_mux) {
45268 -                       int byte_num, offset, pin_shift;
45269 +               offset = pcs_pin_reg_offset_get(pcs, pin);
45271 -                       byte_num = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
45272 -                       offset = (byte_num / mux_bytes) * mux_bytes;
45273 -                       pin_shift = pin % (pcs->width / pcs->bits_per_pin) *
45274 -                                   pcs->bits_per_pin;
45275 +               if (pcs->bits_per_mux) {
45276 +                       int pin_shift = pcs_pin_shift_reg_get(pcs, pin);
45278                         data = pcs->read(pcs->base + offset);
45279                         data &= ~(pcs->fmask << pin_shift);
45280                         data |= frange->gpiofunc << pin_shift;
45281                         pcs->write(data, pcs->base + offset);
45282                 } else {
45283 -                       data = pcs->read(pcs->base + pin * mux_bytes);
45284 +                       data = pcs->read(pcs->base + offset);
45285                         data &= ~pcs->fmask;
45286                         data |= frange->gpiofunc;
45287 -                       pcs->write(data, pcs->base + pin * mux_bytes);
45288 +                       pcs->write(data, pcs->base + offset);
45289                 }
45290                 break;
45291         }
45292 @@ -656,10 +677,8 @@ static const struct pinconf_ops pcs_pinconf_ops = {
45293   * pcs_add_pin() - add a pin to the static per controller pin array
45294   * @pcs: pcs driver instance
45295   * @offset: register offset from base
45296 - * @pin_pos: unused
45297   */
45298 -static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
45299 -               unsigned pin_pos)
45300 +static int pcs_add_pin(struct pcs_device *pcs, unsigned int offset)
45302         struct pcs_soc_data *pcs_soc = &pcs->socdata;
45303         struct pinctrl_pin_desc *pin;
45304 @@ -728,17 +747,9 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
45305         for (i = 0; i < pcs->desc.npins; i++) {
45306                 unsigned offset;
45307                 int res;
45308 -               int byte_num;
45309 -               int pin_pos = 0;
45311 -               if (pcs->bits_per_mux) {
45312 -                       byte_num = (pcs->bits_per_pin * i) / BITS_PER_BYTE;
45313 -                       offset = (byte_num / mux_bytes) * mux_bytes;
45314 -                       pin_pos = i % num_pins_in_register;
45315 -               } else {
45316 -                       offset = i * mux_bytes;
45317 -               }
45318 -               res = pcs_add_pin(pcs, offset, pin_pos);
45319 +               offset = pcs_pin_reg_offset_get(pcs, i);
45320 +               res = pcs_add_pin(pcs, offset);
45321                 if (res < 0) {
45322                         dev_err(pcs->dev, "error adding pins: %i\n", res);
45323                         return res;
45324 diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
45325 index 0cd7f33cdf25..2b99f4130e1e 100644
45326 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c
45327 +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
45328 @@ -55,7 +55,7 @@ static void exynos_irq_mask(struct irq_data *irqd)
45329         struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
45330         struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
45331         unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
45332 -       unsigned long mask;
45333 +       unsigned int mask;
45334         unsigned long flags;
45336         raw_spin_lock_irqsave(&bank->slock, flags);
45337 @@ -83,7 +83,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
45338         struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
45339         struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
45340         unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
45341 -       unsigned long mask;
45342 +       unsigned int mask;
45343         unsigned long flags;
45345         /*
45346 @@ -483,7 +483,7 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
45347         chained_irq_exit(chip, desc);
45350 -static inline void exynos_irq_demux_eint(unsigned long pend,
45351 +static inline void exynos_irq_demux_eint(unsigned int pend,
45352                                                 struct irq_domain *domain)
45354         unsigned int irq;
45355 @@ -500,8 +500,8 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
45357         struct irq_chip *chip = irq_desc_get_chip(desc);
45358         struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
45359 -       unsigned long pend;
45360 -       unsigned long mask;
45361 +       unsigned int pend;
45362 +       unsigned int mask;
45363         int i;
45365         chained_irq_enter(chip, desc);
45366 diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
45367 index 5bcb59ed579d..89761d3e1a47 100644
45368 --- a/drivers/platform/surface/aggregator/controller.c
45369 +++ b/drivers/platform/surface/aggregator/controller.c
45370 @@ -1040,7 +1040,7 @@ static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
45371         union acpi_object *obj;
45372         u64 val;
45374 -       if (!(funcs & BIT(func)))
45375 +       if (!(funcs & BIT_ULL(func)))
45376                 return 0; /* Not supported, leave *ret at its default value */
45378         obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
45379 diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
45380 index 7410ccae650c..a90ae6ba4a73 100644
45381 --- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
45382 +++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
45383 @@ -399,6 +399,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
45384         union acpi_object *obj = NULL;
45385         union acpi_object *elements;
45386         struct kset *tmp_set;
45387 +       int min_elements;
45389         /* instance_id needs to be reset for each type GUID
45390          * also, instance IDs are unique within GUID but not across
45391 @@ -409,14 +410,38 @@ static int init_bios_attributes(int attr_type, const char *guid)
45392         retval = alloc_attributes_data(attr_type);
45393         if (retval)
45394                 return retval;
45396 +       switch (attr_type) {
45397 +       case ENUM:      min_elements = 8;       break;
45398 +       case INT:       min_elements = 9;       break;
45399 +       case STR:       min_elements = 8;       break;
45400 +       case PO:        min_elements = 4;       break;
45401 +       default:
45402 +               pr_err("Error: Unknown attr_type: %d\n", attr_type);
45403 +               return -EINVAL;
45404 +       }
45406         /* need to use specific instance_id and guid combination to get right data */
45407         obj = get_wmiobj_pointer(instance_id, guid);
45408 -       if (!obj || obj->type != ACPI_TYPE_PACKAGE)
45409 +       if (!obj)
45410                 return -ENODEV;
45411 -       elements = obj->package.elements;
45413         mutex_lock(&wmi_priv.mutex);
45414 -       while (elements) {
45415 +       while (obj) {
45416 +               if (obj->type != ACPI_TYPE_PACKAGE) {
45417 +                       pr_err("Error: Expected ACPI-package type, got: %d\n", obj->type);
45418 +                       retval = -EIO;
45419 +                       goto err_attr_init;
45420 +               }
45422 +               if (obj->package.count < min_elements) {
45423 +                       pr_err("Error: ACPI-package does not have enough elements: %d < %d\n",
45424 +                              obj->package.count, min_elements);
45425 +                       goto nextobj;
45426 +               }
45428 +               elements = obj->package.elements;
45430                 /* sanity checking */
45431                 if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
45432                         pr_debug("incorrect element type\n");
45433 @@ -481,7 +506,6 @@ static int init_bios_attributes(int attr_type, const char *guid)
45434                 kfree(obj);
45435                 instance_id++;
45436                 obj = get_wmiobj_pointer(instance_id, guid);
45437 -               elements = obj ? obj->package.elements : NULL;
45438         }
45440         mutex_unlock(&wmi_priv.mutex);
45441 diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
45442 index bffe548187ee..c2918ee3e100 100644
45443 --- a/drivers/platform/x86/intel_ips.c
45444 +++ b/drivers/platform/x86/intel_ips.c
45445 @@ -798,7 +798,7 @@ static int ips_adjust(void *data)
45446                         ips_gpu_lower(ips);
45448  sleep:
45449 -               schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
45450 +               schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD));
45451         } while (!kthread_should_stop());
45453         dev_dbg(ips->dev, "ips-adjust thread stopped\n");
45454 @@ -974,7 +974,7 @@ static int ips_monitor(void *data)
45455         seqno_timestamp = get_jiffies_64();
45457         old_cpu_power = thm_readl(THM_CEC);
45458 -       schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
45459 +       schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
45461         /* Collect an initial average */
45462         for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
45463 @@ -1001,7 +1001,7 @@ static int ips_monitor(void *data)
45464                         mchp_samples[i] = mchp;
45465                 }
45467 -               schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
45468 +               schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
45469                 if (kthread_should_stop())
45470                         break;
45471         }
45472 @@ -1028,7 +1028,7 @@ static int ips_monitor(void *data)
45473          * us to reduce the sample frequency if the CPU and GPU are idle.
45474          */
45475         old_cpu_power = thm_readl(THM_CEC);
45476 -       schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
45477 +       schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
45478         last_sample_period = IPS_SAMPLE_PERIOD;
45480         timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
45481 diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
45482 index b5888aeb4bcf..260d49dca1ad 100644
45483 --- a/drivers/platform/x86/intel_pmc_core.c
45484 +++ b/drivers/platform/x86/intel_pmc_core.c
45485 @@ -1186,9 +1186,15 @@ static const struct pci_device_id pmc_pci_ids[] = {
45486   * the platform BIOS enforces 24Mhz crystal to shutdown
45487   * before PMC can assert SLP_S0#.
45488   */
45489 +static bool xtal_ignore;
45490  static int quirk_xtal_ignore(const struct dmi_system_id *id)
45492 -       struct pmc_dev *pmcdev = &pmc;
45493 +       xtal_ignore = true;
45494 +       return 0;
45497 +static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
45499         u32 value;
45501         value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
45502 @@ -1197,7 +1203,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id)
45503         /* Low Voltage Mode Enable */
45504         value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
45505         pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
45506 -       return 0;
45509  static const struct dmi_system_id pmc_core_dmi_table[]  = {
45510 @@ -1212,6 +1217,14 @@ static const struct dmi_system_id pmc_core_dmi_table[]  = {
45511         {}
45512  };
45514 +static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
45516 +       dmi_check_system(pmc_core_dmi_table);
45518 +       if (xtal_ignore)
45519 +               pmc_core_xtal_ignore(pmcdev);
45522  static int pmc_core_probe(struct platform_device *pdev)
45524         static bool device_initialized;
45525 @@ -1253,7 +1266,7 @@ static int pmc_core_probe(struct platform_device *pdev)
45526         mutex_init(&pmcdev->lock);
45527         platform_set_drvdata(pdev, pmcdev);
45528         pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
45529 -       dmi_check_system(pmc_core_dmi_table);
45530 +       pmc_core_do_dmi_quirks(pmcdev);
45532         /*
45533          * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
45534 diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
45535 index a2a2d923e60c..df1fc6c719f3 100644
45536 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
45537 +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
45538 @@ -21,12 +21,16 @@
45539  #define PUNIT_MAILBOX_BUSY_BIT         31
45541  /*
45542 - * The average time to complete some commands is about 40us. The current
45543 - * count is enough to satisfy 40us. But when the firmware is very busy, this
45544 - * causes timeout occasionally.  So increase to deal with some worst case
45545 - * scenarios. Most of the command still complete in few us.
45546 + * The average time to complete mailbox commands is less than 40us. Most of
45547 + * the commands complete in few micro seconds. But the same firmware handles
45548 + * requests from all power management features.
45549 + * We can create a scenario where we flood the firmware with requests then
45550 + * the mailbox response can be delayed for 100s of micro seconds. So define
45551 + * two timeouts. One for average case and one for long.
45552 + * If the firmware is taking more than average, just call cond_resched().
45553   */
45554 -#define OS_MAILBOX_RETRY_COUNT         100
45555 +#define OS_MAILBOX_TIMEOUT_AVG_US      40
45556 +#define OS_MAILBOX_TIMEOUT_MAX_US      1000
45558  struct isst_if_device {
45559         struct mutex mutex;
45560 @@ -35,11 +39,13 @@ struct isst_if_device {
45561  static int isst_if_mbox_cmd(struct pci_dev *pdev,
45562                             struct isst_if_mbox_cmd *mbox_cmd)
45564 -       u32 retries, data;
45565 +       s64 tm_delta = 0;
45566 +       ktime_t tm;
45567 +       u32 data;
45568         int ret;
45570         /* Poll for rb bit == 0 */
45571 -       retries = OS_MAILBOX_RETRY_COUNT;
45572 +       tm = ktime_get();
45573         do {
45574                 ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
45575                                             &data);
45576 @@ -48,11 +54,14 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
45578                 if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
45579                         ret = -EBUSY;
45580 +                       tm_delta = ktime_us_delta(ktime_get(), tm);
45581 +                       if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
45582 +                               cond_resched();
45583                         continue;
45584                 }
45585                 ret = 0;
45586                 break;
45587 -       } while (--retries);
45588 +       } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
45590         if (ret)
45591                 return ret;
45592 @@ -74,7 +83,8 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
45593                 return ret;
45595         /* Poll for rb bit == 0 */
45596 -       retries = OS_MAILBOX_RETRY_COUNT;
45597 +       tm_delta = 0;
45598 +       tm = ktime_get();
45599         do {
45600                 ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
45601                                             &data);
45602 @@ -83,6 +93,9 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
45604                 if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
45605                         ret = -EBUSY;
45606 +                       tm_delta = ktime_us_delta(ktime_get(), tm);
45607 +                       if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
45608 +                               cond_resched();
45609                         continue;
45610                 }
45612 @@ -96,7 +109,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
45613                 mbox_cmd->resp_data = data;
45614                 ret = 0;
45615                 break;
45616 -       } while (--retries);
45617 +       } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
45619         return ret;
45621 diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
45622 index ca684ed760d1..a9d2a4b98e57 100644
45623 --- a/drivers/platform/x86/pmc_atom.c
45624 +++ b/drivers/platform/x86/pmc_atom.c
45625 @@ -393,34 +393,10 @@ static const struct dmi_system_id critclk_systems[] = {
45626         },
45627         {
45628                 /* pmc_plt_clk* - are used for ethernet controllers */
45629 -               .ident = "Beckhoff CB3163",
45630 +               .ident = "Beckhoff Baytrail",
45631                 .matches = {
45632                         DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
45633 -                       DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
45634 -               },
45635 -       },
45636 -       {
45637 -               /* pmc_plt_clk* - are used for ethernet controllers */
45638 -               .ident = "Beckhoff CB4063",
45639 -               .matches = {
45640 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
45641 -                       DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
45642 -               },
45643 -       },
45644 -       {
45645 -               /* pmc_plt_clk* - are used for ethernet controllers */
45646 -               .ident = "Beckhoff CB6263",
45647 -               .matches = {
45648 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
45649 -                       DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
45650 -               },
45651 -       },
45652 -       {
45653 -               /* pmc_plt_clk* - are used for ethernet controllers */
45654 -               .ident = "Beckhoff CB6363",
45655 -               .matches = {
45656 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
45657 -                       DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
45658 +                       DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
45659                 },
45660         },
45661         {
45662 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
45663 index 0d9e2ddbf904..61f1c91c62de 100644
45664 --- a/drivers/platform/x86/thinkpad_acpi.c
45665 +++ b/drivers/platform/x86/thinkpad_acpi.c
45666 @@ -6260,6 +6260,7 @@ enum thermal_access_mode {
45667  enum { /* TPACPI_THERMAL_TPEC_* */
45668         TP_EC_THERMAL_TMP0 = 0x78,      /* ACPI EC regs TMP 0..7 */
45669         TP_EC_THERMAL_TMP8 = 0xC0,      /* ACPI EC regs TMP 8..15 */
45670 +       TP_EC_FUNCREV      = 0xEF,      /* ACPI EC Functional revision */
45671         TP_EC_THERMAL_TMP_NA = -128,    /* ACPI EC sensor not available */
45673         TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
45674 @@ -6458,7 +6459,7 @@ static const struct attribute_group thermal_temp_input8_group = {
45676  static int __init thermal_init(struct ibm_init_struct *iibm)
45678 -       u8 t, ta1, ta2;
45679 +       u8 t, ta1, ta2, ver = 0;
45680         int i;
45681         int acpi_tmp7;
45682         int res;
45683 @@ -6473,7 +6474,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
45684                  * 0x78-0x7F, 0xC0-0xC7.  Registers return 0x00 for
45685                  * non-implemented, thermal sensors return 0x80 when
45686                  * not available
45687 +                * The above rule is unfortunately flawed. This has been seen with
45688 +                * 0xC2 (power supply ID) causing thermal control problems.
45689 +                * The EC version can be determined by offset 0xEF and at least for
45690 +                * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7
45691 +                * are not thermal registers.
45692                  */
45693 +               if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
45694 +                       pr_warn("Thinkpad ACPI EC unable to access EC version\n");
45696                 ta1 = ta2 = 0;
45697                 for (i = 0; i < 8; i++) {
45698 @@ -6483,11 +6491,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
45699                                 ta1 = 0;
45700                                 break;
45701                         }
45702 -                       if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
45703 -                               ta2 |= t;
45704 -                       } else {
45705 -                               ta1 = 0;
45706 -                               break;
45707 +                       if (ver < 3) {
45708 +                               if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
45709 +                                       ta2 |= t;
45710 +                               } else {
45711 +                                       ta1 = 0;
45712 +                                       break;
45713 +                               }
45714                         }
45715                 }
45716                 if (ta1 == 0) {
45717 @@ -6500,9 +6510,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
45718                                 thermal_read_mode = TPACPI_THERMAL_NONE;
45719                         }
45720                 } else {
45721 -                       thermal_read_mode =
45722 -                           (ta2 != 0) ?
45723 -                           TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
45724 +                       if (ver >= 3)
45725 +                               thermal_read_mode = TPACPI_THERMAL_TPEC_8;
45726 +                       else
45727 +                               thermal_read_mode =
45728 +                                       (ta2 != 0) ?
45729 +                                       TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
45730                 }
45731         } else if (acpi_tmp7) {
45732                 if (tpacpi_is_ibm() &&
45733 diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
45734 index 530ff4025b31..0008c229fd9c 100644
45735 --- a/drivers/power/supply/bq25980_charger.c
45736 +++ b/drivers/power/supply/bq25980_charger.c
45737 @@ -606,33 +606,6 @@ static int bq25980_get_state(struct bq25980_device *bq,
45738         return 0;
45741 -static int bq25980_set_battery_property(struct power_supply *psy,
45742 -                               enum power_supply_property psp,
45743 -                               const union power_supply_propval *val)
45745 -       struct bq25980_device *bq = power_supply_get_drvdata(psy);
45746 -       int ret = 0;
45748 -       switch (psp) {
45749 -       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
45750 -               ret = bq25980_set_const_charge_curr(bq, val->intval);
45751 -               if (ret)
45752 -                       return ret;
45753 -               break;
45755 -       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
45756 -               ret = bq25980_set_const_charge_volt(bq, val->intval);
45757 -               if (ret)
45758 -                       return ret;
45759 -               break;
45761 -       default:
45762 -               return -EINVAL;
45763 -       }
45765 -       return ret;
45768  static int bq25980_get_battery_property(struct power_supply *psy,
45769                                 enum power_supply_property psp,
45770                                 union power_supply_propval *val)
45771 @@ -701,6 +674,18 @@ static int bq25980_set_charger_property(struct power_supply *psy,
45772                         return ret;
45773                 break;
45775 +       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
45776 +               ret = bq25980_set_const_charge_curr(bq, val->intval);
45777 +               if (ret)
45778 +                       return ret;
45779 +               break;
45781 +       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
45782 +               ret = bq25980_set_const_charge_volt(bq, val->intval);
45783 +               if (ret)
45784 +                       return ret;
45785 +               break;
45787         default:
45788                 return -EINVAL;
45789         }
45790 @@ -922,7 +907,6 @@ static struct power_supply_desc bq25980_battery_desc = {
45791         .name                   = "bq25980-battery",
45792         .type                   = POWER_SUPPLY_TYPE_BATTERY,
45793         .get_property           = bq25980_get_battery_property,
45794 -       .set_property           = bq25980_set_battery_property,
45795         .properties             = bq25980_battery_props,
45796         .num_properties         = ARRAY_SIZE(bq25980_battery_props),
45797         .property_is_writeable  = bq25980_property_is_writeable,
45798 diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
45799 index 4c4a7b1c64c5..20e1dc8a87cf 100644
45800 --- a/drivers/power/supply/bq27xxx_battery.c
45801 +++ b/drivers/power/supply/bq27xxx_battery.c
45802 @@ -1661,27 +1661,6 @@ static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg)
45803         return tval * 60;
45807 - * Read an average power register.
45808 - * Return < 0 if something fails.
45809 - */
45810 -static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
45812 -       int tval;
45814 -       tval = bq27xxx_read(di, BQ27XXX_REG_AP, false);
45815 -       if (tval < 0) {
45816 -               dev_err(di->dev, "error reading average power register  %02x: %d\n",
45817 -                       BQ27XXX_REG_AP, tval);
45818 -               return tval;
45819 -       }
45821 -       if (di->opts & BQ27XXX_O_ZERO)
45822 -               return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
45823 -       else
45824 -               return tval;
45827  /*
45828   * Returns true if a battery over temperature condition is detected
45829   */
45830 @@ -1769,8 +1748,6 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
45831                 }
45832                 if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
45833                         cache.cycle_count = bq27xxx_battery_read_cyct(di);
45834 -               if (di->regs[BQ27XXX_REG_AP] != INVALID_REG_ADDR)
45835 -                       cache.power_avg = bq27xxx_battery_read_pwr_avg(di);
45837                 /* We only have to read charge design full once */
45838                 if (di->charge_design_full <= 0)
45839 @@ -1827,9 +1804,35 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
45840                 val->intval = curr * BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS;
45841         } else {
45842                 /* Other gauges return signed value */
45843 -               val->intval = -(int)((s16)curr) * 1000;
45844 +               val->intval = (int)((s16)curr) * 1000;
45845 +       }
45847 +       return 0;
45851 + * Get the average power in µW
45852 + * Return < 0 if something fails.
45853 + */
45854 +static int bq27xxx_battery_pwr_avg(struct bq27xxx_device_info *di,
45855 +                                  union power_supply_propval *val)
45857 +       int power;
45859 +       power = bq27xxx_read(di, BQ27XXX_REG_AP, false);
45860 +       if (power < 0) {
45861 +               dev_err(di->dev,
45862 +                       "error reading average power register %02x: %d\n",
45863 +                       BQ27XXX_REG_AP, power);
45864 +               return power;
45865         }
45867 +       if (di->opts & BQ27XXX_O_ZERO)
45868 +               val->intval = (power * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
45869 +       else
45870 +               /* Other gauges return a signed value in units of 10mW */
45871 +               val->intval = (int)((s16)power) * 10000;
45873         return 0;
45876 @@ -2020,7 +2023,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
45877                 ret = bq27xxx_simple_value(di->cache.energy, val);
45878                 break;
45879         case POWER_SUPPLY_PROP_POWER_AVG:
45880 -               ret = bq27xxx_simple_value(di->cache.power_avg, val);
45881 +               ret = bq27xxx_battery_pwr_avg(di, val);
45882                 break;
45883         case POWER_SUPPLY_PROP_HEALTH:
45884                 ret = bq27xxx_simple_value(di->cache.health, val);
45885 diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
45886 index 6d5bcdb9f45d..a3fc0084cda0 100644
45887 --- a/drivers/power/supply/cpcap-battery.c
45888 +++ b/drivers/power/supply/cpcap-battery.c
45889 @@ -786,7 +786,7 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
45890                         break;
45891         }
45893 -       if (!d)
45894 +       if (list_entry_is_head(d, &ddata->irq_list, node))
45895                 return IRQ_NONE;
45897         latest = cpcap_battery_latest(ddata);
45898 diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
45899 index 641dcad1133f..2a8915c3e73e 100644
45900 --- a/drivers/power/supply/cpcap-charger.c
45901 +++ b/drivers/power/supply/cpcap-charger.c
45902 @@ -318,7 +318,7 @@ static int cpcap_charger_current_to_regval(int microamp)
45903                 return CPCAP_REG_CRM_ICHRG(0x0);
45904         if (miliamp < 177)
45905                 return CPCAP_REG_CRM_ICHRG(0x1);
45906 -       if (miliamp > 1596)
45907 +       if (miliamp >= 1596)
45908                 return CPCAP_REG_CRM_ICHRG(0xe);
45910         res = microamp / 88666;
45911 @@ -668,6 +668,9 @@ static void cpcap_usb_detect(struct work_struct *work)
45912                 return;
45913         }
45915 +       /* Delay for 80ms to avoid vbus bouncing when usb cable is plugged in */
45916 +       usleep_range(80000, 120000);
45918         /* Throttle chrgcurr2 interrupt for charger done and retry */
45919         switch (ddata->status) {
45920         case POWER_SUPPLY_STATUS_CHARGING:
45921 diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
45922 index 0032069fbc2b..66039c665dd1 100644
45923 --- a/drivers/power/supply/generic-adc-battery.c
45924 +++ b/drivers/power/supply/generic-adc-battery.c
45925 @@ -373,7 +373,7 @@ static int gab_remove(struct platform_device *pdev)
45926         }
45928         kfree(adc_bat->psy_desc.properties);
45929 -       cancel_delayed_work(&adc_bat->bat_work);
45930 +       cancel_delayed_work_sync(&adc_bat->bat_work);
45931         return 0;
45934 diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
45935 index e7931ffb7151..397e5a03b7d9 100644
45936 --- a/drivers/power/supply/lp8788-charger.c
45937 +++ b/drivers/power/supply/lp8788-charger.c
45938 @@ -501,7 +501,7 @@ static int lp8788_set_irqs(struct platform_device *pdev,
45940                 ret = request_threaded_irq(virq, NULL,
45941                                         lp8788_charger_irq_thread,
45942 -                                       0, name, pchg);
45943 +                                       IRQF_ONESHOT, name, pchg);
45944                 if (ret)
45945                         break;
45946         }
45947 diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
45948 index ac06ecf7fc9c..a3bfb9612b17 100644
45949 --- a/drivers/power/supply/pm2301_charger.c
45950 +++ b/drivers/power/supply/pm2301_charger.c
45951 @@ -1089,7 +1089,7 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
45952         ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number),
45953                                 NULL,
45954                                 pm2xxx_charger_irq[0].isr,
45955 -                               pm2->pdata->irq_type,
45956 +                               pm2->pdata->irq_type | IRQF_ONESHOT,
45957                                 pm2xxx_charger_irq[0].name, pm2);
45959         if (ret != 0) {
45960 diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
45961 index a2addc24ee8b..3e3a598f114d 100644
45962 --- a/drivers/power/supply/s3c_adc_battery.c
45963 +++ b/drivers/power/supply/s3c_adc_battery.c
45964 @@ -395,7 +395,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev)
45965         if (main_bat.charge_finished)
45966                 free_irq(gpiod_to_irq(main_bat.charge_finished), NULL);
45968 -       cancel_delayed_work(&bat_work);
45969 +       cancel_delayed_work_sync(&bat_work);
45971         if (pdata->exit)
45972                 pdata->exit();
45973 diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
45974 index 6b0098e5a88b..0990b2fa6cd8 100644
45975 --- a/drivers/power/supply/tps65090-charger.c
45976 +++ b/drivers/power/supply/tps65090-charger.c
45977 @@ -301,7 +301,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
45979         if (irq != -ENXIO) {
45980                 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
45981 -                       tps65090_charger_isr, 0, "tps65090-charger", cdata);
45982 +                       tps65090_charger_isr, IRQF_ONESHOT, "tps65090-charger", cdata);
45983                 if (ret) {
45984                         dev_err(cdata->dev,
45985                                 "Unable to register irq %d err %d\n", irq,
45986 diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
45987 index 814c2b81fdfe..ba33d1617e0b 100644
45988 --- a/drivers/power/supply/tps65217_charger.c
45989 +++ b/drivers/power/supply/tps65217_charger.c
45990 @@ -238,7 +238,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
45991         for (i = 0; i < NUM_CHARGER_IRQS; i++) {
45992                 ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
45993                                                 tps65217_charger_irq,
45994 -                                               0, "tps65217-charger",
45995 +                                               IRQF_ONESHOT, "tps65217-charger",
45996                                                 charger);
45997                 if (ret) {
45998                         dev_err(charger->dev,
45999 diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
46000 index fdda2a737186..58ecdad26cca 100644
46001 --- a/drivers/powercap/intel_rapl_common.c
46002 +++ b/drivers/powercap/intel_rapl_common.c
46003 @@ -1454,7 +1454,7 @@ static int __init rapl_init(void)
46005         id = x86_match_cpu(rapl_ids);
46006         if (!id) {
46007 -               pr_err("driver does not support CPU family %d model %d\n",
46008 +               pr_info("driver does not support CPU family %d model %d\n",
46009                        boot_cpu_data.x86, boot_cpu_data.x86_model);
46011                 return -ENODEV;
46012 diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
46013 index 5813339b597b..3292158157b6 100644
46014 --- a/drivers/pwm/pwm-atmel.c
46015 +++ b/drivers/pwm/pwm-atmel.c
46016 @@ -319,7 +319,7 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
46018                 cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
46019                                           atmel_pwm->data->regs.duty);
46020 -               tmp = (u64)cdty * NSEC_PER_SEC;
46021 +               tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;
46022                 tmp <<= pres;
46023                 state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);
46025 diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
46026 index a8b5832a5a1b..204a2da054f5 100644
46027 --- a/drivers/regulator/bd9576-regulator.c
46028 +++ b/drivers/regulator/bd9576-regulator.c
46029 @@ -206,7 +206,7 @@ static int bd957x_probe(struct platform_device *pdev)
46031         struct regmap *regmap;
46032         struct regulator_config config = { 0 };
46033 -       int i, err;
46034 +       int i;
46035         bool vout_mode, ddr_sel;
46036         const struct bd957x_regulator_data *reg_data = &bd9576_regulators[0];
46037         unsigned int num_reg_data = ARRAY_SIZE(bd9576_regulators);
46038 @@ -279,8 +279,7 @@ static int bd957x_probe(struct platform_device *pdev)
46039                 break;
46040         default:
46041                 dev_err(&pdev->dev, "Unsupported chip type\n");
46042 -               err = -EINVAL;
46043 -               goto err;
46044 +               return -EINVAL;
46045         }
46047         config.dev = pdev->dev.parent;
46048 @@ -300,8 +299,7 @@ static int bd957x_probe(struct platform_device *pdev)
46049                         dev_err(&pdev->dev,
46050                                 "failed to register %s regulator\n",
46051                                 desc->name);
46052 -                       err = PTR_ERR(rdev);
46053 -                       goto err;
46054 +                       return PTR_ERR(rdev);
46055                 }
46056                 /*
46057                  * Clear the VOUT1 GPIO setting - rest of the regulators do not
46058 @@ -310,8 +308,7 @@ static int bd957x_probe(struct platform_device *pdev)
46059                 config.ena_gpiod = NULL;
46060         }
46062 -err:
46063 -       return err;
46064 +       return 0;
46067  static const struct platform_device_id bd957x_pmic_id[] = {
46068 diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
46069 index a2ede7d7897e..08cbf688e14d 100644
46070 --- a/drivers/regulator/da9121-regulator.c
46071 +++ b/drivers/regulator/da9121-regulator.c
46072 @@ -40,6 +40,7 @@ struct da9121 {
46073         unsigned int passive_delay;
46074         int chip_irq;
46075         int variant_id;
46076 +       int subvariant_id;
46077  };
46079  /* Define ranges for different variants, enabling translation to/from
46080 @@ -812,7 +813,6 @@ static struct regmap_config da9121_2ch_regmap_config = {
46081  static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
46083         u32 device_id;
46084 -       u8 chip_id = chip->variant_id;
46085         u32 variant_id;
46086         u8 variant_mrc, variant_vrc;
46087         char *type;
46088 @@ -839,22 +839,34 @@ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
46090         variant_vrc = variant_id & DA9121_MASK_OTP_VARIANT_ID_VRC;
46092 -       switch (variant_vrc) {
46093 -       case DA9121_VARIANT_VRC:
46094 -               type = "DA9121/DA9130";
46095 -               config_match = (chip_id == DA9121_TYPE_DA9121_DA9130);
46096 +       switch (chip->subvariant_id) {
46097 +       case DA9121_SUBTYPE_DA9121:
46098 +               type = "DA9121";
46099 +               config_match = (variant_vrc == DA9121_VARIANT_VRC);
46100                 break;
46101 -       case DA9220_VARIANT_VRC:
46102 -               type = "DA9220/DA9132";
46103 -               config_match = (chip_id == DA9121_TYPE_DA9220_DA9132);
46104 +       case DA9121_SUBTYPE_DA9130:
46105 +               type = "DA9130";
46106 +               config_match = (variant_vrc == DA9130_VARIANT_VRC);
46107                 break;
46108 -       case DA9122_VARIANT_VRC:
46109 -               type = "DA9122/DA9131";
46110 -               config_match = (chip_id == DA9121_TYPE_DA9122_DA9131);
46111 +       case DA9121_SUBTYPE_DA9220:
46112 +               type = "DA9220";
46113 +               config_match = (variant_vrc == DA9220_VARIANT_VRC);
46114                 break;
46115 -       case DA9217_VARIANT_VRC:
46116 +       case DA9121_SUBTYPE_DA9132:
46117 +               type = "DA9132";
46118 +               config_match = (variant_vrc == DA9132_VARIANT_VRC);
46119 +               break;
46120 +       case DA9121_SUBTYPE_DA9122:
46121 +               type = "DA9122";
46122 +               config_match = (variant_vrc == DA9122_VARIANT_VRC);
46123 +               break;
46124 +       case DA9121_SUBTYPE_DA9131:
46125 +               type = "DA9131";
46126 +               config_match = (variant_vrc == DA9131_VARIANT_VRC);
46127 +               break;
46128 +       case DA9121_SUBTYPE_DA9217:
46129                 type = "DA9217";
46130 -               config_match = (chip_id == DA9121_TYPE_DA9217);
46131 +               config_match = (variant_vrc == DA9217_VARIANT_VRC);
46132                 break;
46133         default:
46134                 type = "Unknown";
46135 @@ -892,15 +904,27 @@ static int da9121_assign_chip_model(struct i2c_client *i2c,
46137         chip->dev = &i2c->dev;
46139 -       switch (chip->variant_id) {
46140 -       case DA9121_TYPE_DA9121_DA9130:
46141 -               fallthrough;
46142 -       case DA9121_TYPE_DA9217:
46143 +       /* Use configured subtype to select the regulator descriptor index and
46144 +        * register map, common to both consumer and automotive grade variants
46145 +        */
46146 +       switch (chip->subvariant_id) {
46147 +       case DA9121_SUBTYPE_DA9121:
46148 +       case DA9121_SUBTYPE_DA9130:
46149 +               chip->variant_id = DA9121_TYPE_DA9121_DA9130;
46150                 regmap = &da9121_1ch_regmap_config;
46151                 break;
46152 -       case DA9121_TYPE_DA9122_DA9131:
46153 -               fallthrough;
46154 -       case DA9121_TYPE_DA9220_DA9132:
46155 +       case DA9121_SUBTYPE_DA9217:
46156 +               chip->variant_id = DA9121_TYPE_DA9217;
46157 +               regmap = &da9121_1ch_regmap_config;
46158 +               break;
46159 +       case DA9121_SUBTYPE_DA9122:
46160 +       case DA9121_SUBTYPE_DA9131:
46161 +               chip->variant_id = DA9121_TYPE_DA9122_DA9131;
46162 +               regmap = &da9121_2ch_regmap_config;
46163 +               break;
46164 +       case DA9121_SUBTYPE_DA9220:
46165 +       case DA9121_SUBTYPE_DA9132:
46166 +               chip->variant_id = DA9121_TYPE_DA9220_DA9132;
46167                 regmap = &da9121_2ch_regmap_config;
46168                 break;
46169         }
46170 @@ -975,13 +999,13 @@ static int da9121_config_irq(struct i2c_client *i2c,
46173  static const struct of_device_id da9121_dt_ids[] = {
46174 -       { .compatible = "dlg,da9121", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
46175 -       { .compatible = "dlg,da9130", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
46176 -       { .compatible = "dlg,da9217", .data = (void *) DA9121_TYPE_DA9217 },
46177 -       { .compatible = "dlg,da9122", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
46178 -       { .compatible = "dlg,da9131", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
46179 -       { .compatible = "dlg,da9220", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
46180 -       { .compatible = "dlg,da9132", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
46181 +       { .compatible = "dlg,da9121", .data = (void *) DA9121_SUBTYPE_DA9121 },
46182 +       { .compatible = "dlg,da9130", .data = (void *) DA9121_SUBTYPE_DA9130 },
46183 +       { .compatible = "dlg,da9217", .data = (void *) DA9121_SUBTYPE_DA9217 },
46184 +       { .compatible = "dlg,da9122", .data = (void *) DA9121_SUBTYPE_DA9122 },
46185 +       { .compatible = "dlg,da9131", .data = (void *) DA9121_SUBTYPE_DA9131 },
46186 +       { .compatible = "dlg,da9220", .data = (void *) DA9121_SUBTYPE_DA9220 },
46187 +       { .compatible = "dlg,da9132", .data = (void *) DA9121_SUBTYPE_DA9132 },
46188         { }
46189  };
46190  MODULE_DEVICE_TABLE(of, da9121_dt_ids);
46191 @@ -1011,7 +1035,7 @@ static int da9121_i2c_probe(struct i2c_client *i2c,
46192         }
46194         chip->pdata = i2c->dev.platform_data;
46195 -       chip->variant_id = da9121_of_get_id(&i2c->dev);
46196 +       chip->subvariant_id = da9121_of_get_id(&i2c->dev);
46198         ret = da9121_assign_chip_model(i2c, chip);
46199         if (ret < 0)
46200 diff --git a/drivers/regulator/da9121-regulator.h b/drivers/regulator/da9121-regulator.h
46201 index 3c34cb889ca8..357f416e17c1 100644
46202 --- a/drivers/regulator/da9121-regulator.h
46203 +++ b/drivers/regulator/da9121-regulator.h
46204 @@ -29,6 +29,16 @@ enum da9121_variant {
46205         DA9121_TYPE_DA9217
46206  };
46208 +enum da9121_subvariant {
46209 +       DA9121_SUBTYPE_DA9121,
46210 +       DA9121_SUBTYPE_DA9130,
46211 +       DA9121_SUBTYPE_DA9220,
46212 +       DA9121_SUBTYPE_DA9132,
46213 +       DA9121_SUBTYPE_DA9122,
46214 +       DA9121_SUBTYPE_DA9131,
46215 +       DA9121_SUBTYPE_DA9217
46218  /* Minimum, maximum and default polling millisecond periods are provided
46219   * here as an example. It is expected that any final implementation will
46220   * include a modification of these settings to match the required
46221 @@ -279,6 +289,9 @@ enum da9121_variant {
46222  #define DA9220_VARIANT_VRC     0x0
46223  #define DA9122_VARIANT_VRC     0x2
46224  #define DA9217_VARIANT_VRC     0x7
46225 +#define DA9130_VARIANT_VRC     0x0
46226 +#define DA9131_VARIANT_VRC     0x1
46227 +#define DA9132_VARIANT_VRC     0x2
46229  /* DA9121_REG_OTP_CUSTOMER_ID */
46231 diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
46232 index dcb380e868df..549ed3fed625 100644
46233 --- a/drivers/remoteproc/pru_rproc.c
46234 +++ b/drivers/remoteproc/pru_rproc.c
46235 @@ -266,12 +266,17 @@ static void pru_rproc_create_debug_entries(struct rproc *rproc)
46237  static void pru_dispose_irq_mapping(struct pru_rproc *pru)
46239 -       while (pru->evt_count--) {
46240 +       if (!pru->mapped_irq)
46241 +               return;
46243 +       while (pru->evt_count) {
46244 +               pru->evt_count--;
46245                 if (pru->mapped_irq[pru->evt_count] > 0)
46246                         irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
46247         }
46249         kfree(pru->mapped_irq);
46250 +       pru->mapped_irq = NULL;
46253  /*
46254 @@ -284,7 +289,7 @@ static int pru_handle_intrmap(struct rproc *rproc)
46255         struct pru_rproc *pru = rproc->priv;
46256         struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
46257         struct irq_fwspec fwspec;
46258 -       struct device_node *irq_parent;
46259 +       struct device_node *parent, *irq_parent;
46260         int i, ret = 0;
46262         /* not having pru_interrupt_map is not an error */
46263 @@ -307,16 +312,31 @@ static int pru_handle_intrmap(struct rproc *rproc)
46264         pru->evt_count = rsc->num_evts;
46265         pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
46266                                   GFP_KERNEL);
46267 -       if (!pru->mapped_irq)
46268 +       if (!pru->mapped_irq) {
46269 +               pru->evt_count = 0;
46270                 return -ENOMEM;
46271 +       }
46273         /*
46274          * parse and fill in system event to interrupt channel and
46275 -        * channel-to-host mapping
46276 +        * channel-to-host mapping. The interrupt controller to be used
46277 +        * for these mappings for a given PRU remoteproc is always its
46278 +        * corresponding sibling PRUSS INTC node.
46279          */
46280 -       irq_parent = of_irq_find_parent(pru->dev->of_node);
46281 +       parent = of_get_parent(dev_of_node(pru->dev));
46282 +       if (!parent) {
46283 +               kfree(pru->mapped_irq);
46284 +               pru->mapped_irq = NULL;
46285 +               pru->evt_count = 0;
46286 +               return -ENODEV;
46287 +       }
46289 +       irq_parent = of_get_child_by_name(parent, "interrupt-controller");
46290 +       of_node_put(parent);
46291         if (!irq_parent) {
46292                 kfree(pru->mapped_irq);
46293 +               pru->mapped_irq = NULL;
46294 +               pru->evt_count = 0;
46295                 return -ENODEV;
46296         }
46298 @@ -332,16 +352,20 @@ static int pru_handle_intrmap(struct rproc *rproc)
46300                 pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
46301                 if (!pru->mapped_irq[i]) {
46302 -                       dev_err(dev, "failed to get virq\n");
46303 -                       ret = pru->mapped_irq[i];
46304 +                       dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
46305 +                               i, fwspec.param[0], fwspec.param[1],
46306 +                               fwspec.param[2]);
46307 +                       ret = -EINVAL;
46308                         goto map_fail;
46309                 }
46310         }
46311 +       of_node_put(irq_parent);
46313         return ret;
46315  map_fail:
46316         pru_dispose_irq_mapping(pru);
46317 +       of_node_put(irq_parent);
46319         return ret;
46321 @@ -387,8 +411,7 @@ static int pru_rproc_stop(struct rproc *rproc)
46322         pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
46324         /* dispose irq mapping - new firmware can provide new mapping */
46325 -       if (pru->mapped_irq)
46326 -               pru_dispose_irq_mapping(pru);
46327 +       pru_dispose_irq_mapping(pru);
46329         return 0;
46331 diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
46332 index 66106ba25ba3..14e0ce5f18f5 100644
46333 --- a/drivers/remoteproc/qcom_q6v5_mss.c
46334 +++ b/drivers/remoteproc/qcom_q6v5_mss.c
46335 @@ -1210,6 +1210,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
46336                         goto release_firmware;
46337                 }
46339 +               if (phdr->p_filesz > phdr->p_memsz) {
46340 +                       dev_err(qproc->dev,
46341 +                               "refusing to load segment %d with p_filesz > p_memsz\n",
46342 +                               i);
46343 +                       ret = -EINVAL;
46344 +                       goto release_firmware;
46345 +               }
46347                 ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
46348                 if (!ptr) {
46349                         dev_err(qproc->dev,
46350 @@ -1241,6 +1249,16 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
46351                                 goto release_firmware;
46352                         }
46354 +                       if (seg_fw->size != phdr->p_filesz) {
46355 +                               dev_err(qproc->dev,
46356 +                                       "failed to load segment %d from truncated file %s\n",
46357 +                                       i, fw_name);
46358 +                               ret = -EINVAL;
46359 +                               release_firmware(seg_fw);
46360 +                               memunmap(ptr);
46361 +                               goto release_firmware;
46362 +                       }
46364                         release_firmware(seg_fw);
46365                 }
46367 diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
46368 index 27a05167c18c..4840886532ff 100644
46369 --- a/drivers/rpmsg/qcom_glink_native.c
46370 +++ b/drivers/rpmsg/qcom_glink_native.c
46371 @@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
46372                         dev_err(glink->dev,
46373                                 "no intent found for channel %s intent %d",
46374                                 channel->name, liid);
46375 +                       ret = -ENOENT;
46376                         goto advance_rx;
46377                 }
46378         }
46379 diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
46380 index cd8e438bc9c4..8752620d8e34 100644
46381 --- a/drivers/rtc/rtc-ds1307.c
46382 +++ b/drivers/rtc/rtc-ds1307.c
46383 @@ -296,7 +296,11 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
46384         t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
46385         tmp = regs[DS1307_REG_HOUR] & 0x3f;
46386         t->tm_hour = bcd2bin(tmp);
46387 -       t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
46388 +       /* rx8130 is bit position, not BCD */
46389 +       if (ds1307->type == rx_8130)
46390 +               t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);
46391 +       else
46392 +               t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
46393         t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
46394         tmp = regs[DS1307_REG_MONTH] & 0x1f;
46395         t->tm_mon = bcd2bin(tmp) - 1;
46396 @@ -343,7 +347,11 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
46397         regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
46398         regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
46399         regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
46400 -       regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
46401 +       /* rx8130 is bit position, not BCD */
46402 +       if (ds1307->type == rx_8130)
46403 +               regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
46404 +       else
46405 +               regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
46406         regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
46407         regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
46409 diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
46410 index 57cc09d0a806..c0df49fb978c 100644
46411 --- a/drivers/rtc/rtc-fsl-ftm-alarm.c
46412 +++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
46413 @@ -310,6 +310,7 @@ static const struct of_device_id ftm_rtc_match[] = {
46414         { .compatible = "fsl,lx2160a-ftm-alarm", },
46415         { },
46416  };
46417 +MODULE_DEVICE_TABLE(of, ftm_rtc_match);
46419  static const struct acpi_device_id ftm_imx_acpi_ids[] = {
46420         {"NXP0014",},
46421 diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
46422 index 288abb1abdb8..bc89c62ccb9b 100644
46423 --- a/drivers/rtc/rtc-tps65910.c
46424 +++ b/drivers/rtc/rtc-tps65910.c
46425 @@ -18,6 +18,7 @@
46426  #include <linux/rtc.h>
46427  #include <linux/bcd.h>
46428  #include <linux/math64.h>
46429 +#include <linux/property.h>
46430  #include <linux/platform_device.h>
46431  #include <linux/interrupt.h>
46432  #include <linux/mfd/tps65910.h>
46433 diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
46434 index 2018614f258f..fc19b312c345 100644
46435 --- a/drivers/rtc/rtc-wm8350.c
46436 +++ b/drivers/rtc/rtc-wm8350.c
46437 @@ -114,7 +114,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
46438         /* Wait until confirmation of stopping */
46439         do {
46440                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
46441 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
46442 +               schedule_msec_hrtimeout_uninterruptible((1));
46443         } while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
46445         if (!retries) {
46446 @@ -197,7 +197,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
46447         /* Wait until confirmation of stopping */
46448         do {
46449                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
46450 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
46451 +               schedule_msec_hrtimeout_uninterruptible((1));
46452         } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
46454         if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
46455 @@ -220,7 +220,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
46456         /* Wait until confirmation */
46457         do {
46458                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
46459 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
46460 +               schedule_msec_hrtimeout_uninterruptible((1));
46461         } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
46463         if (rtc_ctrl & WM8350_RTC_ALMSTS)
46464 diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
46465 index 3f026021e95e..84f659cafe76 100644
46466 --- a/drivers/s390/cio/device.c
46467 +++ b/drivers/s390/cio/device.c
46468 @@ -1532,8 +1532,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
46469         switch (action) {
46470         case IO_SCH_ORPH_UNREG:
46471         case IO_SCH_UNREG:
46472 -               if (!cdev)
46473 -                       css_sch_device_unregister(sch);
46474 +               css_sch_device_unregister(sch);
46475                 break;
46476         case IO_SCH_ORPH_ATTACH:
46477         case IO_SCH_UNREG_ATTACH:
46478 diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
46479 index 34bf2f197c71..0e0044d70844 100644
46480 --- a/drivers/s390/cio/qdio.h
46481 +++ b/drivers/s390/cio/qdio.h
46482 @@ -181,12 +181,6 @@ struct qdio_input_q {
46483  struct qdio_output_q {
46484         /* PCIs are enabled for the queue */
46485         int pci_out_enabled;
46486 -       /* cq: use asynchronous output buffers */
46487 -       int use_cq;
46488 -       /* cq: aobs used for particual SBAL */
46489 -       struct qaob **aobs;
46490 -       /* cq: sbal state related to asynchronous operation */
46491 -       struct qdio_outbuf_state *sbal_state;
46492         /* timer to check for more outbound work */
46493         struct timer_list timer;
46494         /* tasklet to check for completions */
46495 @@ -379,12 +373,8 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
46496  void qdio_shutdown_irq(struct qdio_irq *irq);
46497  void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
46498  void qdio_free_queues(struct qdio_irq *irq_ptr);
46499 -void qdio_free_async_data(struct qdio_irq *irq_ptr);
46500  int qdio_setup_init(void);
46501  void qdio_setup_exit(void);
46502 -int qdio_enable_async_operation(struct qdio_output_q *q);
46503 -void qdio_disable_async_operation(struct qdio_output_q *q);
46504 -struct qaob *qdio_allocate_aob(void);
46506  int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
46507                         unsigned char *state);
46508 diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
46509 index 03a011619908..307ce7ff5ca4 100644
46510 --- a/drivers/s390/cio/qdio_main.c
46511 +++ b/drivers/s390/cio/qdio_main.c
46512 @@ -517,24 +517,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
46513         return 1;
46516 -static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
46517 -                                       int bufnr)
46519 -       unsigned long phys_aob = 0;
46521 -       if (!q->aobs[bufnr]) {
46522 -               struct qaob *aob = qdio_allocate_aob();
46523 -               q->aobs[bufnr] = aob;
46524 -       }
46525 -       if (q->aobs[bufnr]) {
46526 -               q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
46527 -               phys_aob = virt_to_phys(q->aobs[bufnr]);
46528 -               WARN_ON_ONCE(phys_aob & 0xFF);
46529 -       }
46531 -       return phys_aob;
46534  static inline int qdio_tasklet_schedule(struct qdio_q *q)
46536         if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
46537 @@ -548,7 +530,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
46538                                         unsigned int *error)
46540         unsigned char state = 0;
46541 -       unsigned int i;
46542         int count;
46544         q->timestamp = get_tod_clock_fast();
46545 @@ -570,10 +551,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
46547         switch (state) {
46548         case SLSB_P_OUTPUT_PENDING:
46549 -               /* detach the utilized QAOBs: */
46550 -               for (i = 0; i < count; i++)
46551 -                       q->u.out.aobs[QDIO_BUFNR(start + i)] = NULL;
46553                 *error = QDIO_ERROR_SLSB_PENDING;
46554                 fallthrough;
46555         case SLSB_P_OUTPUT_EMPTY:
46556 @@ -999,7 +976,6 @@ int qdio_free(struct ccw_device *cdev)
46557         cdev->private->qdio_data = NULL;
46558         mutex_unlock(&irq_ptr->setup_mutex);
46560 -       qdio_free_async_data(irq_ptr);
46561         qdio_free_queues(irq_ptr);
46562         free_page((unsigned long) irq_ptr->qdr);
46563         free_page(irq_ptr->chsc_page);
46564 @@ -1075,28 +1051,6 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
46566  EXPORT_SYMBOL_GPL(qdio_allocate);
46568 -static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
46570 -       struct qdio_q *q = irq_ptr->input_qs[0];
46571 -       int i, use_cq = 0;
46573 -       if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
46574 -               use_cq = 1;
46576 -       for_each_output_queue(irq_ptr, q, i) {
46577 -               if (use_cq) {
46578 -                       if (multicast_outbound(q))
46579 -                               continue;
46580 -                       if (qdio_enable_async_operation(&q->u.out) < 0) {
46581 -                               use_cq = 0;
46582 -                               continue;
46583 -                       }
46584 -               } else
46585 -                       qdio_disable_async_operation(&q->u.out);
46586 -       }
46587 -       DBF_EVENT("use_cq:%d", use_cq);
46590  static void qdio_trace_init_data(struct qdio_irq *irq,
46591                                  struct qdio_initialize *data)
46593 @@ -1191,8 +1145,6 @@ int qdio_establish(struct ccw_device *cdev,
46595         qdio_setup_ssqd_info(irq_ptr);
46597 -       qdio_detect_hsicq(irq_ptr);
46599         /* qebsm is now setup if available, initialize buffer states */
46600         qdio_init_buf_states(irq_ptr);
46602 @@ -1297,9 +1249,11 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
46603   * @callflags: flags
46604   * @bufnr: first buffer to process
46605   * @count: how many buffers are filled
46606 + * @aob: asynchronous operation block
46607   */
46608  static int handle_outbound(struct qdio_q *q, unsigned int callflags,
46609 -                          unsigned int bufnr, unsigned int count)
46610 +                          unsigned int bufnr, unsigned int count,
46611 +                          struct qaob *aob)
46613         const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
46614         unsigned char state = 0;
46615 @@ -1320,11 +1274,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
46616                 q->u.out.pci_out_enabled = 0;
46618         if (queue_type(q) == QDIO_IQDIO_QFMT) {
46619 -               unsigned long phys_aob = 0;
46621 -               if (q->u.out.use_cq && count == 1)
46622 -                       phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
46623 +               unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
46625 +               WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
46626                 rc = qdio_kick_outbound_q(q, count, phys_aob);
46627         } else if (need_siga_sync(q)) {
46628                 rc = qdio_siga_sync_q(q);
46629 @@ -1359,9 +1311,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
46630   * @q_nr: queue number
46631   * @bufnr: buffer number
46632   * @count: how many buffers to process
46633 + * @aob: asynchronous operation block (outbound only)
46634   */
46635  int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
46636 -           int q_nr, unsigned int bufnr, unsigned int count)
46637 +           int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob)
46639         struct qdio_irq *irq_ptr = cdev->private->qdio_data;
46641 @@ -1383,7 +1336,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
46642                                       callflags, bufnr, count);
46643         else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
46644                 return handle_outbound(irq_ptr->output_qs[q_nr],
46645 -                                      callflags, bufnr, count);
46646 +                                      callflags, bufnr, count, aob);
46647         return -EINVAL;
46649  EXPORT_SYMBOL_GPL(do_QDIO);
46650 diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
46651 index c8b9620bc688..da67e4979402 100644
46652 --- a/drivers/s390/cio/qdio_setup.c
46653 +++ b/drivers/s390/cio/qdio_setup.c
46654 @@ -30,6 +30,7 @@ struct qaob *qdio_allocate_aob(void)
46656         return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
46658 +EXPORT_SYMBOL_GPL(qdio_allocate_aob);
46660  void qdio_release_aob(struct qaob *aob)
46662 @@ -247,8 +248,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
46663                          struct qdio_initialize *qdio_init)
46665         struct qdio_q *q;
46666 -       struct qdio_outbuf_state *output_sbal_state_array =
46667 -                                 qdio_init->output_sbal_state_array;
46668         int i;
46670         for_each_input_queue(irq_ptr, q, i) {
46671 @@ -265,9 +264,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
46672                 DBF_EVENT("outq:%1d", i);
46673                 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
46675 -               q->u.out.sbal_state = output_sbal_state_array;
46676 -               output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
46678                 q->is_input_q = 0;
46679                 setup_storage_lists(q, irq_ptr,
46680                                     qdio_init->output_sbal_addr_array[i], i);
46681 @@ -372,30 +368,6 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
46682         DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
46685 -void qdio_free_async_data(struct qdio_irq *irq_ptr)
46687 -       struct qdio_q *q;
46688 -       int i;
46690 -       for (i = 0; i < irq_ptr->max_output_qs; i++) {
46691 -               q = irq_ptr->output_qs[i];
46692 -               if (q->u.out.use_cq) {
46693 -                       unsigned int n;
46695 -                       for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; n++) {
46696 -                               struct qaob *aob = q->u.out.aobs[n];
46698 -                               if (aob) {
46699 -                                       qdio_release_aob(aob);
46700 -                                       q->u.out.aobs[n] = NULL;
46701 -                               }
46702 -                       }
46704 -                       qdio_disable_async_operation(&q->u.out);
46705 -               }
46706 -       }
46709  static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
46711         desc->sliba = virt_to_phys(queue->slib);
46712 @@ -545,25 +517,6 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
46713         printk(KERN_INFO "%s", s);
46716 -int qdio_enable_async_operation(struct qdio_output_q *outq)
46718 -       outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
46719 -                            GFP_KERNEL);
46720 -       if (!outq->aobs) {
46721 -               outq->use_cq = 0;
46722 -               return -ENOMEM;
46723 -       }
46724 -       outq->use_cq = 1;
46725 -       return 0;
46728 -void qdio_disable_async_operation(struct qdio_output_q *q)
46730 -       kfree(q->aobs);
46731 -       q->aobs = NULL;
46732 -       q->use_cq = 0;
46735  int __init qdio_setup_init(void)
46737         int rc;
46738 diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
46739 index 1ffdd411201c..6946a7e26eff 100644
46740 --- a/drivers/s390/crypto/vfio_ap_ops.c
46741 +++ b/drivers/s390/crypto/vfio_ap_ops.c
46742 @@ -294,6 +294,19 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
46743         matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
46744                                    struct ap_matrix_mdev, pqap_hook);
46746 +       /*
46747 +        * If the KVM pointer is in the process of being set, wait until the
46748 +        * process has completed.
46749 +        */
46750 +       wait_event_cmd(matrix_mdev->wait_for_kvm,
46751 +                      !matrix_mdev->kvm_busy,
46752 +                      mutex_unlock(&matrix_dev->lock),
46753 +                      mutex_lock(&matrix_dev->lock));
46755 +       /* If the there is no guest using the mdev, there is nothing to do */
46756 +       if (!matrix_mdev->kvm)
46757 +               goto out_unlock;
46759         q = vfio_ap_get_queue(matrix_mdev, apqn);
46760         if (!q)
46761                 goto out_unlock;
46762 @@ -337,6 +350,7 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
46764         matrix_mdev->mdev = mdev;
46765         vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
46766 +       init_waitqueue_head(&matrix_mdev->wait_for_kvm);
46767         mdev_set_drvdata(mdev, matrix_mdev);
46768         matrix_mdev->pqap_hook.hook = handle_pqap;
46769         matrix_mdev->pqap_hook.owner = THIS_MODULE;
46770 @@ -351,17 +365,23 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
46772         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
46774 -       if (matrix_mdev->kvm)
46775 +       mutex_lock(&matrix_dev->lock);
46777 +       /*
46778 +        * If the KVM pointer is in flux or the guest is running, disallow
46779 +        * un-assignment of control domain.
46780 +        */
46781 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
46782 +               mutex_unlock(&matrix_dev->lock);
46783                 return -EBUSY;
46784 +       }
46786 -       mutex_lock(&matrix_dev->lock);
46787         vfio_ap_mdev_reset_queues(mdev);
46788         list_del(&matrix_mdev->node);
46789 -       mutex_unlock(&matrix_dev->lock);
46791         kfree(matrix_mdev);
46792         mdev_set_drvdata(mdev, NULL);
46793         atomic_inc(&matrix_dev->available_instances);
46794 +       mutex_unlock(&matrix_dev->lock);
46796         return 0;
46798 @@ -606,24 +626,31 @@ static ssize_t assign_adapter_store(struct device *dev,
46799         struct mdev_device *mdev = mdev_from_dev(dev);
46800         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
46802 -       /* If the guest is running, disallow assignment of adapter */
46803 -       if (matrix_mdev->kvm)
46804 -               return -EBUSY;
46805 +       mutex_lock(&matrix_dev->lock);
46807 +       /*
46808 +        * If the KVM pointer is in flux or the guest is running, disallow
46809 +        * un-assignment of adapter
46810 +        */
46811 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
46812 +               ret = -EBUSY;
46813 +               goto done;
46814 +       }
46816         ret = kstrtoul(buf, 0, &apid);
46817         if (ret)
46818 -               return ret;
46819 +               goto done;
46821 -       if (apid > matrix_mdev->matrix.apm_max)
46822 -               return -ENODEV;
46823 +       if (apid > matrix_mdev->matrix.apm_max) {
46824 +               ret = -ENODEV;
46825 +               goto done;
46826 +       }
46828         /*
46829          * Set the bit in the AP mask (APM) corresponding to the AP adapter
46830          * number (APID). The bits in the mask, from most significant to least
46831          * significant bit, correspond to APIDs 0-255.
46832          */
46833 -       mutex_lock(&matrix_dev->lock);
46835         ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
46836         if (ret)
46837                 goto done;
46838 @@ -672,22 +699,31 @@ static ssize_t unassign_adapter_store(struct device *dev,
46839         struct mdev_device *mdev = mdev_from_dev(dev);
46840         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
46842 -       /* If the guest is running, disallow un-assignment of adapter */
46843 -       if (matrix_mdev->kvm)
46844 -               return -EBUSY;
46845 +       mutex_lock(&matrix_dev->lock);
46847 +       /*
46848 +        * If the KVM pointer is in flux or the guest is running, disallow
46849 +        * un-assignment of adapter
46850 +        */
46851 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
46852 +               ret = -EBUSY;
46853 +               goto done;
46854 +       }
46856         ret = kstrtoul(buf, 0, &apid);
46857         if (ret)
46858 -               return ret;
46859 +               goto done;
46861 -       if (apid > matrix_mdev->matrix.apm_max)
46862 -               return -ENODEV;
46863 +       if (apid > matrix_mdev->matrix.apm_max) {
46864 +               ret = -ENODEV;
46865 +               goto done;
46866 +       }
46868 -       mutex_lock(&matrix_dev->lock);
46869         clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
46870 +       ret = count;
46871 +done:
46872         mutex_unlock(&matrix_dev->lock);
46874 -       return count;
46875 +       return ret;
46877  static DEVICE_ATTR_WO(unassign_adapter);
46879 @@ -753,17 +789,24 @@ static ssize_t assign_domain_store(struct device *dev,
46880         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
46881         unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
46883 -       /* If the guest is running, disallow assignment of domain */
46884 -       if (matrix_mdev->kvm)
46885 -               return -EBUSY;
46886 +       mutex_lock(&matrix_dev->lock);
46888 +       /*
46889 +        * If the KVM pointer is in flux or the guest is running, disallow
46890 +        * assignment of domain
46891 +        */
46892 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
46893 +               ret = -EBUSY;
46894 +               goto done;
46895 +       }
46897         ret = kstrtoul(buf, 0, &apqi);
46898         if (ret)
46899 -               return ret;
46900 -       if (apqi > max_apqi)
46901 -               return -ENODEV;
46903 -       mutex_lock(&matrix_dev->lock);
46904 +               goto done;
46905 +       if (apqi > max_apqi) {
46906 +               ret = -ENODEV;
46907 +               goto done;
46908 +       }
46910         ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
46911         if (ret)
46912 @@ -814,22 +857,32 @@ static ssize_t unassign_domain_store(struct device *dev,
46913         struct mdev_device *mdev = mdev_from_dev(dev);
46914         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
46916 -       /* If the guest is running, disallow un-assignment of domain */
46917 -       if (matrix_mdev->kvm)
46918 -               return -EBUSY;
46919 +       mutex_lock(&matrix_dev->lock);
46921 +       /*
46922 +        * If the KVM pointer is in flux or the guest is running, disallow
46923 +        * un-assignment of domain
46924 +        */
46925 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
46926 +               ret = -EBUSY;
46927 +               goto done;
46928 +       }
46930         ret = kstrtoul(buf, 0, &apqi);
46931         if (ret)
46932 -               return ret;
46933 +               goto done;
46935 -       if (apqi > matrix_mdev->matrix.aqm_max)
46936 -               return -ENODEV;
46937 +       if (apqi > matrix_mdev->matrix.aqm_max) {
46938 +               ret = -ENODEV;
46939 +               goto done;
46940 +       }
46942 -       mutex_lock(&matrix_dev->lock);
46943         clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
46944 -       mutex_unlock(&matrix_dev->lock);
46945 +       ret = count;
46947 -       return count;
46948 +done:
46949 +       mutex_unlock(&matrix_dev->lock);
46950 +       return ret;
46952  static DEVICE_ATTR_WO(unassign_domain);
46954 @@ -858,27 +911,36 @@ static ssize_t assign_control_domain_store(struct device *dev,
46955         struct mdev_device *mdev = mdev_from_dev(dev);
46956         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
46958 -       /* If the guest is running, disallow assignment of control domain */
46959 -       if (matrix_mdev->kvm)
46960 -               return -EBUSY;
46961 +       mutex_lock(&matrix_dev->lock);
46963 +       /*
46964 +        * If the KVM pointer is in flux or the guest is running, disallow
46965 +        * assignment of control domain.
46966 +        */
46967 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
46968 +               ret = -EBUSY;
46969 +               goto done;
46970 +       }
46972         ret = kstrtoul(buf, 0, &id);
46973         if (ret)
46974 -               return ret;
46975 +               goto done;
46977 -       if (id > matrix_mdev->matrix.adm_max)
46978 -               return -ENODEV;
46979 +       if (id > matrix_mdev->matrix.adm_max) {
46980 +               ret = -ENODEV;
46981 +               goto done;
46982 +       }
46984         /* Set the bit in the ADM (bitmask) corresponding to the AP control
46985          * domain number (id). The bits in the mask, from most significant to
46986          * least significant, correspond to IDs 0 up to the one less than the
46987          * number of control domains that can be assigned.
46988          */
46989 -       mutex_lock(&matrix_dev->lock);
46990         set_bit_inv(id, matrix_mdev->matrix.adm);
46991 +       ret = count;
46992 +done:
46993         mutex_unlock(&matrix_dev->lock);
46995 -       return count;
46996 +       return ret;
46998  static DEVICE_ATTR_WO(assign_control_domain);
47000 @@ -908,21 +970,30 @@ static ssize_t unassign_control_domain_store(struct device *dev,
47001         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
47002         unsigned long max_domid =  matrix_mdev->matrix.adm_max;
47004 -       /* If the guest is running, disallow un-assignment of control domain */
47005 -       if (matrix_mdev->kvm)
47006 -               return -EBUSY;
47007 +       mutex_lock(&matrix_dev->lock);
47009 +       /*
47010 +        * If the KVM pointer is in flux or the guest is running, disallow
47011 +        * un-assignment of control domain.
47012 +        */
47013 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
47014 +               ret = -EBUSY;
47015 +               goto done;
47016 +       }
47018         ret = kstrtoul(buf, 0, &domid);
47019         if (ret)
47020 -               return ret;
47021 -       if (domid > max_domid)
47022 -               return -ENODEV;
47023 +               goto done;
47024 +       if (domid > max_domid) {
47025 +               ret = -ENODEV;
47026 +               goto done;
47027 +       }
47029 -       mutex_lock(&matrix_dev->lock);
47030         clear_bit_inv(domid, matrix_mdev->matrix.adm);
47031 +       ret = count;
47032 +done:
47033         mutex_unlock(&matrix_dev->lock);
47035 -       return count;
47036 +       return ret;
47038  static DEVICE_ATTR_WO(unassign_control_domain);
47040 @@ -1027,8 +1098,15 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
47041   * @matrix_mdev: a mediated matrix device
47042   * @kvm: reference to KVM instance
47043   *
47044 - * Verifies no other mediated matrix device has @kvm and sets a reference to
47045 - * it in @matrix_mdev->kvm.
47046 + * Sets all data for @matrix_mdev that are needed to manage AP resources
47047 + * for the guest whose state is represented by @kvm.
47048 + *
47049 + * Note: The matrix_dev->lock must be taken prior to calling
47050 + * this function; however, the lock will be temporarily released while the
47051 + * guest's AP configuration is set to avoid a potential lockdep splat.
47052 + * The kvm->lock is taken to set the guest's AP configuration which, under
47053 + * certain circumstances, will result in a circular lock dependency if this is
47054 + * done under the @matrix_mdev->lock.
47055   *
47056   * Return 0 if no other mediated matrix device has a reference to @kvm;
47057   * otherwise, returns an -EPERM.
47058 @@ -1038,14 +1116,25 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
47060         struct ap_matrix_mdev *m;
47062 -       list_for_each_entry(m, &matrix_dev->mdev_list, node) {
47063 -               if ((m != matrix_mdev) && (m->kvm == kvm))
47064 -                       return -EPERM;
47065 -       }
47066 +       if (kvm->arch.crypto.crycbd) {
47067 +               list_for_each_entry(m, &matrix_dev->mdev_list, node) {
47068 +                       if (m != matrix_mdev && m->kvm == kvm)
47069 +                               return -EPERM;
47070 +               }
47072 -       matrix_mdev->kvm = kvm;
47073 -       kvm_get_kvm(kvm);
47074 -       kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
47075 +               kvm_get_kvm(kvm);
47076 +               matrix_mdev->kvm_busy = true;
47077 +               mutex_unlock(&matrix_dev->lock);
47078 +               kvm_arch_crypto_set_masks(kvm,
47079 +                                         matrix_mdev->matrix.apm,
47080 +                                         matrix_mdev->matrix.aqm,
47081 +                                         matrix_mdev->matrix.adm);
47082 +               mutex_lock(&matrix_dev->lock);
47083 +               kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
47084 +               matrix_mdev->kvm = kvm;
47085 +               matrix_mdev->kvm_busy = false;
47086 +               wake_up_all(&matrix_mdev->wait_for_kvm);
47087 +       }
47089         return 0;
47091 @@ -1079,51 +1168,65 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
47092         return NOTIFY_DONE;
47095 +/**
47096 + * vfio_ap_mdev_unset_kvm
47097 + *
47098 + * @matrix_mdev: a matrix mediated device
47099 + *
47100 + * Performs clean-up of resources no longer needed by @matrix_mdev.
47101 + *
47102 + * Note: The matrix_dev->lock must be taken prior to calling
47103 + * this function; however, the lock will be temporarily released while the
47104 + * guest's AP configuration is cleared to avoid a potential lockdep splat.
47105 + * The kvm->lock is taken to clear the guest's AP configuration which, under
47106 + * certain circumstances, will result in a circular lock dependency if this is
47107 + * done under the @matrix_mdev->lock.
47108 + *
47109 + */
47110  static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
47112 -       kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
47113 -       matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
47114 -       vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
47115 -       kvm_put_kvm(matrix_mdev->kvm);
47116 -       matrix_mdev->kvm = NULL;
47117 +       /*
47118 +        * If the KVM pointer is in the process of being set, wait until the
47119 +        * process has completed.
47120 +        */
47121 +       wait_event_cmd(matrix_mdev->wait_for_kvm,
47122 +                      !matrix_mdev->kvm_busy,
47123 +                      mutex_unlock(&matrix_dev->lock),
47124 +                      mutex_lock(&matrix_dev->lock));
47126 +       if (matrix_mdev->kvm) {
47127 +               matrix_mdev->kvm_busy = true;
47128 +               mutex_unlock(&matrix_dev->lock);
47129 +               kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
47130 +               mutex_lock(&matrix_dev->lock);
47131 +               vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
47132 +               matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
47133 +               kvm_put_kvm(matrix_mdev->kvm);
47134 +               matrix_mdev->kvm = NULL;
47135 +               matrix_mdev->kvm_busy = false;
47136 +               wake_up_all(&matrix_mdev->wait_for_kvm);
47137 +       }
47140  static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
47141                                        unsigned long action, void *data)
47143 -       int ret, notify_rc = NOTIFY_OK;
47144 +       int notify_rc = NOTIFY_OK;
47145         struct ap_matrix_mdev *matrix_mdev;
47147         if (action != VFIO_GROUP_NOTIFY_SET_KVM)
47148                 return NOTIFY_OK;
47150 -       matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
47151         mutex_lock(&matrix_dev->lock);
47152 +       matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
47154 -       if (!data) {
47155 -               if (matrix_mdev->kvm)
47156 -                       vfio_ap_mdev_unset_kvm(matrix_mdev);
47157 -               goto notify_done;
47158 -       }
47160 -       ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
47161 -       if (ret) {
47162 -               notify_rc = NOTIFY_DONE;
47163 -               goto notify_done;
47164 -       }
47166 -       /* If there is no CRYCB pointer, then we can't copy the masks */
47167 -       if (!matrix_mdev->kvm->arch.crypto.crycbd) {
47168 +       if (!data)
47169 +               vfio_ap_mdev_unset_kvm(matrix_mdev);
47170 +       else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
47171                 notify_rc = NOTIFY_DONE;
47172 -               goto notify_done;
47173 -       }
47175 -       kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
47176 -                                 matrix_mdev->matrix.aqm,
47177 -                                 matrix_mdev->matrix.adm);
47179 -notify_done:
47180         mutex_unlock(&matrix_dev->lock);
47182         return notify_rc;
47185 @@ -1258,8 +1361,7 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev)
47186         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
47188         mutex_lock(&matrix_dev->lock);
47189 -       if (matrix_mdev->kvm)
47190 -               vfio_ap_mdev_unset_kvm(matrix_mdev);
47191 +       vfio_ap_mdev_unset_kvm(matrix_mdev);
47192         mutex_unlock(&matrix_dev->lock);
47194         vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
47195 @@ -1293,6 +1395,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
47196                                     unsigned int cmd, unsigned long arg)
47198         int ret;
47199 +       struct ap_matrix_mdev *matrix_mdev;
47201         mutex_lock(&matrix_dev->lock);
47202         switch (cmd) {
47203 @@ -1300,6 +1403,21 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
47204                 ret = vfio_ap_mdev_get_device_info(arg);
47205                 break;
47206         case VFIO_DEVICE_RESET:
47207 +               matrix_mdev = mdev_get_drvdata(mdev);
47208 +               if (WARN(!matrix_mdev, "Driver data missing from mdev!!")) {
47209 +                       ret = -EINVAL;
47210 +                       break;
47211 +               }
47213 +               /*
47214 +                * If the KVM pointer is in the process of being set, wait until
47215 +                * the process has completed.
47216 +                */
47217 +               wait_event_cmd(matrix_mdev->wait_for_kvm,
47218 +                              !matrix_mdev->kvm_busy,
47219 +                              mutex_unlock(&matrix_dev->lock),
47220 +                              mutex_lock(&matrix_dev->lock));
47222                 ret = vfio_ap_mdev_reset_queues(mdev);
47223                 break;
47224         default:
47225 diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
47226 index 28e9d9989768..f82a6396acae 100644
47227 --- a/drivers/s390/crypto/vfio_ap_private.h
47228 +++ b/drivers/s390/crypto/vfio_ap_private.h
47229 @@ -83,6 +83,8 @@ struct ap_matrix_mdev {
47230         struct ap_matrix matrix;
47231         struct notifier_block group_notifier;
47232         struct notifier_block iommu_notifier;
47233 +       bool kvm_busy;
47234 +       wait_queue_head_t wait_for_kvm;
47235         struct kvm *kvm;
47236         struct kvm_s390_module_hook pqap_hook;
47237         struct mdev_device *mdev;
47238 diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
47239 index 33b23884b133..09fe6bb8880b 100644
47240 --- a/drivers/s390/crypto/zcrypt_card.c
47241 +++ b/drivers/s390/crypto/zcrypt_card.c
47242 @@ -192,5 +192,6 @@ void zcrypt_card_unregister(struct zcrypt_card *zc)
47243         spin_unlock(&zcrypt_list_lock);
47244         sysfs_remove_group(&zc->card->ap_dev.device.kobj,
47245                            &zcrypt_card_attr_group);
47246 +       zcrypt_card_put(zc);
47248  EXPORT_SYMBOL(zcrypt_card_unregister);
47249 diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
47250 index 5062eae73d4a..c3ffbd26b73f 100644
47251 --- a/drivers/s390/crypto/zcrypt_queue.c
47252 +++ b/drivers/s390/crypto/zcrypt_queue.c
47253 @@ -223,5 +223,6 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
47254         sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
47255                            &zcrypt_queue_attr_group);
47256         zcrypt_card_put(zc);
47257 +       zcrypt_queue_put(zq);
47259  EXPORT_SYMBOL(zcrypt_queue_unregister);
47260 diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
47261 index 91acff493612..fd9b869d278e 100644
47262 --- a/drivers/s390/net/qeth_core.h
47263 +++ b/drivers/s390/net/qeth_core.h
47264 @@ -437,6 +437,7 @@ struct qeth_qdio_out_buffer {
47266         struct qeth_qdio_out_q *q;
47267         struct list_head list_entry;
47268 +       struct qaob *aob;
47269  };
47271  struct qeth_card;
47272 @@ -499,7 +500,6 @@ struct qeth_out_q_stats {
47273  struct qeth_qdio_out_q {
47274         struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
47275         struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
47276 -       struct qdio_outbuf_state *bufstates; /* convenience pointer */
47277         struct list_head pending_bufs;
47278         struct qeth_out_q_stats stats;
47279         spinlock_t lock;
47280 @@ -563,7 +563,6 @@ struct qeth_qdio_info {
47281         /* output */
47282         unsigned int no_out_queues;
47283         struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];
47284 -       struct qdio_outbuf_state *out_bufstates;
47286         /* priority queueing */
47287         int do_prio_queueing;
47288 diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
47289 index a814698387bc..175b82b98f36 100644
47290 --- a/drivers/s390/net/qeth_core_main.c
47291 +++ b/drivers/s390/net/qeth_core_main.c
47292 @@ -369,8 +369,7 @@ static int qeth_cq_init(struct qeth_card *card)
47293                                    QDIO_MAX_BUFFERS_PER_Q);
47294                 card->qdio.c_q->next_buf_to_init = 127;
47295                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
47296 -                            card->qdio.no_in_queues - 1, 0,
47297 -                            127);
47298 +                            card->qdio.no_in_queues - 1, 0, 127, NULL);
47299                 if (rc) {
47300                         QETH_CARD_TEXT_(card, 2, "1err%d", rc);
47301                         goto out;
47302 @@ -383,48 +382,22 @@ static int qeth_cq_init(struct qeth_card *card)
47304  static int qeth_alloc_cq(struct qeth_card *card)
47306 -       int rc;
47308         if (card->options.cq == QETH_CQ_ENABLED) {
47309 -               int i;
47310 -               struct qdio_outbuf_state *outbuf_states;
47312                 QETH_CARD_TEXT(card, 2, "cqon");
47313                 card->qdio.c_q = qeth_alloc_qdio_queue();
47314                 if (!card->qdio.c_q) {
47315 -                       rc = -1;
47316 -                       goto kmsg_out;
47317 +                       dev_err(&card->gdev->dev, "Failed to create completion queue\n");
47318 +                       return -ENOMEM;
47319                 }
47321                 card->qdio.no_in_queues = 2;
47322 -               card->qdio.out_bufstates =
47323 -                       kcalloc(card->qdio.no_out_queues *
47324 -                                       QDIO_MAX_BUFFERS_PER_Q,
47325 -                               sizeof(struct qdio_outbuf_state),
47326 -                               GFP_KERNEL);
47327 -               outbuf_states = card->qdio.out_bufstates;
47328 -               if (outbuf_states == NULL) {
47329 -                       rc = -1;
47330 -                       goto free_cq_out;
47331 -               }
47332 -               for (i = 0; i < card->qdio.no_out_queues; ++i) {
47333 -                       card->qdio.out_qs[i]->bufstates = outbuf_states;
47334 -                       outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
47335 -               }
47336         } else {
47337                 QETH_CARD_TEXT(card, 2, "nocq");
47338                 card->qdio.c_q = NULL;
47339                 card->qdio.no_in_queues = 1;
47340         }
47341         QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
47342 -       rc = 0;
47343 -out:
47344 -       return rc;
47345 -free_cq_out:
47346 -       qeth_free_qdio_queue(card->qdio.c_q);
47347 -       card->qdio.c_q = NULL;
47348 -kmsg_out:
47349 -       dev_err(&card->gdev->dev, "Failed to create completion queue\n");
47350 -       goto out;
47351 +       return 0;
47354  static void qeth_free_cq(struct qeth_card *card)
47355 @@ -434,8 +407,6 @@ static void qeth_free_cq(struct qeth_card *card)
47356                 qeth_free_qdio_queue(card->qdio.c_q);
47357                 card->qdio.c_q = NULL;
47358         }
47359 -       kfree(card->qdio.out_bufstates);
47360 -       card->qdio.out_bufstates = NULL;
47363  static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
47364 @@ -487,12 +458,12 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
47365         switch (atomic_xchg(&buffer->state, new_state)) {
47366         case QETH_QDIO_BUF_PRIMED:
47367                 /* Faster than TX completion code, let it handle the async
47368 -                * completion for us.
47369 +                * completion for us. It will also recycle the QAOB.
47370                  */
47371                 break;
47372         case QETH_QDIO_BUF_PENDING:
47373                 /* TX completion code is active and will handle the async
47374 -                * completion for us.
47375 +                * completion for us. It will also recycle the QAOB.
47376                  */
47377                 break;
47378         case QETH_QDIO_BUF_NEED_QAOB:
47379 @@ -501,7 +472,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
47380                 qeth_notify_skbs(buffer->q, buffer, notification);
47382                 /* Free dangling allocations. The attached skbs are handled by
47383 -                * qeth_tx_complete_pending_bufs().
47384 +                * qeth_tx_complete_pending_bufs(), and so is the QAOB.
47385                  */
47386                 for (i = 0;
47387                      i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
47388 @@ -520,8 +491,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
47389         default:
47390                 WARN_ON_ONCE(1);
47391         }
47393 -       qdio_release_aob(aob);
47396  static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
47397 @@ -1451,6 +1420,13 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
47398         atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
47401 +static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
47403 +       if (buf->aob)
47404 +               qdio_release_aob(buf->aob);
47405 +       kmem_cache_free(qeth_qdio_outbuf_cache, buf);
47408  static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
47409                                           struct qeth_qdio_out_q *queue,
47410                                           bool drain)
47411 @@ -1468,7 +1444,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
47412                         qeth_tx_complete_buf(buf, drain, 0);
47414                         list_del(&buf->list_entry);
47415 -                       kmem_cache_free(qeth_qdio_outbuf_cache, buf);
47416 +                       qeth_free_out_buf(buf);
47417                 }
47418         }
47420 @@ -1485,7 +1461,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
47422                 qeth_clear_output_buffer(q, q->bufs[j], true, 0);
47423                 if (free) {
47424 -                       kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
47425 +                       qeth_free_out_buf(q->bufs[j]);
47426                         q->bufs[j] = NULL;
47427                 }
47428         }
47429 @@ -2637,7 +2613,7 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
47431  err_out_bufs:
47432         while (i > 0)
47433 -               kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]);
47434 +               qeth_free_out_buf(q->bufs[--i]);
47435         qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
47436  err_qdio_bufs:
47437         kfree(q);
47438 @@ -3024,7 +3000,8 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
47439         }
47441         card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
47442 -       rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
47443 +       rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
47444 +                    NULL);
47445         if (rc) {
47446                 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
47447                 return rc;
47448 @@ -3516,7 +3493,7 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
47449                 }
47451                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
47452 -                            queue->next_buf_to_init, count);
47453 +                            queue->next_buf_to_init, count, NULL);
47454                 if (rc) {
47455                         QETH_CARD_TEXT(card, 2, "qinberr");
47456                 }
47457 @@ -3625,6 +3602,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
47458         struct qeth_qdio_out_buffer *buf = queue->bufs[index];
47459         unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
47460         struct qeth_card *card = queue->card;
47461 +       struct qaob *aob = NULL;
47462         int rc;
47463         int i;
47465 @@ -3637,16 +3615,24 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
47466                                 SBAL_EFLAGS_LAST_ENTRY;
47467                 queue->coalesced_frames += buf->frames;
47469 -               if (queue->bufstates)
47470 -                       queue->bufstates[bidx].user = buf;
47472                 if (IS_IQD(card)) {
47473                         skb_queue_walk(&buf->skb_list, skb)
47474                                 skb_tx_timestamp(skb);
47475                 }
47476         }
47478 -       if (!IS_IQD(card)) {
47479 +       if (IS_IQD(card)) {
47480 +               if (card->options.cq == QETH_CQ_ENABLED &&
47481 +                   !qeth_iqd_is_mcast_queue(card, queue) &&
47482 +                   count == 1) {
47483 +                       if (!buf->aob)
47484 +                               buf->aob = qdio_allocate_aob();
47485 +                       if (buf->aob) {
47486 +                               aob = buf->aob;
47487 +                               aob->user1 = (u64) buf;
47488 +                       }
47489 +               }
47490 +       } else {
47491                 if (!queue->do_pack) {
47492                         if ((atomic_read(&queue->used_buffers) >=
47493                                 (QETH_HIGH_WATERMARK_PACK -
47494 @@ -3677,8 +3663,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
47495         }
47497         QETH_TXQ_STAT_INC(queue, doorbell);
47498 -       rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
47499 -                    queue->queue_no, index, count);
47500 +       rc = do_QDIO(CARD_DDEV(card), qdio_flags, queue->queue_no, index, count,
47501 +                    aob);
47503         switch (rc) {
47504         case 0:
47505 @@ -3814,8 +3800,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
47506                 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
47507         }
47508         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
47509 -                   card->qdio.c_q->next_buf_to_init,
47510 -                   count);
47511 +                    cq->next_buf_to_init, count, NULL);
47512         if (rc) {
47513                 dev_warn(&card->gdev->dev,
47514                         "QDIO reported an error, rc=%i\n", rc);
47515 @@ -5270,7 +5255,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
47516         init_data.int_parm               = (unsigned long) card;
47517         init_data.input_sbal_addr_array  = in_sbal_ptrs;
47518         init_data.output_sbal_addr_array = out_sbal_ptrs;
47519 -       init_data.output_sbal_state_array = card->qdio.out_bufstates;
47520         init_data.scan_threshold         = IS_IQD(card) ? 0 : 32;
47522         if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
47523 @@ -6069,7 +6053,15 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
47524         bool error = !!qdio_error;
47526         if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
47527 -               WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
47528 +               struct qaob *aob = buffer->aob;
47530 +               if (!aob) {
47531 +                       netdev_WARN_ONCE(card->dev,
47532 +                                        "Pending TX buffer %#x without QAOB on TX queue %u\n",
47533 +                                        bidx, queue->queue_no);
47534 +                       qeth_schedule_recovery(card);
47535 +                       return;
47536 +               }
47538                 QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
47540 @@ -6125,6 +6117,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
47541                 default:
47542                         WARN_ON_ONCE(1);
47543                 }
47545 +               memset(aob, 0, sizeof(*aob));
47546         } else if (card->options.cq == QETH_CQ_ENABLED) {
47547                 qeth_notify_skbs(queue, buffer,
47548                                  qeth_compute_cq_notification(sflags, 0));
47549 diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
47550 index 23ab16d65f2a..049596cbfb5d 100644
47551 --- a/drivers/s390/scsi/zfcp_qdio.c
47552 +++ b/drivers/s390/scsi/zfcp_qdio.c
47553 @@ -128,7 +128,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
47554         /*
47555          * put SBALs back to response queue
47556          */
47557 -       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
47558 +       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count, NULL))
47559                 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
47562 @@ -298,7 +298,7 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
47563         atomic_sub(sbal_number, &qdio->req_q_free);
47565         retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
47566 -                        q_req->sbal_first, sbal_number);
47567 +                        q_req->sbal_first, sbal_number, NULL);
47569         if (unlikely(retval)) {
47570                 /* Failed to submit the IO, roll back our modifications. */
47571 @@ -463,7 +463,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
47572                 sbale->addr = 0;
47573         }
47575 -       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
47576 +       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q,
47577 +                   NULL))
47578                 goto failed_qdio;
47580         /* set index of first available SBALS / number of available SBALS */
47581 diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
47582 index ea436a14087f..5eff3368143d 100644
47583 --- a/drivers/scsi/device_handler/scsi_dh_alua.c
47584 +++ b/drivers/scsi/device_handler/scsi_dh_alua.c
47585 @@ -573,10 +573,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
47586                  * even though it shouldn't according to T10.
47587                  * The retry without rtpg_ext_hdr_req set
47588                  * handles this.
47589 +                * Note:  some arrays return a sense key of ILLEGAL_REQUEST
47590 +                * with ASC 00h if they don't support the extended header.
47591                  */
47592                 if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
47593 -                   sense_hdr.sense_key == ILLEGAL_REQUEST &&
47594 -                   sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
47595 +                   sense_hdr.sense_key == ILLEGAL_REQUEST) {
47596                         pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
47597                         goto retry;
47598                 }
47599 diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
47600 index 36744968378f..09e49e21deb6 100644
47601 --- a/drivers/scsi/fnic/fnic_scsi.c
47602 +++ b/drivers/scsi/fnic/fnic_scsi.c
47603 @@ -217,7 +217,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
47605         /* wait for io cmpl */
47606         while (atomic_read(&fnic->in_flight))
47607 -               schedule_timeout(msecs_to_jiffies(1));
47608 +               schedule_msec_hrtimeout((1));
47610         spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
47612 @@ -2277,7 +2277,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
47613                 }
47614         }
47616 -       schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
47617 +       schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
47619         /* walk again to check, if IOs are still pending in fw */
47620         if (fnic_is_abts_pending(fnic, lr_sc))
47621 diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
47622 index 7451377c4cb6..3e359ac752fd 100644
47623 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
47624 +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
47625 @@ -1646,7 +1646,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
47626                 idx = i * HISI_SAS_PHY_INT_NR;
47627                 for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
47628                         irq = platform_get_irq(pdev, idx);
47629 -                       if (!irq) {
47630 +                       if (irq < 0) {
47631                                 dev_err(dev, "irq init: fail map phy interrupt %d\n",
47632                                         idx);
47633                                 return -ENOENT;
47634 @@ -1665,7 +1665,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
47635         idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
47636         for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
47637                 irq = platform_get_irq(pdev, idx);
47638 -               if (!irq) {
47639 +               if (irq < 0) {
47640                         dev_err(dev, "irq init: could not map cq interrupt %d\n",
47641                                 idx);
47642                         return -ENOENT;
47643 @@ -1683,7 +1683,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
47644         idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
47645         for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
47646                 irq = platform_get_irq(pdev, idx);
47647 -               if (!irq) {
47648 +               if (irq < 0) {
47649                         dev_err(dev, "irq init: could not map fatal interrupt %d\n",
47650                                 idx);
47651                         return -ENOENT;
47652 diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
47653 index 61831f2fdb30..d6675a25719d 100644
47654 --- a/drivers/scsi/ibmvscsi/ibmvfc.c
47655 +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
47656 @@ -603,8 +603,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
47657                 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
47658                         vhost->action = action;
47659                 break;
47660 +       case IBMVFC_HOST_ACTION_REENABLE:
47661 +       case IBMVFC_HOST_ACTION_RESET:
47662 +               vhost->action = action;
47663 +               break;
47664         case IBMVFC_HOST_ACTION_INIT:
47665         case IBMVFC_HOST_ACTION_TGT_DEL:
47666 +       case IBMVFC_HOST_ACTION_LOGO:
47667 +       case IBMVFC_HOST_ACTION_QUERY_TGTS:
47668 +       case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
47669 +       case IBMVFC_HOST_ACTION_NONE:
47670 +       default:
47671                 switch (vhost->action) {
47672                 case IBMVFC_HOST_ACTION_RESET:
47673                 case IBMVFC_HOST_ACTION_REENABLE:
47674 @@ -614,15 +623,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
47675                         break;
47676                 }
47677                 break;
47678 -       case IBMVFC_HOST_ACTION_LOGO:
47679 -       case IBMVFC_HOST_ACTION_QUERY_TGTS:
47680 -       case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
47681 -       case IBMVFC_HOST_ACTION_NONE:
47682 -       case IBMVFC_HOST_ACTION_RESET:
47683 -       case IBMVFC_HOST_ACTION_REENABLE:
47684 -       default:
47685 -               vhost->action = action;
47686 -               break;
47687         }
47690 @@ -5373,30 +5373,49 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
47691         case IBMVFC_HOST_ACTION_INIT_WAIT:
47692                 break;
47693         case IBMVFC_HOST_ACTION_RESET:
47694 -               vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
47695                 list_splice_init(&vhost->purge, &purge);
47696                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
47697                 ibmvfc_complete_purge(&purge);
47698                 rc = ibmvfc_reset_crq(vhost);
47700                 spin_lock_irqsave(vhost->host->host_lock, flags);
47701 -               if (rc == H_CLOSED)
47702 +               if (!rc || rc == H_CLOSED)
47703                         vio_enable_interrupts(to_vio_dev(vhost->dev));
47704 -               if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
47705 -                   (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
47706 -                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
47707 -                       dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
47708 +               if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
47709 +                       /*
47710 +                        * The only action we could have changed to would have
47711 +                        * been reenable, in which case, we skip the rest of
47712 +                        * this path and wait until we've done the re-enable
47713 +                        * before sending the crq init.
47714 +                        */
47715 +                       vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
47717 +                       if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
47718 +                           (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
47719 +                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
47720 +                               dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
47721 +                       }
47722                 }
47723                 break;
47724         case IBMVFC_HOST_ACTION_REENABLE:
47725 -               vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
47726                 list_splice_init(&vhost->purge, &purge);
47727                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
47728                 ibmvfc_complete_purge(&purge);
47729                 rc = ibmvfc_reenable_crq_queue(vhost);
47731                 spin_lock_irqsave(vhost->host->host_lock, flags);
47732 -               if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
47733 -                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
47734 -                       dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
47735 +               if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
47736 +                       /*
47737 +                        * The only action we could have changed to would have
47738 +                        * been reset, in which case, we skip the rest of this
47739 +                        * path and wait until we've done the reset before
47740 +                        * sending the crq init.
47741 +                        */
47742 +                       vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
47743 +                       if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
47744 +                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
47745 +                               dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
47746 +                       }
47747                 }
47748                 break;
47749         case IBMVFC_HOST_ACTION_LOGO:
47750 diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
47751 index f0ed6863cc70..60a88a95a8e2 100644
47752 --- a/drivers/scsi/jazz_esp.c
47753 +++ b/drivers/scsi/jazz_esp.c
47754 @@ -143,7 +143,9 @@ static int esp_jazz_probe(struct platform_device *dev)
47755         if (!esp->command_block)
47756                 goto fail_unmap_regs;
47758 -       host->irq = platform_get_irq(dev, 0);
47759 +       host->irq = err = platform_get_irq(dev, 0);
47760 +       if (err < 0)
47761 +               goto fail_unmap_command_block;
47762         err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
47763         if (err < 0)
47764                 goto fail_unmap_command_block;
47765 diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
47766 index 22826544da7e..9989669beec3 100644
47767 --- a/drivers/scsi/libfc/fc_lport.c
47768 +++ b/drivers/scsi/libfc/fc_lport.c
47769 @@ -1731,7 +1731,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
47771         if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
47772                 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
47773 -                            "lport->mfs:%hu\n", mfs, lport->mfs);
47774 +                            "lport->mfs:%u\n", mfs, lport->mfs);
47775                 fc_lport_error(lport, fp);
47776                 goto out;
47777         }
47778 diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
47779 index bdd9a29f4201..0496a60735ef 100644
47780 --- a/drivers/scsi/lpfc/lpfc_attr.c
47781 +++ b/drivers/scsi/lpfc/lpfc_attr.c
47782 @@ -1687,8 +1687,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
47783                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
47784                                 "0071 Set trunk mode failed with status: %d",
47785                                 rc);
47786 -       if (rc != MBX_TIMEOUT)
47787 -               mempool_free(mbox, phba->mbox_mem_pool);
47788 +       mempool_free(mbox, phba->mbox_mem_pool);
47790         return 0;
47792 @@ -6793,15 +6792,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
47793         pmboxq->ctx_buf = NULL;
47794         pmboxq->vport = vport;
47796 -       if (vport->fc_flag & FC_OFFLINE_MODE)
47797 +       if (vport->fc_flag & FC_OFFLINE_MODE) {
47798                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
47799 -       else
47800 -               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
47802 -       if (rc != MBX_SUCCESS) {
47803 -               if (rc != MBX_TIMEOUT)
47804 +               if (rc != MBX_SUCCESS) {
47805                         mempool_free(pmboxq, phba->mbox_mem_pool);
47806 -               return NULL;
47807 +                       return NULL;
47808 +               }
47809 +       } else {
47810 +               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
47811 +               if (rc != MBX_SUCCESS) {
47812 +                       if (rc != MBX_TIMEOUT)
47813 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
47814 +                       return NULL;
47815 +               }
47816         }
47818         memset(hs, 0, sizeof (struct fc_host_statistics));
47819 @@ -6825,15 +6828,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
47820         pmboxq->ctx_buf = NULL;
47821         pmboxq->vport = vport;
47823 -       if (vport->fc_flag & FC_OFFLINE_MODE)
47824 +       if (vport->fc_flag & FC_OFFLINE_MODE) {
47825                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
47826 -       else
47827 -               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
47829 -       if (rc != MBX_SUCCESS) {
47830 -               if (rc != MBX_TIMEOUT)
47831 +               if (rc != MBX_SUCCESS) {
47832                         mempool_free(pmboxq, phba->mbox_mem_pool);
47833 -               return NULL;
47834 +                       return NULL;
47835 +               }
47836 +       } else {
47837 +               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
47838 +               if (rc != MBX_SUCCESS) {
47839 +                       if (rc != MBX_TIMEOUT)
47840 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
47841 +                       return NULL;
47842 +               }
47843         }
47845         hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
47846 @@ -6906,15 +6913,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
47847         pmboxq->vport = vport;
47849         if ((vport->fc_flag & FC_OFFLINE_MODE) ||
47850 -               (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
47851 +               (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
47852                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
47853 -       else
47854 -               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
47856 -       if (rc != MBX_SUCCESS) {
47857 -               if (rc != MBX_TIMEOUT)
47858 +               if (rc != MBX_SUCCESS) {
47859                         mempool_free(pmboxq, phba->mbox_mem_pool);
47860 -               return;
47861 +                       return;
47862 +               }
47863 +       } else {
47864 +               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
47865 +               if (rc != MBX_SUCCESS) {
47866 +                       if (rc != MBX_TIMEOUT)
47867 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
47868 +                       return;
47869 +               }
47870         }
47872         memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
47873 @@ -6924,15 +6935,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
47874         pmboxq->vport = vport;
47876         if ((vport->fc_flag & FC_OFFLINE_MODE) ||
47877 -           (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
47878 +           (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
47879                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
47880 -       else
47881 +               if (rc != MBX_SUCCESS) {
47882 +                       mempool_free(pmboxq, phba->mbox_mem_pool);
47883 +                       return;
47884 +               }
47885 +       } else {
47886                 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
47888 -       if (rc != MBX_SUCCESS) {
47889 -               if (rc != MBX_TIMEOUT)
47890 -                       mempool_free( pmboxq, phba->mbox_mem_pool);
47891 -               return;
47892 +               if (rc != MBX_SUCCESS) {
47893 +                       if (rc != MBX_TIMEOUT)
47894 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
47895 +                       return;
47896 +               }
47897         }
47899         lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
47900 diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
47901 index a0aad4896a45..763b1eeb0ca8 100644
47902 --- a/drivers/scsi/lpfc/lpfc_crtn.h
47903 +++ b/drivers/scsi/lpfc/lpfc_crtn.h
47904 @@ -55,9 +55,6 @@ void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
47905  void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
47906  void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
47907  void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
47908 -void lpfc_supported_pages(struct lpfcMboxq *);
47909 -void lpfc_pc_sli4_params(struct lpfcMboxq *);
47910 -int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
47911  int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
47912                            uint16_t, uint16_t, bool);
47913  int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
47914 @@ -351,8 +348,8 @@ int lpfc_sli_hbq_size(void);
47915  int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
47916                                struct lpfc_iocbq *, void *);
47917  int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
47918 -int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
47919 -                       uint64_t, lpfc_ctx_cmd);
47920 +int lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
47921 +                       lpfc_ctx_cmd abort_cmd);
47922  int
47923  lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
47924                         uint16_t, uint64_t, lpfc_ctx_cmd);
47925 diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
47926 index f0a758138ae8..3dd22da3153f 100644
47927 --- a/drivers/scsi/lpfc/lpfc_els.c
47928 +++ b/drivers/scsi/lpfc/lpfc_els.c
47929 @@ -1,7 +1,7 @@
47930  /*******************************************************************
47931   * This file is part of the Emulex Linux Device Driver for         *
47932   * Fibre Channel Host Bus Adapters.                                *
47933 - * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
47934 + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
47935   * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
47936   * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
47937   * EMULEX and SLI are trademarks of Emulex.                        *
47938 @@ -1600,7 +1600,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
47939         struct lpfc_nodelist *new_ndlp;
47940         struct serv_parm *sp;
47941         uint8_t  name[sizeof(struct lpfc_name)];
47942 -       uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
47943 +       uint32_t keepDID = 0, keep_nlp_flag = 0;
47944         uint32_t keep_new_nlp_flag = 0;
47945         uint16_t keep_nlp_state;
47946         u32 keep_nlp_fc4_type = 0;
47947 @@ -1622,7 +1622,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
47948         new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
47950         /* return immediately if the WWPN matches ndlp */
47951 -       if (new_ndlp == ndlp)
47952 +       if (!new_ndlp || (new_ndlp == ndlp))
47953                 return ndlp;
47955         if (phba->sli_rev == LPFC_SLI_REV4) {
47956 @@ -1641,30 +1641,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
47957                          (new_ndlp ? new_ndlp->nlp_flag : 0),
47958                          (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
47960 -       if (!new_ndlp) {
47961 -               rc = memcmp(&ndlp->nlp_portname, name,
47962 -                           sizeof(struct lpfc_name));
47963 -               if (!rc) {
47964 -                       if (active_rrqs_xri_bitmap)
47965 -                               mempool_free(active_rrqs_xri_bitmap,
47966 -                                            phba->active_rrq_pool);
47967 -                       return ndlp;
47968 -               }
47969 -               new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
47970 -               if (!new_ndlp) {
47971 -                       if (active_rrqs_xri_bitmap)
47972 -                               mempool_free(active_rrqs_xri_bitmap,
47973 -                                            phba->active_rrq_pool);
47974 -                       return ndlp;
47975 -               }
47976 -       } else {
47977 -               keepDID = new_ndlp->nlp_DID;
47978 -               if (phba->sli_rev == LPFC_SLI_REV4 &&
47979 -                   active_rrqs_xri_bitmap)
47980 -                       memcpy(active_rrqs_xri_bitmap,
47981 -                              new_ndlp->active_rrqs_xri_bitmap,
47982 -                              phba->cfg_rrq_xri_bitmap_sz);
47983 -       }
47984 +       keepDID = new_ndlp->nlp_DID;
47986 +       if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
47987 +               memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
47988 +                      phba->cfg_rrq_xri_bitmap_sz);
47990         /* At this point in this routine, we know new_ndlp will be
47991          * returned. however, any previous GID_FTs that were done
47992 @@ -2063,13 +2044,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
47993   * This routine issues a Port Login (PLOGI) command to a remote N_Port
47994   * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
47995   * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
47996 - * This routine constructs the proper feilds of the PLOGI IOCB and invokes
47997 + * This routine constructs the proper fields of the PLOGI IOCB and invokes
47998   * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
47999   *
48000 - * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
48001 - * will be incremented by 1 for holding the ndlp and the reference to ndlp
48002 - * will be stored into the context1 field of the IOCB for the completion
48003 - * callback function to the PLOGI ELS command.
48004 + * Note that the ndlp reference count will be incremented by 1 for holding
48005 + * the ndlp and the reference to ndlp will be stored into the context1 field
48006 + * of the IOCB for the completion callback function to the PLOGI ELS command.
48007   *
48008   * Return code
48009   *   0 - Successfully issued a plogi for @vport
48010 @@ -2087,29 +2067,28 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
48011         int ret;
48013         ndlp = lpfc_findnode_did(vport, did);
48014 +       if (!ndlp)
48015 +               return 1;
48017 -       if (ndlp) {
48018 -               /* Defer the processing of the issue PLOGI until after the
48019 -                * outstanding UNREG_RPI mbox command completes, unless we
48020 -                * are going offline. This logic does not apply for Fabric DIDs
48021 -                */
48022 -               if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
48023 -                   ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
48024 -                   !(vport->fc_flag & FC_OFFLINE_MODE)) {
48025 -                       lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
48026 -                                        "4110 Issue PLOGI x%x deferred "
48027 -                                        "on NPort x%x rpi x%x Data: x%px\n",
48028 -                                        ndlp->nlp_defer_did, ndlp->nlp_DID,
48029 -                                        ndlp->nlp_rpi, ndlp);
48031 -                       /* We can only defer 1st PLOGI */
48032 -                       if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
48033 -                               ndlp->nlp_defer_did = did;
48034 -                       return 0;
48035 -               }
48036 +       /* Defer the processing of the issue PLOGI until after the
48037 +        * outstanding UNREG_RPI mbox command completes, unless we
48038 +        * are going offline. This logic does not apply for Fabric DIDs
48039 +        */
48040 +       if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
48041 +           ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
48042 +           !(vport->fc_flag & FC_OFFLINE_MODE)) {
48043 +               lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
48044 +                                "4110 Issue PLOGI x%x deferred "
48045 +                                "on NPort x%x rpi x%x Data: x%px\n",
48046 +                                ndlp->nlp_defer_did, ndlp->nlp_DID,
48047 +                                ndlp->nlp_rpi, ndlp);
48049 +               /* We can only defer 1st PLOGI */
48050 +               if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
48051 +                       ndlp->nlp_defer_did = did;
48052 +               return 0;
48053         }
48055 -       /* If ndlp is not NULL, we will bump the reference count on it */
48056         cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
48057         elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
48058                                      ELS_CMD_PLOGI);
48059 @@ -3829,7 +3808,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
48060                 did = irsp->un.elsreq64.remoteID;
48061                 ndlp = lpfc_findnode_did(vport, did);
48062                 if (!ndlp && (cmd != ELS_CMD_PLOGI))
48063 -                       return 1;
48064 +                       return 0;
48065         }
48067         lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
48068 @@ -4473,10 +4452,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
48069   * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
48070   * field in the command IOCB is not NULL, the referred mailbox command will
48071   * be send out, and then invokes the lpfc_els_free_iocb() routine to release
48072 - * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
48073 - * link down event occurred during the discovery, the lpfc_nlp_not_used()
48074 - * routine shall be invoked trying to release the ndlp if no other threads
48075 - * are currently referring it.
48076 + * the IOCB.
48077   **/
48078  static void
48079  lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
48080 @@ -4486,10 +4462,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
48081         struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
48082         struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
48083         IOCB_t  *irsp;
48084 -       uint8_t *pcmd;
48085         LPFC_MBOXQ_t *mbox = NULL;
48086         struct lpfc_dmabuf *mp = NULL;
48087 -       uint32_t ls_rjt = 0;
48089         irsp = &rspiocb->iocb;
48091 @@ -4501,18 +4475,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
48092         if (cmdiocb->context_un.mbox)
48093                 mbox = cmdiocb->context_un.mbox;
48095 -       /* First determine if this is a LS_RJT cmpl. Note, this callback
48096 -        * function can have cmdiocb->contest1 (ndlp) field set to NULL.
48097 -        */
48098 -       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
48099 -       if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
48100 -               /* A LS_RJT associated with Default RPI cleanup has its own
48101 -                * separate code path.
48102 -                */
48103 -               if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
48104 -                       ls_rjt = 1;
48105 -       }
48107         /* Check to see if link went down during discovery */
48108         if (!ndlp || lpfc_els_chk_latt(vport)) {
48109                 if (mbox) {
48110 @@ -4523,15 +4485,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
48111                         }
48112                         mempool_free(mbox, phba->mbox_mem_pool);
48113                 }
48114 -               if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
48115 -                       if (lpfc_nlp_not_used(ndlp)) {
48116 -                               ndlp = NULL;
48117 -                               /* Indicate the node has already released,
48118 -                                * should not reference to it from within
48119 -                                * the routine lpfc_els_free_iocb.
48120 -                                */
48121 -                               cmdiocb->context1 = NULL;
48122 -                       }
48123                 goto out;
48124         }
48126 @@ -4609,29 +4562,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
48127                                 "Data: x%x x%x x%x\n",
48128                                 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
48129                                 ndlp->nlp_rpi);
48131 -                       if (lpfc_nlp_not_used(ndlp)) {
48132 -                               ndlp = NULL;
48133 -                               /* Indicate node has already been released,
48134 -                                * should not reference to it from within
48135 -                                * the routine lpfc_els_free_iocb.
48136 -                                */
48137 -                               cmdiocb->context1 = NULL;
48138 -                       }
48139 -               } else {
48140 -                       /* Do not drop node for lpfc_els_abort'ed ELS cmds */
48141 -                       if (!lpfc_error_lost_link(irsp) &&
48142 -                           ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
48143 -                               if (lpfc_nlp_not_used(ndlp)) {
48144 -                                       ndlp = NULL;
48145 -                                       /* Indicate node has already been
48146 -                                        * released, should not reference
48147 -                                        * to it from within the routine
48148 -                                        * lpfc_els_free_iocb.
48149 -                                        */
48150 -                                       cmdiocb->context1 = NULL;
48151 -                               }
48152 -                       }
48153                 }
48154                 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
48155                 if (mp) {
48156 @@ -4647,19 +4577,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
48157                         ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
48158                 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
48159                 spin_unlock_irq(&ndlp->lock);
48161 -               /* If the node is not being used by another discovery thread,
48162 -                * and we are sending a reject, we are done with it.
48163 -                * Release driver reference count here and free associated
48164 -                * resources.
48165 -                */
48166 -               if (ls_rjt)
48167 -                       if (lpfc_nlp_not_used(ndlp))
48168 -                               /* Indicate node has already been released,
48169 -                                * should not reference to it from within
48170 -                                * the routine lpfc_els_free_iocb.
48171 -                                */
48172 -                               cmdiocb->context1 = NULL;
48173         }
48175         /* Release the originating I/O reference. */
48176 diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
48177 index 48ca4a612f80..c5176f406386 100644
48178 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
48179 +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
48180 @@ -140,11 +140,8 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
48181                               "rport terminate: sid:x%x did:x%x flg:x%x",
48182                               ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
48184 -       if (ndlp->nlp_sid != NLP_NO_SID) {
48185 -               lpfc_sli_abort_iocb(vport,
48186 -                                   &vport->phba->sli.sli3_ring[LPFC_FCP_RING],
48187 -                                   ndlp->nlp_sid, 0, LPFC_CTX_TGT);
48188 -       }
48189 +       if (ndlp->nlp_sid != NLP_NO_SID)
48190 +               lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
48193  /*
48194 @@ -299,8 +296,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
48196         if (ndlp->nlp_sid != NLP_NO_SID) {
48197                 warn_on = 1;
48198 -               lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
48199 -                                   ndlp->nlp_sid, 0, LPFC_CTX_TGT);
48200 +               lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
48201         }
48203         if (warn_on) {
48204 diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
48205 index 541b9aef6bfe..f5bc2c32a817 100644
48206 --- a/drivers/scsi/lpfc/lpfc_hw4.h
48207 +++ b/drivers/scsi/lpfc/lpfc_hw4.h
48208 @@ -124,6 +124,7 @@ struct lpfc_sli_intf {
48209  /* Define SLI4 Alignment requirements. */
48210  #define LPFC_ALIGN_16_BYTE     16
48211  #define LPFC_ALIGN_64_BYTE     64
48212 +#define SLI4_PAGE_SIZE         4096
48214  /* Define SLI4 specific definitions. */
48215  #define LPFC_MQ_CQE_BYTE_OFFSET        256
48216 @@ -2976,62 +2977,6 @@ struct lpfc_mbx_request_features {
48217  #define lpfc_mbx_rq_ftr_rsp_mrqp_WORD          word3
48218  };
48220 -struct lpfc_mbx_supp_pages {
48221 -       uint32_t word1;
48222 -#define qs_SHIFT                               0
48223 -#define qs_MASK                                        0x00000001
48224 -#define qs_WORD                                        word1
48225 -#define wr_SHIFT                               1
48226 -#define wr_MASK                                0x00000001
48227 -#define wr_WORD                                        word1
48228 -#define pf_SHIFT                               8
48229 -#define pf_MASK                                        0x000000ff
48230 -#define pf_WORD                                        word1
48231 -#define cpn_SHIFT                              16
48232 -#define cpn_MASK                               0x000000ff
48233 -#define cpn_WORD                               word1
48234 -       uint32_t word2;
48235 -#define list_offset_SHIFT                      0
48236 -#define list_offset_MASK                       0x000000ff
48237 -#define list_offset_WORD                       word2
48238 -#define next_offset_SHIFT                      8
48239 -#define next_offset_MASK                       0x000000ff
48240 -#define next_offset_WORD                       word2
48241 -#define elem_cnt_SHIFT                         16
48242 -#define elem_cnt_MASK                          0x000000ff
48243 -#define elem_cnt_WORD                          word2
48244 -       uint32_t word3;
48245 -#define pn_0_SHIFT                             24
48246 -#define pn_0_MASK                              0x000000ff
48247 -#define pn_0_WORD                              word3
48248 -#define pn_1_SHIFT                             16
48249 -#define pn_1_MASK                              0x000000ff
48250 -#define pn_1_WORD                              word3
48251 -#define pn_2_SHIFT                             8
48252 -#define pn_2_MASK                              0x000000ff
48253 -#define pn_2_WORD                              word3
48254 -#define pn_3_SHIFT                             0
48255 -#define pn_3_MASK                              0x000000ff
48256 -#define pn_3_WORD                              word3
48257 -       uint32_t word4;
48258 -#define pn_4_SHIFT                             24
48259 -#define pn_4_MASK                              0x000000ff
48260 -#define pn_4_WORD                              word4
48261 -#define pn_5_SHIFT                             16
48262 -#define pn_5_MASK                              0x000000ff
48263 -#define pn_5_WORD                              word4
48264 -#define pn_6_SHIFT                             8
48265 -#define pn_6_MASK                              0x000000ff
48266 -#define pn_6_WORD                              word4
48267 -#define pn_7_SHIFT                             0
48268 -#define pn_7_MASK                              0x000000ff
48269 -#define pn_7_WORD                              word4
48270 -       uint32_t rsvd[27];
48271 -#define LPFC_SUPP_PAGES                        0
48272 -#define LPFC_BLOCK_GUARD_PROFILES      1
48273 -#define LPFC_SLI4_PARAMETERS           2
48276  struct lpfc_mbx_memory_dump_type3 {
48277         uint32_t word1;
48278  #define lpfc_mbx_memory_dump_type3_type_SHIFT    0
48279 @@ -3248,121 +3193,6 @@ struct user_eeprom {
48280         uint8_t reserved191[57];
48281  };
48283 -struct lpfc_mbx_pc_sli4_params {
48284 -       uint32_t word1;
48285 -#define qs_SHIFT                               0
48286 -#define qs_MASK                                        0x00000001
48287 -#define qs_WORD                                        word1
48288 -#define wr_SHIFT                               1
48289 -#define wr_MASK                                        0x00000001
48290 -#define wr_WORD                                        word1
48291 -#define pf_SHIFT                               8
48292 -#define pf_MASK                                        0x000000ff
48293 -#define pf_WORD                                        word1
48294 -#define cpn_SHIFT                              16
48295 -#define cpn_MASK                               0x000000ff
48296 -#define cpn_WORD                               word1
48297 -       uint32_t word2;
48298 -#define if_type_SHIFT                          0
48299 -#define if_type_MASK                           0x00000007
48300 -#define if_type_WORD                           word2
48301 -#define sli_rev_SHIFT                          4
48302 -#define sli_rev_MASK                           0x0000000f
48303 -#define sli_rev_WORD                           word2
48304 -#define sli_family_SHIFT                       8
48305 -#define sli_family_MASK                                0x000000ff
48306 -#define sli_family_WORD                                word2
48307 -#define featurelevel_1_SHIFT                   16
48308 -#define featurelevel_1_MASK                    0x000000ff
48309 -#define featurelevel_1_WORD                    word2
48310 -#define featurelevel_2_SHIFT                   24
48311 -#define featurelevel_2_MASK                    0x0000001f
48312 -#define featurelevel_2_WORD                    word2
48313 -       uint32_t word3;
48314 -#define fcoe_SHIFT                             0
48315 -#define fcoe_MASK                              0x00000001
48316 -#define fcoe_WORD                              word3
48317 -#define fc_SHIFT                               1
48318 -#define fc_MASK                                        0x00000001
48319 -#define fc_WORD                                        word3
48320 -#define nic_SHIFT                              2
48321 -#define nic_MASK                               0x00000001
48322 -#define nic_WORD                               word3
48323 -#define iscsi_SHIFT                            3
48324 -#define iscsi_MASK                             0x00000001
48325 -#define iscsi_WORD                             word3
48326 -#define rdma_SHIFT                             4
48327 -#define rdma_MASK                              0x00000001
48328 -#define rdma_WORD                              word3
48329 -       uint32_t sge_supp_len;
48330 -#define SLI4_PAGE_SIZE 4096
48331 -       uint32_t word5;
48332 -#define if_page_sz_SHIFT                       0
48333 -#define if_page_sz_MASK                                0x0000ffff
48334 -#define if_page_sz_WORD                                word5
48335 -#define loopbk_scope_SHIFT                     24
48336 -#define loopbk_scope_MASK                      0x0000000f
48337 -#define loopbk_scope_WORD                      word5
48338 -#define rq_db_window_SHIFT                     28
48339 -#define rq_db_window_MASK                      0x0000000f
48340 -#define rq_db_window_WORD                      word5
48341 -       uint32_t word6;
48342 -#define eq_pages_SHIFT                         0
48343 -#define eq_pages_MASK                          0x0000000f
48344 -#define eq_pages_WORD                          word6
48345 -#define eqe_size_SHIFT                         8
48346 -#define eqe_size_MASK                          0x000000ff
48347 -#define eqe_size_WORD                          word6
48348 -       uint32_t word7;
48349 -#define cq_pages_SHIFT                         0
48350 -#define cq_pages_MASK                          0x0000000f
48351 -#define cq_pages_WORD                          word7
48352 -#define cqe_size_SHIFT                         8
48353 -#define cqe_size_MASK                          0x000000ff
48354 -#define cqe_size_WORD                          word7
48355 -       uint32_t word8;
48356 -#define mq_pages_SHIFT                         0
48357 -#define mq_pages_MASK                          0x0000000f
48358 -#define mq_pages_WORD                          word8
48359 -#define mqe_size_SHIFT                         8
48360 -#define mqe_size_MASK                          0x000000ff
48361 -#define mqe_size_WORD                          word8
48362 -#define mq_elem_cnt_SHIFT                      16
48363 -#define mq_elem_cnt_MASK                       0x000000ff
48364 -#define mq_elem_cnt_WORD                       word8
48365 -       uint32_t word9;
48366 -#define wq_pages_SHIFT                         0
48367 -#define wq_pages_MASK                          0x0000ffff
48368 -#define wq_pages_WORD                          word9
48369 -#define wqe_size_SHIFT                         8
48370 -#define wqe_size_MASK                          0x000000ff
48371 -#define wqe_size_WORD                          word9
48372 -       uint32_t word10;
48373 -#define rq_pages_SHIFT                         0
48374 -#define rq_pages_MASK                          0x0000ffff
48375 -#define rq_pages_WORD                          word10
48376 -#define rqe_size_SHIFT                         8
48377 -#define rqe_size_MASK                          0x000000ff
48378 -#define rqe_size_WORD                          word10
48379 -       uint32_t word11;
48380 -#define hdr_pages_SHIFT                                0
48381 -#define hdr_pages_MASK                         0x0000000f
48382 -#define hdr_pages_WORD                         word11
48383 -#define hdr_size_SHIFT                         8
48384 -#define hdr_size_MASK                          0x0000000f
48385 -#define hdr_size_WORD                          word11
48386 -#define hdr_pp_align_SHIFT                     16
48387 -#define hdr_pp_align_MASK                      0x0000ffff
48388 -#define hdr_pp_align_WORD                      word11
48389 -       uint32_t word12;
48390 -#define sgl_pages_SHIFT                                0
48391 -#define sgl_pages_MASK                         0x0000000f
48392 -#define sgl_pages_WORD                         word12
48393 -#define sgl_pp_align_SHIFT                     16
48394 -#define sgl_pp_align_MASK                      0x0000ffff
48395 -#define sgl_pp_align_WORD                      word12
48396 -       uint32_t rsvd_13_63[51];
48398  #define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
48399                                &(~((SLI4_PAGE_SIZE)-1)))
48401 @@ -3994,8 +3824,6 @@ struct lpfc_mqe {
48402                 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
48403                 struct lpfc_mbx_query_fw_config query_fw_cfg;
48404                 struct lpfc_mbx_set_beacon_config beacon_config;
48405 -               struct lpfc_mbx_supp_pages supp_pages;
48406 -               struct lpfc_mbx_pc_sli4_params sli4_params;
48407                 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
48408                 struct lpfc_mbx_set_link_diag_state link_diag_state;
48409                 struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
48410 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
48411 index 71f340dd4fbd..a67051ba3f12 100644
48412 --- a/drivers/scsi/lpfc/lpfc_init.c
48413 +++ b/drivers/scsi/lpfc/lpfc_init.c
48414 @@ -6573,8 +6573,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
48415         LPFC_MBOXQ_t *mboxq;
48416         MAILBOX_t *mb;
48417         int rc, i, max_buf_size;
48418 -       uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
48419 -       struct lpfc_mqe *mqe;
48420         int longs;
48421         int extra;
48422         uint64_t wwn;
48423 @@ -6808,32 +6806,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
48425         lpfc_nvme_mod_param_dep(phba);
48427 -       /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
48428 -       lpfc_supported_pages(mboxq);
48429 -       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
48430 -       if (!rc) {
48431 -               mqe = &mboxq->u.mqe;
48432 -               memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
48433 -                      LPFC_MAX_SUPPORTED_PAGES);
48434 -               for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
48435 -                       switch (pn_page[i]) {
48436 -                       case LPFC_SLI4_PARAMETERS:
48437 -                               phba->sli4_hba.pc_sli4_params.supported = 1;
48438 -                               break;
48439 -                       default:
48440 -                               break;
48441 -                       }
48442 -               }
48443 -               /* Read the port's SLI4 Parameters capabilities if supported. */
48444 -               if (phba->sli4_hba.pc_sli4_params.supported)
48445 -                       rc = lpfc_pc_sli4_params_get(phba, mboxq);
48446 -               if (rc) {
48447 -                       mempool_free(mboxq, phba->mbox_mem_pool);
48448 -                       rc = -EIO;
48449 -                       goto out_free_bsmbx;
48450 -               }
48451 -       }
48453         /*
48454          * Get sli4 parameters that override parameters from Port capabilities.
48455          * If this call fails, it isn't critical unless the SLI4 parameters come
48456 @@ -9660,8 +9632,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
48457                                 "3250 QUERY_FW_CFG mailbox failed with status "
48458                                 "x%x add_status x%x, mbx status x%x\n",
48459                                 shdr_status, shdr_add_status, rc);
48460 -               if (rc != MBX_TIMEOUT)
48461 -                       mempool_free(mboxq, phba->mbox_mem_pool);
48462 +               mempool_free(mboxq, phba->mbox_mem_pool);
48463                 rc = -ENXIO;
48464                 goto out_error;
48465         }
48466 @@ -9677,8 +9648,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
48467                         "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
48468                         phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
48470 -       if (rc != MBX_TIMEOUT)
48471 -               mempool_free(mboxq, phba->mbox_mem_pool);
48472 +       mempool_free(mboxq, phba->mbox_mem_pool);
48474         /*
48475          * Set up HBA Event Queues (EQs)
48476 @@ -10276,8 +10246,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
48477                 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
48478                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
48479                                          &shdr->response);
48480 -               if (rc != MBX_TIMEOUT)
48481 -                       mempool_free(mboxq, phba->mbox_mem_pool);
48482 +               mempool_free(mboxq, phba->mbox_mem_pool);
48483                 if (shdr_status || shdr_add_status || rc) {
48484                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
48485                                         "0495 SLI_FUNCTION_RESET mailbox "
48486 @@ -12075,78 +12044,6 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
48487                 phba->pport->work_port_events = 0;
48490 - /**
48491 - * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
48492 - * @phba: Pointer to HBA context object.
48493 - * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
48494 - *
48495 - * This function is called in the SLI4 code path to read the port's
48496 - * sli4 capabilities.
48497 - *
48498 - * This function may be be called from any context that can block-wait
48499 - * for the completion.  The expectation is that this routine is called
48500 - * typically from probe_one or from the online routine.
48501 - **/
48502 -int
48503 -lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
48505 -       int rc;
48506 -       struct lpfc_mqe *mqe;
48507 -       struct lpfc_pc_sli4_params *sli4_params;
48508 -       uint32_t mbox_tmo;
48510 -       rc = 0;
48511 -       mqe = &mboxq->u.mqe;
48513 -       /* Read the port's SLI4 Parameters port capabilities */
48514 -       lpfc_pc_sli4_params(mboxq);
48515 -       if (!phba->sli4_hba.intr_enable)
48516 -               rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
48517 -       else {
48518 -               mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
48519 -               rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
48520 -       }
48522 -       if (unlikely(rc))
48523 -               return 1;
48525 -       sli4_params = &phba->sli4_hba.pc_sli4_params;
48526 -       sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
48527 -       sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
48528 -       sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
48529 -       sli4_params->featurelevel_1 = bf_get(featurelevel_1,
48530 -                                            &mqe->un.sli4_params);
48531 -       sli4_params->featurelevel_2 = bf_get(featurelevel_2,
48532 -                                            &mqe->un.sli4_params);
48533 -       sli4_params->proto_types = mqe->un.sli4_params.word3;
48534 -       sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
48535 -       sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
48536 -       sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
48537 -       sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
48538 -       sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
48539 -       sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
48540 -       sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
48541 -       sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
48542 -       sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
48543 -       sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
48544 -       sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
48545 -       sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
48546 -       sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
48547 -       sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
48548 -       sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
48549 -       sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
48550 -       sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
48551 -       sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
48552 -       sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
48553 -       sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
48555 -       /* Make sure that sge_supp_len can be handled by the driver */
48556 -       if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
48557 -               sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
48559 -       return rc;
48562  /**
48563   * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
48564   * @phba: Pointer to HBA context object.
48565 @@ -12205,7 +12102,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
48566         else
48567                 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
48568         sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
48569 -       sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
48570 +       sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
48571 +                                          mbx_sli4_parameters);
48572         sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
48573         sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
48574         sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
48575 diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
48576 index c03a7f12dd65..72dd22ad5dcc 100644
48577 --- a/drivers/scsi/lpfc/lpfc_mbox.c
48578 +++ b/drivers/scsi/lpfc/lpfc_mbox.c
48579 @@ -2624,39 +2624,3 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
48580         resume_rpi->event_tag = ndlp->phba->fc_eventTag;
48583 -/**
48584 - * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
48585 - *                        mailbox command.
48586 - * @mbox: pointer to lpfc mbox command to initialize.
48587 - *
48588 - * The PORT_CAPABILITIES supported pages mailbox command is issued to
48589 - * retrieve the particular feature pages supported by the port.
48590 - **/
48591 -void
48592 -lpfc_supported_pages(struct lpfcMboxq *mbox)
48594 -       struct lpfc_mbx_supp_pages *supp_pages;
48596 -       memset(mbox, 0, sizeof(*mbox));
48597 -       supp_pages = &mbox->u.mqe.un.supp_pages;
48598 -       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
48599 -       bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
48602 -/**
48603 - * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
48604 - * @mbox: pointer to lpfc mbox command to initialize.
48605 - *
48606 - * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
48607 - * retrieve the particular SLI4 features supported by the port.
48608 - **/
48609 -void
48610 -lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
48612 -       struct lpfc_mbx_pc_sli4_params *sli4_params;
48614 -       memset(mbox, 0, sizeof(*mbox));
48615 -       sli4_params = &mbox->u.mqe.un.sli4_params;
48616 -       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
48617 -       bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
48619 diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
48620 index 135d8e8a42ba..9f05f5e329c6 100644
48621 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c
48622 +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
48623 @@ -279,106 +279,43 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
48624         lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
48627 -/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
48628 +/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
48629   * @phba: pointer to lpfc hba data structure.
48630 - * @link_mbox: pointer to CONFIG_LINK mailbox object
48631 + * @login_mbox: pointer to REG_RPI mailbox object
48632   *
48633 - * This routine is only called if we are SLI3, direct connect pt2pt
48634 - * mode and the remote NPort issues the PLOGI after link up.
48635 + * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
48636   */
48637  static void
48638 -lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
48639 +lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
48641 -       LPFC_MBOXQ_t *login_mbox;
48642 -       MAILBOX_t *mb = &link_mbox->u.mb;
48643         struct lpfc_iocbq *save_iocb;
48644         struct lpfc_nodelist *ndlp;
48645 +       MAILBOX_t *mb = &login_mbox->u.mb;
48647         int rc;
48649 -       ndlp = link_mbox->ctx_ndlp;
48650 -       login_mbox = link_mbox->context3;
48651 +       ndlp = login_mbox->ctx_ndlp;
48652         save_iocb = login_mbox->context3;
48653 -       link_mbox->context3 = NULL;
48654 -       login_mbox->context3 = NULL;
48656 -       /* Check for CONFIG_LINK error */
48657 -       if (mb->mbxStatus) {
48658 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
48659 -                               "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
48660 -                               mb->mbxStatus);
48661 -               mempool_free(login_mbox, phba->mbox_mem_pool);
48662 -               mempool_free(link_mbox, phba->mbox_mem_pool);
48663 -               kfree(save_iocb);
48664 -               return;
48665 -       }
48667 -       /* Now that CONFIG_LINK completed, and our SID is configured,
48668 -        * we can now proceed with sending the PLOGI ACC.
48669 -        */
48670 -       rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
48671 -                             save_iocb, ndlp, login_mbox);
48672 -       if (rc) {
48673 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
48674 -                               "4576 PLOGI ACC fails pt2pt discovery: %x\n",
48675 -                               rc);
48676 -               mempool_free(login_mbox, phba->mbox_mem_pool);
48677 +       if (mb->mbxStatus == MBX_SUCCESS) {
48678 +               /* Now that REG_RPI completed successfully,
48679 +                * we can now proceed with sending the PLOGI ACC.
48680 +                */
48681 +               rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
48682 +                                     save_iocb, ndlp, NULL);
48683 +               if (rc) {
48684 +                       lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
48685 +                                       "4576 PLOGI ACC fails pt2pt discovery: "
48686 +                                       "DID %x Data: %x\n", ndlp->nlp_DID, rc);
48687 +               }
48688         }
48690 -       mempool_free(link_mbox, phba->mbox_mem_pool);
48691 +       /* Now process the REG_RPI cmpl */
48692 +       lpfc_mbx_cmpl_reg_login(phba, login_mbox);
48693 +       ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
48694         kfree(save_iocb);
48697 -/**
48698 - * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler
48699 - * @phba: Pointer to HBA context object.
48700 - * @pmb: Pointer to mailbox object.
48701 - *
48702 - * This function provides the unreg rpi mailbox completion handler for a tgt.
48703 - * The routine frees the memory resources associated with the completed
48704 - * mailbox command and transmits the ELS ACC.
48705 - *
48706 - * This routine is only called if we are SLI4, acting in target
48707 - * mode and the remote NPort issues the PLOGI after link up.
48708 - **/
48709 -static void
48710 -lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
48712 -       struct lpfc_vport *vport = pmb->vport;
48713 -       struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
48714 -       LPFC_MBOXQ_t *mbox = pmb->context3;
48715 -       struct lpfc_iocbq *piocb = NULL;
48716 -       int rc;
48718 -       if (mbox) {
48719 -               pmb->context3 = NULL;
48720 -               piocb = mbox->context3;
48721 -               mbox->context3 = NULL;
48722 -       }
48724 -       /*
48725 -        * Complete the unreg rpi mbx request, and update flags.
48726 -        * This will also restart any deferred events.
48727 -        */
48728 -       lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
48730 -       if (!piocb) {
48731 -               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
48732 -                                "4578 PLOGI ACC fail\n");
48733 -               if (mbox)
48734 -                       mempool_free(mbox, phba->mbox_mem_pool);
48735 -               return;
48736 -       }
48738 -       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
48739 -       if (rc) {
48740 -               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
48741 -                                "4579 PLOGI ACC fail %x\n", rc);
48742 -               if (mbox)
48743 -                       mempool_free(mbox, phba->mbox_mem_pool);
48744 -       }
48745 -       kfree(piocb);
48748  static int
48749  lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
48750                struct lpfc_iocbq *cmdiocb)
48751 @@ -395,8 +332,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
48752         struct lpfc_iocbq *save_iocb;
48753         struct ls_rjt stat;
48754         uint32_t vid, flag;
48755 -       u16 rpi;
48756 -       int rc, defer_acc;
48757 +       int rc;
48759         memset(&stat, 0, sizeof (struct ls_rjt));
48760         pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
48761 @@ -445,7 +381,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
48762         else
48763                 ndlp->nlp_fcp_info |= CLASS3;
48765 -       defer_acc = 0;
48766         ndlp->nlp_class_sup = 0;
48767         if (sp->cls1.classValid)
48768                 ndlp->nlp_class_sup |= FC_COS_CLASS1;
48769 @@ -539,27 +474,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
48771                 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
48773 -               /* Issue config_link / reg_vfi to account for updated TOV's */
48775 +               /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
48776 +                * to account for updated TOV's / parameters
48777 +                */
48778                 if (phba->sli_rev == LPFC_SLI_REV4)
48779                         lpfc_issue_reg_vfi(vport);
48780                 else {
48781 -                       defer_acc = 1;
48782                         link_mbox = mempool_alloc(phba->mbox_mem_pool,
48783                                                   GFP_KERNEL);
48784                         if (!link_mbox)
48785                                 goto out;
48786                         lpfc_config_link(phba, link_mbox);
48787 -                       link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
48788 +                       link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
48789                         link_mbox->vport = vport;
48790                         link_mbox->ctx_ndlp = ndlp;
48792 -                       save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
48793 -                       if (!save_iocb)
48794 +                       rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
48795 +                       if (rc == MBX_NOT_FINISHED) {
48796 +                               mempool_free(link_mbox, phba->mbox_mem_pool);
48797                                 goto out;
48798 -                       /* Save info from cmd IOCB used in rsp */
48799 -                       memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
48800 -                              sizeof(struct lpfc_iocbq));
48801 +                       }
48802                 }
48804                 lpfc_can_disctmo(vport);
48805 @@ -578,59 +512,28 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
48806         if (!login_mbox)
48807                 goto out;
48809 -       /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
48810 -       if (phba->nvmet_support && !defer_acc) {
48811 -               link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
48812 -               if (!link_mbox)
48813 -                       goto out;
48815 -               /* As unique identifiers such as iotag would be overwritten
48816 -                * with those from the cmdiocb, allocate separate temporary
48817 -                * storage for the copy.
48818 -                */
48819 -               save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
48820 -               if (!save_iocb)
48821 -                       goto out;
48823 -               /* Unreg RPI is required for SLI4. */
48824 -               rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
48825 -               lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
48826 -               link_mbox->vport = vport;
48827 -               link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
48828 -               if (!link_mbox->ctx_ndlp)
48829 -                       goto out;
48831 -               link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
48833 -               if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
48834 -                   (!(vport->fc_flag & FC_OFFLINE_MODE)))
48835 -                       ndlp->nlp_flag |= NLP_UNREG_INP;
48836 +       save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
48837 +       if (!save_iocb)
48838 +               goto out;
48840 -               /* Save info from cmd IOCB used in rsp */
48841 -               memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
48842 +       /* Save info from cmd IOCB to be used in rsp after all mbox completes */
48843 +       memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
48844 +              sizeof(struct lpfc_iocbq));
48846 -               /* Delay sending ACC till unreg RPI completes. */
48847 -               defer_acc = 1;
48848 -       } else if (phba->sli_rev == LPFC_SLI_REV4)
48849 +       /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
48850 +       if (phba->sli_rev == LPFC_SLI_REV4)
48851                 lpfc_unreg_rpi(vport, ndlp);
48853 +       /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
48854 +        * always be deferring the ACC.
48855 +        */
48856         rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
48857                             (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
48858         if (rc)
48859                 goto out;
48861 -       /* ACC PLOGI rsp command needs to execute first,
48862 -        * queue this login_mbox command to be processed later.
48863 -        */
48864         login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
48865 -       /*
48866 -        * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
48867 -        * command issued in lpfc_cmpl_els_acc().
48868 -        */
48869         login_mbox->vport = vport;
48870 -       spin_lock_irq(&ndlp->lock);
48871 -       ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
48872 -       spin_unlock_irq(&ndlp->lock);
48874         /*
48875          * If there is an outstanding PLOGI issued, abort it before
48876 @@ -660,7 +563,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
48877                  * to register, then unregister the RPI.
48878                  */
48879                 spin_lock_irq(&ndlp->lock);
48880 -               ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
48881 +               ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
48882 +                                  NLP_RCV_PLOGI);
48883                 spin_unlock_irq(&ndlp->lock);
48884                 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
48885                 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
48886 @@ -670,42 +574,39 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
48887                         mempool_free(login_mbox, phba->mbox_mem_pool);
48888                 return 1;
48889         }
48890 -       if (defer_acc) {
48891 -               /* So the order here should be:
48892 -                * SLI3 pt2pt
48893 -                *   Issue CONFIG_LINK mbox
48894 -                *   CONFIG_LINK cmpl
48895 -                * SLI4 tgt
48896 -                *   Issue UNREG RPI mbx
48897 -                *   UNREG RPI cmpl
48898 -                * Issue PLOGI ACC
48899 -                * PLOGI ACC cmpl
48900 -                * Issue REG_LOGIN mbox
48901 -                */
48903 -               /* Save the REG_LOGIN mbox for and rcv IOCB copy later */
48904 -               link_mbox->context3 = login_mbox;
48905 -               login_mbox->context3 = save_iocb;
48906 +       /* So the order here should be:
48907 +        * SLI3 pt2pt
48908 +        *   Issue CONFIG_LINK mbox
48909 +        *   CONFIG_LINK cmpl
48910 +        * SLI4 pt2pt
48911 +        *   Issue REG_VFI mbox
48912 +        *   REG_VFI cmpl
48913 +        * SLI4
48914 +        *   Issue UNREG RPI mbx
48915 +        *   UNREG RPI cmpl
48916 +        * Issue REG_RPI mbox
48917 +        * REG RPI cmpl
48918 +        * Issue PLOGI ACC
48919 +        * PLOGI ACC cmpl
48920 +        */
48921 +       login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
48922 +       login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
48923 +       login_mbox->context3 = save_iocb; /* For PLOGI ACC */
48925 -               /* Start the ball rolling by issuing CONFIG_LINK here */
48926 -               rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
48927 -               if (rc == MBX_NOT_FINISHED)
48928 -                       goto out;
48929 -               return 1;
48930 -       }
48931 +       spin_lock_irq(&ndlp->lock);
48932 +       ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
48933 +       spin_unlock_irq(&ndlp->lock);
48935 +       /* Start the ball rolling by issuing REG_LOGIN here */
48936 +       rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
48937 +       if (rc == MBX_NOT_FINISHED)
48938 +               goto out;
48939 +       lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
48941 -       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
48942 -       if (rc)
48943 -               mempool_free(login_mbox, phba->mbox_mem_pool);
48944         return 1;
48945  out:
48946 -       if (defer_acc)
48947 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
48948 -                               "4577 discovery failure: %p %p %p\n",
48949 -                               save_iocb, link_mbox, login_mbox);
48950         kfree(save_iocb);
48951 -       if (link_mbox)
48952 -               mempool_free(link_mbox, phba->mbox_mem_pool);
48953         if (login_mbox)
48954                 mempool_free(login_mbox, phba->mbox_mem_pool);
48956 @@ -913,9 +814,14 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
48957                 }
48958         } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
48959                 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
48960 -               !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
48961 +               (ndlp->nlp_type & NLP_NVME_TARGET) ||
48962 +               (vport->fc_flag & FC_PT2PT))) ||
48963                 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
48964 -               /* Only try to re-login if this is NOT a Fabric Node */
48965 +               /* Only try to re-login if this is NOT a Fabric Node
48966 +                * AND the remote NPORT is a FCP/NVME Target or we
48967 +                * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
48968 +                * case for LOGO as a response to ADISC behavior.
48969 +                */
48970                 mod_timer(&ndlp->nlp_delayfunc,
48971                           jiffies + msecs_to_jiffies(1000 * 1));
48972                 spin_lock_irq(&ndlp->lock);
48973 @@ -1985,8 +1891,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
48974                 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
48976                 lpfc_issue_els_logo(vport, ndlp, 0);
48977 -               ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
48978 -               lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
48979                 return ndlp->nlp_state;
48980         }
48982 @@ -2633,12 +2537,10 @@ static uint32_t
48983  lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
48984                           void *arg, uint32_t evt)
48986 -       struct lpfc_hba  *phba = vport->phba;
48987         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
48989         /* flush the target */
48990 -       lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
48991 -                           ndlp->nlp_sid, 0, LPFC_CTX_TGT);
48992 +       lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
48994         /* Treat like rcv logo */
48995         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
48996 diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
48997 index bb2a4a0d1295..a3fd959f7431 100644
48998 --- a/drivers/scsi/lpfc/lpfc_nvmet.c
48999 +++ b/drivers/scsi/lpfc/lpfc_nvmet.c
49000 @@ -3304,7 +3304,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
49001         bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
49003         /* Word 10 */
49004 -       bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
49005         bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
49006         bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
49007                LPFC_WQE_LENLOC_WORD12);
49008 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
49009 index a4d697373c71..fab9ea6fe965 100644
49010 --- a/drivers/scsi/lpfc/lpfc_scsi.c
49011 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
49012 @@ -5815,7 +5815,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
49013                                         tgt_id, lun_id, context);
49014         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
49015         while (time_after(later, jiffies) && cnt) {
49016 -               schedule_timeout_uninterruptible(msecs_to_jiffies(20));
49017 +               schedule_msec_hrtimeout_uninterruptible((20));
49018                 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
49019         }
49020         if (cnt) {
49021 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
49022 index fa1a714a78f0..bd31feb3d5e1 100644
49023 --- a/drivers/scsi/lpfc/lpfc_sli.c
49024 +++ b/drivers/scsi/lpfc/lpfc_sli.c
49025 @@ -5683,12 +5683,10 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
49026                         phba->sli4_hba.lnk_info.lnk_no,
49027                         phba->BIOSVersion);
49028  out_free_mboxq:
49029 -       if (rc != MBX_TIMEOUT) {
49030 -               if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
49031 -                       lpfc_sli4_mbox_cmd_free(phba, mboxq);
49032 -               else
49033 -                       mempool_free(mboxq, phba->mbox_mem_pool);
49034 -       }
49035 +       if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
49036 +               lpfc_sli4_mbox_cmd_free(phba, mboxq);
49037 +       else
49038 +               mempool_free(mboxq, phba->mbox_mem_pool);
49039         return rc;
49042 @@ -5789,12 +5787,10 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
49043         }
49045  out_free_mboxq:
49046 -       if (rc != MBX_TIMEOUT) {
49047 -               if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
49048 -                       lpfc_sli4_mbox_cmd_free(phba, mboxq);
49049 -               else
49050 -                       mempool_free(mboxq, phba->mbox_mem_pool);
49051 -       }
49052 +       if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
49053 +               lpfc_sli4_mbox_cmd_free(phba, mboxq);
49054 +       else
49055 +               mempool_free(mboxq, phba->mbox_mem_pool);
49056         return rc;
49059 @@ -11647,7 +11643,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
49060         icmd = &cmdiocb->iocb;
49061         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
49062             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
49063 -           (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
49064 +           cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
49065                 return IOCB_ABORTING;
49067         if (!pring) {
49068 @@ -11945,7 +11941,6 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
49069  /**
49070   * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
49071   * @vport: Pointer to virtual port.
49072 - * @pring: Pointer to driver SLI ring object.
49073   * @tgt_id: SCSI ID of the target.
49074   * @lun_id: LUN ID of the scsi device.
49075   * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
49076 @@ -11960,18 +11955,22 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
49077   * FCP iocbs associated with SCSI target specified by tgt_id parameter.
49078   * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
49079   * FCP iocbs associated with virtual port.
49080 + * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
49081 + * lpfc_sli4_calc_ring is used.
49082   * This function returns number of iocbs it failed to abort.
49083   * This function is called with no locks held.
49084   **/
49085  int
49086 -lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
49087 -                   uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
49088 +lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
49089 +                   lpfc_ctx_cmd abort_cmd)
49091         struct lpfc_hba *phba = vport->phba;
49092 +       struct lpfc_sli_ring *pring = NULL;
49093         struct lpfc_iocbq *iocbq;
49094         int errcnt = 0, ret_val = 0;
49095         unsigned long iflags;
49096         int i;
49097 +       void *fcp_cmpl = NULL;
49099         /* all I/Os are in process of being flushed */
49100         if (phba->hba_flag & HBA_IOQ_FLUSH)
49101 @@ -11985,8 +11984,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
49102                         continue;
49104                 spin_lock_irqsave(&phba->hbalock, iflags);
49105 +               if (phba->sli_rev == LPFC_SLI_REV3) {
49106 +                       pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
49107 +                       fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
49108 +               } else if (phba->sli_rev == LPFC_SLI_REV4) {
49109 +                       pring = lpfc_sli4_calc_ring(phba, iocbq);
49110 +                       fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
49111 +               }
49112                 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
49113 -                                                    lpfc_sli_abort_fcp_cmpl);
49114 +                                                    fcp_cmpl);
49115                 spin_unlock_irqrestore(&phba->hbalock, iflags);
49116                 if (ret_val != IOCB_SUCCESS)
49117                         errcnt++;
49118 @@ -17072,8 +17078,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
49119                                 "2509 RQ_DESTROY mailbox failed with "
49120                                 "status x%x add_status x%x, mbx status x%x\n",
49121                                 shdr_status, shdr_add_status, rc);
49122 -               if (rc != MBX_TIMEOUT)
49123 -                       mempool_free(mbox, hrq->phba->mbox_mem_pool);
49124 +               mempool_free(mbox, hrq->phba->mbox_mem_pool);
49125                 return -ENXIO;
49126         }
49127         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
49128 @@ -17170,7 +17175,9 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
49129         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
49130         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
49131         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
49132 -       if (rc != MBX_TIMEOUT)
49133 +       if (!phba->sli4_hba.intr_enable)
49134 +               mempool_free(mbox, phba->mbox_mem_pool);
49135 +       else if (rc != MBX_TIMEOUT)
49136                 mempool_free(mbox, phba->mbox_mem_pool);
49137         if (shdr_status || shdr_add_status || rc) {
49138                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
49139 @@ -17367,7 +17374,9 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
49140         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
49141         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
49142         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
49143 -       if (rc != MBX_TIMEOUT)
49144 +       if (!phba->sli4_hba.intr_enable)
49145 +               lpfc_sli4_mbox_cmd_free(phba, mbox);
49146 +       else if (rc != MBX_TIMEOUT)
49147                 lpfc_sli4_mbox_cmd_free(phba, mbox);
49148         if (shdr_status || shdr_add_status || rc) {
49149                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
49150 @@ -17480,7 +17489,9 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
49151         shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
49152         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
49153         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
49154 -       if (rc != MBX_TIMEOUT)
49155 +       if (!phba->sli4_hba.intr_enable)
49156 +               lpfc_sli4_mbox_cmd_free(phba, mbox);
49157 +       else if (rc != MBX_TIMEOUT)
49158                 lpfc_sli4_mbox_cmd_free(phba, mbox);
49159         if (shdr_status || shdr_add_status || rc) {
49160                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
49161 @@ -18064,7 +18075,6 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
49162         if (cmd_iocbq) {
49163                 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
49164                 lpfc_nlp_put(ndlp);
49165 -               lpfc_nlp_not_used(ndlp);
49166                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
49167         }
49169 @@ -18831,8 +18841,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
49170         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
49171         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
49172         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
49173 -       if (rc != MBX_TIMEOUT)
49174 -               mempool_free(mboxq, phba->mbox_mem_pool);
49175 +       mempool_free(mboxq, phba->mbox_mem_pool);
49176         if (shdr_status || shdr_add_status || rc) {
49177                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
49178                                 "2514 POST_RPI_HDR mailbox failed with "
49179 @@ -20076,7 +20085,9 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
49180                         break;
49181                 }
49182         }
49183 -       if (rc != MBX_TIMEOUT)
49184 +       if (!phba->sli4_hba.intr_enable)
49185 +               mempool_free(mbox, phba->mbox_mem_pool);
49186 +       else if (rc != MBX_TIMEOUT)
49187                 mempool_free(mbox, phba->mbox_mem_pool);
49188         if (shdr_status || shdr_add_status || rc) {
49189                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
49190 diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
49191 index ac0eef975f17..b6beacfd0f62 100644
49192 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c
49193 +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
49194 @@ -7252,6 +7252,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
49196         ioc_info(ioc, "sending diag reset !!\n");
49198 +       pci_cfg_access_lock(ioc->pdev);
49200         drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
49202         count = 0;
49203 @@ -7342,10 +7344,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
49204                 goto out;
49205         }
49207 +       pci_cfg_access_unlock(ioc->pdev);
49208         ioc_info(ioc, "diag reset: SUCCESS\n");
49209         return 0;
49211   out:
49212 +       pci_cfg_access_unlock(ioc->pdev);
49213         ioc_err(ioc, "diag reset: FAILED\n");
49214         return -EFAULT;
49216 diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
49217 index 44f9a05db94e..2ec11be62a82 100644
49218 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
49219 +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
49220 @@ -2507,7 +2507,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
49221                     __func__, karg.unique_id);
49222                 return -EPERM;
49223         }
49224 -       memset(&karg.buffer_rel_condition, 0, sizeof(struct htb_rel_query));
49225 +       memset(&karg.rel_query, 0, sizeof(karg.rel_query));
49226         if ((ioc->diag_buffer_status[buffer_type] &
49227             MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
49228                 ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
49229 @@ -2520,8 +2520,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
49230                     __func__, buffer_type);
49231                 return -EPERM;
49232         }
49233 -       memcpy(&karg.buffer_rel_condition, &ioc->htb_rel,
49234 -           sizeof(struct  htb_rel_query));
49235 +       memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query));
49236  out:
49237         if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
49238                 ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
49239 diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
49240 index d2ccdafb8df2..8f6ffb40261c 100644
49241 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
49242 +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
49243 @@ -50,6 +50,8 @@
49244  #include <linux/miscdevice.h>
49245  #endif
49247 +#include "mpt3sas_base.h"
49249  #ifndef MPT2SAS_MINOR
49250  #define MPT2SAS_MINOR          (MPT_MINOR + 1)
49251  #endif
49252 @@ -436,19 +438,13 @@ struct mpt3_diag_read_buffer {
49253   * struct mpt3_addnl_diag_query - diagnostic buffer release reason
49254   * @hdr - generic header
49255   * @unique_id - unique id associated with this buffer.
49256 - * @buffer_rel_condition - Release condition ioctl/sysfs/reset
49257 - * @reserved1
49258 - * @trigger_type - Master/Event/scsi/MPI
49259 - * @trigger_info_dwords - Data Correspondig to trigger type
49260 + * @rel_query - release query.
49261   * @reserved2
49262   */
49263  struct mpt3_addnl_diag_query {
49264         struct mpt3_ioctl_header hdr;
49265         uint32_t unique_id;
49266 -       uint16_t buffer_rel_condition;
49267 -       uint16_t reserved1;
49268 -       uint32_t trigger_type;
49269 -       uint32_t trigger_info_dwords[2];
49270 +       struct htb_rel_query rel_query;
49271         uint32_t reserved2[2];
49272  };
49274 diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
49275 index 6aa6de729187..ae1973878cc7 100644
49276 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
49277 +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
49278 @@ -6483,6 +6483,9 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
49279                 if (!vphy)
49280                         return NULL;
49282 +               if (!port->vphys_mask)
49283 +                       INIT_LIST_HEAD(&port->vphys_list);
49285                 /*
49286                  * Enable bit corresponding to HBA phy number on its
49287                  * parent hba_port object's vphys_mask field.
49288 @@ -6490,7 +6493,6 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
49289                 port->vphys_mask |= (1 << phy_num);
49290                 vphy->phy_mask |= (1 << phy_num);
49292 -               INIT_LIST_HEAD(&port->vphys_list);
49293                 list_add_tail(&vphy->list, &port->vphys_list);
49295                 ioc_info(ioc,
49296 diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
49297 index 31e5455d280c..1b1a57f46989 100644
49298 --- a/drivers/scsi/pm8001/pm8001_hwi.c
49299 +++ b/drivers/scsi/pm8001/pm8001_hwi.c
49300 @@ -643,7 +643,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
49301   */
49302  static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
49304 -       u8 i = 0;
49305 +       u32 i = 0;
49306         u16 deviceid;
49307         pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
49308         /* 8081 controllers need BAR shift to access MPI space
49309 diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
49310 index 84315560e8e1..c6b0834e3806 100644
49311 --- a/drivers/scsi/pm8001/pm80xx_hwi.c
49312 +++ b/drivers/scsi/pm8001/pm80xx_hwi.c
49313 @@ -1502,9 +1502,9 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
49315         /* wait until Inbound DoorBell Clear Register toggled */
49316         if (IS_SPCV_12G(pm8001_ha->pdev)) {
49317 -               max_wait_count = 4 * 1000 * 1000;/* 4 sec */
49318 +               max_wait_count = 30 * 1000 * 1000; /* 30 sec */
49319         } else {
49320 -               max_wait_count = 2 * 1000 * 1000;/* 2 sec */
49321 +               max_wait_count = 15 * 1000 * 1000; /* 15 sec */
49322         }
49323         do {
49324                 udelay(1);
49325 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
49326 index 63391c9be05d..3aa9869f6fae 100644
49327 --- a/drivers/scsi/qla2xxx/qla_attr.c
49328 +++ b/drivers/scsi/qla2xxx/qla_attr.c
49329 @@ -2864,6 +2864,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
49330         vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
49332         if (IS_FWI2_CAPABLE(ha)) {
49333 +               int rval;
49335                 stats = dma_alloc_coherent(&ha->pdev->dev,
49336                     sizeof(*stats), &stats_dma, GFP_KERNEL);
49337                 if (!stats) {
49338 @@ -2873,7 +2875,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
49339                 }
49341                 /* reset firmware statistics */
49342 -               qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
49343 +               rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
49344 +               if (rval != QLA_SUCCESS)
49345 +                       ql_log(ql_log_warn, vha, 0x70de,
49346 +                              "Resetting ISP statistics failed: rval = %d\n",
49347 +                              rval);
49349                 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
49350                     stats, stats_dma);
49351 diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
49352 index bee8cf9f8123..aef2f7cc89d3 100644
49353 --- a/drivers/scsi/qla2xxx/qla_bsg.c
49354 +++ b/drivers/scsi/qla2xxx/qla_bsg.c
49355 @@ -25,10 +25,11 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
49356         struct bsg_job *bsg_job = sp->u.bsg_job;
49357         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
49359 +       sp->free(sp);
49361         bsg_reply->result = res;
49362         bsg_job_done(bsg_job, bsg_reply->result,
49363                        bsg_reply->reply_payload_rcv_len);
49364 -       sp->free(sp);
49367  void qla2x00_bsg_sp_free(srb_t *sp)
49368 @@ -2583,6 +2584,10 @@ qla2x00_get_host_stats(struct bsg_job *bsg_job)
49369         }
49371         data = kzalloc(response_len, GFP_KERNEL);
49372 +       if (!data) {
49373 +               kfree(req_data);
49374 +               return -ENOMEM;
49375 +       }
49377         ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
49378                                     data, response_len);
49379 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
49380 index f01f07116bd3..8cb0574cfa91 100644
49381 --- a/drivers/scsi/qla2xxx/qla_init.c
49382 +++ b/drivers/scsi/qla2xxx/qla_init.c
49383 @@ -1194,6 +1194,9 @@ static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
49385         struct qla_work_evt *e;
49387 +       if (vha->host->active_mode == MODE_TARGET)
49388 +               return QLA_FUNCTION_FAILED;
49390         e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
49391         if (!e)
49392                 return QLA_FUNCTION_FAILED;
49393 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
49394 index 5e188375c871..af4831c9edf9 100644
49395 --- a/drivers/scsi/qla2xxx/qla_isr.c
49396 +++ b/drivers/scsi/qla2xxx/qla_isr.c
49397 @@ -4005,11 +4005,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
49398         if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
49399                 /* user wants to control IRQ setting for target mode */
49400                 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
49401 -                   min((u16)ha->msix_count, (u16)num_online_cpus()),
49402 +                   min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
49403                     PCI_IRQ_MSIX);
49404         } else
49405                 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
49406 -                   min((u16)ha->msix_count, (u16)num_online_cpus()),
49407 +                   min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
49408                     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
49409                     &desc);
49411 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
49412 index 074392560f3d..0e07b98dfae8 100644
49413 --- a/drivers/scsi/qla2xxx/qla_os.c
49414 +++ b/drivers/scsi/qla2xxx/qla_os.c
49415 @@ -1013,8 +1013,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
49416         if (rval != QLA_SUCCESS) {
49417                 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
49418                     "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
49419 -               if (rval == QLA_INTERFACE_ERROR)
49420 -                       goto qc24_free_sp_fail_command;
49421                 goto qc24_host_busy_free_sp;
49422         }
49424 @@ -1026,11 +1024,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
49425  qc24_target_busy:
49426         return SCSI_MLQUEUE_TARGET_BUSY;
49428 -qc24_free_sp_fail_command:
49429 -       sp->free(sp);
49430 -       CMD_SP(cmd) = NULL;
49431 -       qla2xxx_rel_qpair_sp(sp->qpair, sp);
49433  qc24_fail_command:
49434         cmd->scsi_done(cmd);
49436 diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
49437 index a1dacb6e993e..c30f6047410f 100644
49438 --- a/drivers/scsi/smartpqi/smartpqi_init.c
49439 +++ b/drivers/scsi/smartpqi/smartpqi_init.c
49440 @@ -5488,6 +5488,8 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
49442                                 list_del(&io_request->request_list_entry);
49443                                 set_host_byte(scmd, DID_RESET);
49444 +                               pqi_free_io_request(io_request);
49445 +                               scsi_dma_unmap(scmd);
49446                                 pqi_scsi_done(scmd);
49447                         }
49449 @@ -5524,6 +5526,8 @@ static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
49451                                 list_del(&io_request->request_list_entry);
49452                                 set_host_byte(scmd, DID_RESET);
49453 +                               pqi_free_io_request(io_request);
49454 +                               scsi_dma_unmap(scmd);
49455                                 pqi_scsi_done(scmd);
49456                         }
49458 @@ -6598,6 +6602,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
49459         shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
49460         shost->unique_id = shost->irq;
49461         shost->nr_hw_queues = ctrl_info->num_queue_groups;
49462 +       shost->host_tagset = 1;
49463         shost->hostdata[0] = (unsigned long)ctrl_info;
49465         rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
49466 @@ -8216,6 +8221,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
49467                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49468                                0x152d, 0x8a37)
49469         },
49470 +       {
49471 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49472 +                              0x193d, 0x8460)
49473 +       },
49474         {
49475                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49476                                0x193d, 0x1104)
49477 @@ -8288,6 +8297,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
49478                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49479                                0x1bd4, 0x004f)
49480         },
49481 +       {
49482 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49483 +                              0x1bd4, 0x0051)
49484 +       },
49485 +       {
49486 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49487 +                              0x1bd4, 0x0052)
49488 +       },
49489 +       {
49490 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49491 +                              0x1bd4, 0x0053)
49492 +       },
49493 +       {
49494 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49495 +                              0x1bd4, 0x0054)
49496 +       },
49497         {
49498                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49499                                0x19e5, 0xd227)
49500 @@ -8448,6 +8473,122 @@ static const struct pci_device_id pqi_pci_id_table[] = {
49501                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49502                                PCI_VENDOR_ID_ADAPTEC2, 0x1380)
49503         },
49504 +       {
49505 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49506 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1400)
49507 +       },
49508 +       {
49509 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49510 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1402)
49511 +       },
49512 +       {
49513 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49514 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1410)
49515 +       },
49516 +       {
49517 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49518 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1411)
49519 +       },
49520 +       {
49521 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49522 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1412)
49523 +       },
49524 +       {
49525 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49526 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1420)
49527 +       },
49528 +       {
49529 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49530 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1430)
49531 +       },
49532 +       {
49533 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49534 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1440)
49535 +       },
49536 +       {
49537 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49538 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1441)
49539 +       },
49540 +       {
49541 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49542 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1450)
49543 +       },
49544 +       {
49545 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49546 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1452)
49547 +       },
49548 +       {
49549 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49550 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1460)
49551 +       },
49552 +       {
49553 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49554 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1461)
49555 +       },
49556 +       {
49557 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49558 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1462)
49559 +       },
49560 +       {
49561 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49562 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1470)
49563 +       },
49564 +       {
49565 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49566 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1471)
49567 +       },
49568 +       {
49569 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49570 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1472)
49571 +       },
49572 +       {
49573 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49574 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1480)
49575 +       },
49576 +       {
49577 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49578 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1490)
49579 +       },
49580 +       {
49581 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49582 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1491)
49583 +       },
49584 +       {
49585 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49586 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
49587 +       },
49588 +       {
49589 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49590 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
49591 +       },
49592 +       {
49593 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49594 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
49595 +       },
49596 +       {
49597 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49598 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
49599 +       },
49600 +       {
49601 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49602 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
49603 +       },
49604 +       {
49605 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49606 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
49607 +       },
49608 +       {
49609 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49610 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
49611 +       },
49612 +       {
49613 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49614 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
49615 +       },
49616 +       {
49617 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49618 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
49619 +       },
49620         {
49621                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49622                                PCI_VENDOR_ID_ADVANTECH, 0x8312)
49623 @@ -8512,6 +8653,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
49624                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49625                                PCI_VENDOR_ID_HP, 0x1001)
49626         },
49627 +       {
49628 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49629 +                              PCI_VENDOR_ID_HP, 0x1002)
49630 +       },
49631         {
49632                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49633                                PCI_VENDOR_ID_HP, 0x1100)
49634 @@ -8520,6 +8665,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
49635                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49636                                PCI_VENDOR_ID_HP, 0x1101)
49637         },
49638 +       {
49639 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49640 +                              0x1590, 0x0294)
49641 +       },
49642 +       {
49643 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49644 +                              0x1590, 0x02db)
49645 +       },
49646 +       {
49647 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49648 +                              0x1590, 0x02dc)
49649 +       },
49650 +       {
49651 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49652 +                              0x1590, 0x032e)
49653 +       },
49654         {
49655                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
49656                                0x1d8d, 0x0800)
49657 diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
49658 index 9e2e196bc202..97c6f81b1d2a 100644
49659 --- a/drivers/scsi/sni_53c710.c
49660 +++ b/drivers/scsi/sni_53c710.c
49661 @@ -58,6 +58,7 @@ static int snirm710_probe(struct platform_device *dev)
49662         struct NCR_700_Host_Parameters *hostdata;
49663         struct Scsi_Host *host;
49664         struct  resource *res;
49665 +       int rc;
49667         res = platform_get_resource(dev, IORESOURCE_MEM, 0);
49668         if (!res)
49669 @@ -83,7 +84,9 @@ static int snirm710_probe(struct platform_device *dev)
49670                 goto out_kfree;
49671         host->this_id = 7;
49672         host->base = base;
49673 -       host->irq = platform_get_irq(dev, 0);
49674 +       host->irq = rc = platform_get_irq(dev, 0);
49675 +       if (rc < 0)
49676 +               goto out_put_host;
49677         if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
49678                 printk(KERN_ERR "snirm710: request_irq failed!\n");
49679                 goto out_put_host;
49680 diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
49681 index 6dd0ff188bb4..aedf0b78f622 100644
49682 --- a/drivers/scsi/snic/snic_scsi.c
49683 +++ b/drivers/scsi/snic/snic_scsi.c
49684 @@ -2349,7 +2349,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
49686         /* Wait for all the IOs that are entered in Qcmd */
49687         while (atomic_read(&snic->ios_inflight))
49688 -               schedule_timeout(msecs_to_jiffies(1));
49689 +               schedule_msec_hrtimeout((1));
49691         ret = snic_issue_hba_reset(snic, sc);
49692         if (ret) {
49693 diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
49694 index 7de82f2c9757..d3489ac7ab28 100644
49695 --- a/drivers/scsi/sun3x_esp.c
49696 +++ b/drivers/scsi/sun3x_esp.c
49697 @@ -206,7 +206,9 @@ static int esp_sun3x_probe(struct platform_device *dev)
49698         if (!esp->command_block)
49699                 goto fail_unmap_regs_dma;
49701 -       host->irq = platform_get_irq(dev, 0);
49702 +       host->irq = err = platform_get_irq(dev, 0);
49703 +       if (err < 0)
49704 +               goto fail_unmap_command_block;
49705         err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
49706                           "SUN3X ESP", esp);
49707         if (err < 0)
49708 diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
49709 index 1a69949a4ea1..b56d9b4e5f03 100644
49710 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c
49711 +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
49712 @@ -377,7 +377,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
49714         irq = platform_get_irq(pdev, 0);
49715         if (irq < 0) {
49716 -               err = -ENODEV;
49717 +               err = irq;
49718                 goto out;
49719         }
49721 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
49722 index d3d05e997c13..0c71a159d08f 100644
49723 --- a/drivers/scsi/ufs/ufshcd.c
49724 +++ b/drivers/scsi/ufs/ufshcd.c
49725 @@ -8599,7 +8599,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
49726         } else if (!ufshcd_is_ufs_dev_active(hba)) {
49727                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
49728                 vcc_off = true;
49729 -               if (!ufshcd_is_link_active(hba)) {
49730 +               if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
49731                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
49732                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
49733                 }
49734 @@ -8621,7 +8621,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
49735             !hba->dev_info.is_lu_power_on_wp) {
49736                 ret = ufshcd_setup_vreg(hba, true);
49737         } else if (!ufshcd_is_ufs_dev_active(hba)) {
49738 -               if (!ret && !ufshcd_is_link_active(hba)) {
49739 +               if (!ufshcd_is_link_active(hba)) {
49740                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
49741                         if (ret)
49742                                 goto vcc_disable;
49743 @@ -8978,10 +8978,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
49744         if (!hba->is_powered)
49745                 return 0;
49747 +       cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
49749         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
49750              hba->curr_dev_pwr_mode) &&
49751             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
49752              hba->uic_link_state) &&
49753 +            pm_runtime_suspended(hba->dev) &&
49754              !hba->dev_info.b_rpm_dev_flush_capable)
49755                 goto out;
49757 diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
49758 index 20acac6342ef..5828f94b8a7d 100644
49759 --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
49760 +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
49761 @@ -95,8 +95,10 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
49762                         return -EINTR;
49763         }
49764         ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
49765 +       if (ret)
49766 +               return ret;
49768 -       return ret ? ret : copied;
49769 +       return copied;
49772  static __poll_t snoop_file_poll(struct file *file,
49773 diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h
49774 index 3e8ee5dabb43..654c717e5467 100644
49775 --- a/drivers/soc/mediatek/mt8173-pm-domains.h
49776 +++ b/drivers/soc/mediatek/mt8173-pm-domains.h
49777 @@ -12,24 +12,28 @@
49779  static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
49780         [MT8173_POWER_DOMAIN_VDEC] = {
49781 +               .name = "vdec",
49782                 .sta_mask = PWR_STATUS_VDEC,
49783                 .ctl_offs = SPM_VDE_PWR_CON,
49784                 .sram_pdn_bits = GENMASK(11, 8),
49785                 .sram_pdn_ack_bits = GENMASK(12, 12),
49786         },
49787         [MT8173_POWER_DOMAIN_VENC] = {
49788 +               .name = "venc",
49789                 .sta_mask = PWR_STATUS_VENC,
49790                 .ctl_offs = SPM_VEN_PWR_CON,
49791                 .sram_pdn_bits = GENMASK(11, 8),
49792                 .sram_pdn_ack_bits = GENMASK(15, 12),
49793         },
49794         [MT8173_POWER_DOMAIN_ISP] = {
49795 +               .name = "isp",
49796                 .sta_mask = PWR_STATUS_ISP,
49797                 .ctl_offs = SPM_ISP_PWR_CON,
49798                 .sram_pdn_bits = GENMASK(11, 8),
49799                 .sram_pdn_ack_bits = GENMASK(13, 12),
49800         },
49801         [MT8173_POWER_DOMAIN_MM] = {
49802 +               .name = "mm",
49803                 .sta_mask = PWR_STATUS_DISP,
49804                 .ctl_offs = SPM_DIS_PWR_CON,
49805                 .sram_pdn_bits = GENMASK(11, 8),
49806 @@ -40,18 +44,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
49807                 },
49808         },
49809         [MT8173_POWER_DOMAIN_VENC_LT] = {
49810 +               .name = "venc_lt",
49811                 .sta_mask = PWR_STATUS_VENC_LT,
49812                 .ctl_offs = SPM_VEN2_PWR_CON,
49813                 .sram_pdn_bits = GENMASK(11, 8),
49814                 .sram_pdn_ack_bits = GENMASK(15, 12),
49815         },
49816         [MT8173_POWER_DOMAIN_AUDIO] = {
49817 +               .name = "audio",
49818                 .sta_mask = PWR_STATUS_AUDIO,
49819                 .ctl_offs = SPM_AUDIO_PWR_CON,
49820                 .sram_pdn_bits = GENMASK(11, 8),
49821                 .sram_pdn_ack_bits = GENMASK(15, 12),
49822         },
49823         [MT8173_POWER_DOMAIN_USB] = {
49824 +               .name = "usb",
49825                 .sta_mask = PWR_STATUS_USB,
49826                 .ctl_offs = SPM_USB_PWR_CON,
49827                 .sram_pdn_bits = GENMASK(11, 8),
49828 @@ -59,18 +66,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
49829                 .caps = MTK_SCPD_ACTIVE_WAKEUP,
49830         },
49831         [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
49832 +               .name = "mfg_async",
49833                 .sta_mask = PWR_STATUS_MFG_ASYNC,
49834                 .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
49835                 .sram_pdn_bits = GENMASK(11, 8),
49836                 .sram_pdn_ack_bits = 0,
49837         },
49838         [MT8173_POWER_DOMAIN_MFG_2D] = {
49839 +               .name = "mfg_2d",
49840                 .sta_mask = PWR_STATUS_MFG_2D,
49841                 .ctl_offs = SPM_MFG_2D_PWR_CON,
49842                 .sram_pdn_bits = GENMASK(11, 8),
49843                 .sram_pdn_ack_bits = GENMASK(13, 12),
49844         },
49845         [MT8173_POWER_DOMAIN_MFG] = {
49846 +               .name = "mfg",
49847                 .sta_mask = PWR_STATUS_MFG,
49848                 .ctl_offs = SPM_MFG_PWR_CON,
49849                 .sram_pdn_bits = GENMASK(13, 8),
49850 diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
49851 index aa5230e6c12f..98a9940d05fb 100644
49852 --- a/drivers/soc/mediatek/mt8183-pm-domains.h
49853 +++ b/drivers/soc/mediatek/mt8183-pm-domains.h
49854 @@ -12,12 +12,14 @@
49856  static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
49857         [MT8183_POWER_DOMAIN_AUDIO] = {
49858 +               .name = "audio",
49859                 .sta_mask = PWR_STATUS_AUDIO,
49860                 .ctl_offs = 0x0314,
49861                 .sram_pdn_bits = GENMASK(11, 8),
49862                 .sram_pdn_ack_bits = GENMASK(15, 12),
49863         },
49864         [MT8183_POWER_DOMAIN_CONN] = {
49865 +               .name = "conn",
49866                 .sta_mask = PWR_STATUS_CONN,
49867                 .ctl_offs = 0x032c,
49868                 .sram_pdn_bits = 0,
49869 @@ -28,12 +30,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
49870                 },
49871         },
49872         [MT8183_POWER_DOMAIN_MFG_ASYNC] = {
49873 +               .name = "mfg_async",
49874                 .sta_mask = PWR_STATUS_MFG_ASYNC,
49875                 .ctl_offs = 0x0334,
49876                 .sram_pdn_bits = 0,
49877                 .sram_pdn_ack_bits = 0,
49878         },
49879         [MT8183_POWER_DOMAIN_MFG] = {
49880 +               .name = "mfg",
49881                 .sta_mask = PWR_STATUS_MFG,
49882                 .ctl_offs = 0x0338,
49883                 .sram_pdn_bits = GENMASK(8, 8),
49884 @@ -41,18 +45,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
49885                 .caps = MTK_SCPD_DOMAIN_SUPPLY,
49886         },
49887         [MT8183_POWER_DOMAIN_MFG_CORE0] = {
49888 +               .name = "mfg_core0",
49889                 .sta_mask = BIT(7),
49890                 .ctl_offs = 0x034c,
49891                 .sram_pdn_bits = GENMASK(8, 8),
49892                 .sram_pdn_ack_bits = GENMASK(12, 12),
49893         },
49894         [MT8183_POWER_DOMAIN_MFG_CORE1] = {
49895 +               .name = "mfg_core1",
49896                 .sta_mask = BIT(20),
49897                 .ctl_offs = 0x0310,
49898                 .sram_pdn_bits = GENMASK(8, 8),
49899                 .sram_pdn_ack_bits = GENMASK(12, 12),
49900         },
49901         [MT8183_POWER_DOMAIN_MFG_2D] = {
49902 +               .name = "mfg_2d",
49903                 .sta_mask = PWR_STATUS_MFG_2D,
49904                 .ctl_offs = 0x0348,
49905                 .sram_pdn_bits = GENMASK(8, 8),
49906 @@ -65,6 +72,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
49907                 },
49908         },
49909         [MT8183_POWER_DOMAIN_DISP] = {
49910 +               .name = "disp",
49911                 .sta_mask = PWR_STATUS_DISP,
49912                 .ctl_offs = 0x030c,
49913                 .sram_pdn_bits = GENMASK(8, 8),
49914 @@ -83,6 +91,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
49915                 },
49916         },
49917         [MT8183_POWER_DOMAIN_CAM] = {
49918 +               .name = "cam",
49919                 .sta_mask = BIT(25),
49920                 .ctl_offs = 0x0344,
49921                 .sram_pdn_bits = GENMASK(9, 8),
49922 @@ -105,6 +114,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
49923                 },
49924         },
49925         [MT8183_POWER_DOMAIN_ISP] = {
49926 +               .name = "isp",
49927                 .sta_mask = PWR_STATUS_ISP,
49928                 .ctl_offs = 0x0308,
49929                 .sram_pdn_bits = GENMASK(9, 8),
49930 @@ -127,6 +137,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
49931                 },
49932         },
49933         [MT8183_POWER_DOMAIN_VDEC] = {
49934 +               .name = "vdec",
49935                 .sta_mask = BIT(31),
49936                 .ctl_offs = 0x0300,
49937                 .sram_pdn_bits = GENMASK(8, 8),
49938 @@ -139,6 +150,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
49939                 },
49940         },
49941         [MT8183_POWER_DOMAIN_VENC] = {
49942 +               .name = "venc",
49943                 .sta_mask = PWR_STATUS_VENC,
49944                 .ctl_offs = 0x0304,
49945                 .sram_pdn_bits = GENMASK(11, 8),
49946 @@ -151,6 +163,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
49947                 },
49948         },
49949         [MT8183_POWER_DOMAIN_VPU_TOP] = {
49950 +               .name = "vpu_top",
49951                 .sta_mask = BIT(26),
49952                 .ctl_offs = 0x0324,
49953                 .sram_pdn_bits = GENMASK(8, 8),
49954 @@ -177,6 +190,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
49955                 },
49956         },
49957         [MT8183_POWER_DOMAIN_VPU_CORE0] = {
49958 +               .name = "vpu_core0",
49959                 .sta_mask = BIT(27),
49960                 .ctl_offs = 0x33c,
49961                 .sram_pdn_bits = GENMASK(11, 8),
49962 @@ -194,6 +208,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
49963                 .caps = MTK_SCPD_SRAM_ISO,
49964         },
49965         [MT8183_POWER_DOMAIN_VPU_CORE1] = {
49966 +               .name = "vpu_core1",
49967                 .sta_mask = BIT(28),
49968                 .ctl_offs = 0x0340,
49969                 .sram_pdn_bits = GENMASK(11, 8),
49970 diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
49971 index 0fdf6dc6231f..543dda70de01 100644
49972 --- a/drivers/soc/mediatek/mt8192-pm-domains.h
49973 +++ b/drivers/soc/mediatek/mt8192-pm-domains.h
49974 @@ -12,6 +12,7 @@
49976  static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
49977         [MT8192_POWER_DOMAIN_AUDIO] = {
49978 +               .name = "audio",
49979                 .sta_mask = BIT(21),
49980                 .ctl_offs = 0x0354,
49981                 .sram_pdn_bits = GENMASK(8, 8),
49982 @@ -24,6 +25,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
49983                 },
49984         },
49985         [MT8192_POWER_DOMAIN_CONN] = {
49986 +               .name = "conn",
49987                 .sta_mask = PWR_STATUS_CONN,
49988                 .ctl_offs = 0x0304,
49989                 .sram_pdn_bits = 0,
49990 @@ -45,12 +47,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
49991                 .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
49992         },
49993         [MT8192_POWER_DOMAIN_MFG0] = {
49994 +               .name = "mfg0",
49995                 .sta_mask = BIT(2),
49996                 .ctl_offs = 0x0308,
49997                 .sram_pdn_bits = GENMASK(8, 8),
49998                 .sram_pdn_ack_bits = GENMASK(12, 12),
49999         },
50000         [MT8192_POWER_DOMAIN_MFG1] = {
50001 +               .name = "mfg1",
50002                 .sta_mask = BIT(3),
50003                 .ctl_offs = 0x030c,
50004                 .sram_pdn_bits = GENMASK(8, 8),
50005 @@ -75,36 +79,42 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
50006                 },
50007         },
50008         [MT8192_POWER_DOMAIN_MFG2] = {
50009 +               .name = "mfg2",
50010                 .sta_mask = BIT(4),
50011                 .ctl_offs = 0x0310,
50012                 .sram_pdn_bits = GENMASK(8, 8),
50013                 .sram_pdn_ack_bits = GENMASK(12, 12),
50014         },
50015         [MT8192_POWER_DOMAIN_MFG3] = {
50016 +               .name = "mfg3",
50017                 .sta_mask = BIT(5),
50018                 .ctl_offs = 0x0314,
50019                 .sram_pdn_bits = GENMASK(8, 8),
50020                 .sram_pdn_ack_bits = GENMASK(12, 12),
50021         },
50022         [MT8192_POWER_DOMAIN_MFG4] = {
50023 +               .name = "mfg4",
50024                 .sta_mask = BIT(6),
50025                 .ctl_offs = 0x0318,
50026                 .sram_pdn_bits = GENMASK(8, 8),
50027                 .sram_pdn_ack_bits = GENMASK(12, 12),
50028         },
50029         [MT8192_POWER_DOMAIN_MFG5] = {
50030 +               .name = "mfg5",
50031                 .sta_mask = BIT(7),
50032                 .ctl_offs = 0x031c,
50033                 .sram_pdn_bits = GENMASK(8, 8),
50034                 .sram_pdn_ack_bits = GENMASK(12, 12),
50035         },
50036         [MT8192_POWER_DOMAIN_MFG6] = {
50037 +               .name = "mfg6",
50038                 .sta_mask = BIT(8),
50039                 .ctl_offs = 0x0320,
50040                 .sram_pdn_bits = GENMASK(8, 8),
50041                 .sram_pdn_ack_bits = GENMASK(12, 12),
50042         },
50043         [MT8192_POWER_DOMAIN_DISP] = {
50044 +               .name = "disp",
50045                 .sta_mask = BIT(20),
50046                 .ctl_offs = 0x0350,
50047                 .sram_pdn_bits = GENMASK(8, 8),
50048 @@ -133,6 +143,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
50049                 },
50050         },
50051         [MT8192_POWER_DOMAIN_IPE] = {
50052 +               .name = "ipe",
50053                 .sta_mask = BIT(14),
50054                 .ctl_offs = 0x0338,
50055                 .sram_pdn_bits = GENMASK(8, 8),
50056 @@ -149,6 +160,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
50057                 },
50058         },
50059         [MT8192_POWER_DOMAIN_ISP] = {
50060 +               .name = "isp",
50061                 .sta_mask = BIT(12),
50062                 .ctl_offs = 0x0330,
50063                 .sram_pdn_bits = GENMASK(8, 8),
50064 @@ -165,6 +177,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
50065                 },
50066         },
50067         [MT8192_POWER_DOMAIN_ISP2] = {
50068 +               .name = "isp2",
50069                 .sta_mask = BIT(13),
50070                 .ctl_offs = 0x0334,
50071                 .sram_pdn_bits = GENMASK(8, 8),
50072 @@ -181,6 +194,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
50073                 },
50074         },
50075         [MT8192_POWER_DOMAIN_MDP] = {
50076 +               .name = "mdp",
50077                 .sta_mask = BIT(19),
50078                 .ctl_offs = 0x034c,
50079                 .sram_pdn_bits = GENMASK(8, 8),
50080 @@ -197,6 +211,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
50081                 },
50082         },
50083         [MT8192_POWER_DOMAIN_VENC] = {
50084 +               .name = "venc",
50085                 .sta_mask = BIT(17),
50086                 .ctl_offs = 0x0344,
50087                 .sram_pdn_bits = GENMASK(8, 8),
50088 @@ -213,6 +228,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
50089                 },
50090         },
50091         [MT8192_POWER_DOMAIN_VDEC] = {
50092 +               .name = "vdec",
50093                 .sta_mask = BIT(15),
50094                 .ctl_offs = 0x033c,
50095                 .sram_pdn_bits = GENMASK(8, 8),
50096 @@ -229,12 +245,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
50097                 },
50098         },
50099         [MT8192_POWER_DOMAIN_VDEC2] = {
50100 +               .name = "vdec2",
50101                 .sta_mask = BIT(16),
50102                 .ctl_offs = 0x0340,
50103                 .sram_pdn_bits = GENMASK(8, 8),
50104                 .sram_pdn_ack_bits = GENMASK(12, 12),
50105         },
50106         [MT8192_POWER_DOMAIN_CAM] = {
50107 +               .name = "cam",
50108                 .sta_mask = BIT(23),
50109                 .ctl_offs = 0x035c,
50110                 .sram_pdn_bits = GENMASK(8, 8),
50111 @@ -263,18 +281,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
50112                 },
50113         },
50114         [MT8192_POWER_DOMAIN_CAM_RAWA] = {
50115 +               .name = "cam_rawa",
50116                 .sta_mask = BIT(24),
50117                 .ctl_offs = 0x0360,
50118                 .sram_pdn_bits = GENMASK(8, 8),
50119                 .sram_pdn_ack_bits = GENMASK(12, 12),
50120         },
50121         [MT8192_POWER_DOMAIN_CAM_RAWB] = {
50122 +               .name = "cam_rawb",
50123                 .sta_mask = BIT(25),
50124                 .ctl_offs = 0x0364,
50125                 .sram_pdn_bits = GENMASK(8, 8),
50126                 .sram_pdn_ack_bits = GENMASK(12, 12),
50127         },
50128         [MT8192_POWER_DOMAIN_CAM_RAWC] = {
50129 +               .name = "cam_rawc",
50130                 .sta_mask = BIT(26),
50131                 .ctl_offs = 0x0368,
50132                 .sram_pdn_bits = GENMASK(8, 8),
50133 diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
50134 index b7f697666bdd..0af00efa0ef8 100644
50135 --- a/drivers/soc/mediatek/mtk-pm-domains.c
50136 +++ b/drivers/soc/mediatek/mtk-pm-domains.c
50137 @@ -438,7 +438,11 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
50138                 goto err_unprepare_subsys_clocks;
50139         }
50141 -       pd->genpd.name = node->name;
50142 +       if (!pd->data->name)
50143 +               pd->genpd.name = node->name;
50144 +       else
50145 +               pd->genpd.name = pd->data->name;
50147         pd->genpd.power_off = scpsys_power_off;
50148         pd->genpd.power_on = scpsys_power_on;
50150 @@ -487,8 +491,9 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
50152                 child_pd = scpsys_add_one_domain(scpsys, child);
50153                 if (IS_ERR(child_pd)) {
50154 -                       dev_err_probe(scpsys->dev, PTR_ERR(child_pd),
50155 -                                     "%pOF: failed to get child domain id\n", child);
50156 +                       ret = PTR_ERR(child_pd);
50157 +                       dev_err_probe(scpsys->dev, ret, "%pOF: failed to get child domain id\n",
50158 +                                     child);
50159                         goto err_put_node;
50160                 }
50162 diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
50163 index 141dc76054e6..21a4e113bbec 100644
50164 --- a/drivers/soc/mediatek/mtk-pm-domains.h
50165 +++ b/drivers/soc/mediatek/mtk-pm-domains.h
50166 @@ -76,6 +76,7 @@ struct scpsys_bus_prot_data {
50168  /**
50169   * struct scpsys_domain_data - scp domain data for power on/off flow
50170 + * @name: The name of the power domain.
50171   * @sta_mask: The mask for power on/off status bit.
50172   * @ctl_offs: The offset for main power control register.
50173   * @sram_pdn_bits: The mask for sram power control bits.
50174 @@ -85,6 +86,7 @@ struct scpsys_bus_prot_data {
50175   * @bp_smi: bus protection for smi subsystem
50176   */
50177  struct scpsys_domain_data {
50178 +       const char *name;
50179         u32 sta_mask;
50180         int ctl_offs;
50181         u32 sram_pdn_bits;
50182 diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
50183 index 24cd193dec55..eba7f76f9d61 100644
50184 --- a/drivers/soc/qcom/mdt_loader.c
50185 +++ b/drivers/soc/qcom/mdt_loader.c
50186 @@ -230,6 +230,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
50187                         break;
50188                 }
50190 +               if (phdr->p_filesz > phdr->p_memsz) {
50191 +                       dev_err(dev,
50192 +                               "refusing to load segment %d with p_filesz > p_memsz\n",
50193 +                               i);
50194 +                       ret = -EINVAL;
50195 +                       break;
50196 +               }
50198                 ptr = mem_region + offset;
50200                 if (phdr->p_filesz && phdr->p_offset < fw->size) {
50201 @@ -253,6 +261,15 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
50202                                 break;
50203                         }
50205 +                       if (seg_fw->size != phdr->p_filesz) {
50206 +                               dev_err(dev,
50207 +                                       "failed to load segment %d from truncated file %s\n",
50208 +                                       i, fw_name);
50209 +                               release_firmware(seg_fw);
50210 +                               ret = -EINVAL;
50211 +                               break;
50212 +                       }
50214                         release_firmware(seg_fw);
50215                 }
50217 diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
50218 index 209dcdca923f..915d5bc3d46e 100644
50219 --- a/drivers/soc/qcom/pdr_interface.c
50220 +++ b/drivers/soc/qcom/pdr_interface.c
50221 @@ -153,7 +153,7 @@ static int pdr_register_listener(struct pdr_handle *pdr,
50222         if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
50223                 pr_err("PDR: %s register listener failed: 0x%x\n",
50224                        pds->service_path, resp.resp.error);
50225 -               return ret;
50226 +               return -EREMOTEIO;
50227         }
50229         pds->state = resp.curr_state;
50230 diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
50231 index df9a5ca8c99c..0118bd986f90 100644
50232 --- a/drivers/soc/tegra/pmc.c
50233 +++ b/drivers/soc/tegra/pmc.c
50234 @@ -317,6 +317,8 @@ struct tegra_pmc_soc {
50235                                    bool invert);
50236         int (*irq_set_wake)(struct irq_data *data, unsigned int on);
50237         int (*irq_set_type)(struct irq_data *data, unsigned int type);
50238 +       int (*powergate_set)(struct tegra_pmc *pmc, unsigned int id,
50239 +                            bool new_state);
50241         const char * const *reset_sources;
50242         unsigned int num_reset_sources;
50243 @@ -517,6 +519,63 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
50244         return -ENODEV;
50247 +static int tegra20_powergate_set(struct tegra_pmc *pmc, unsigned int id,
50248 +                                bool new_state)
50250 +       unsigned int retries = 100;
50251 +       bool status;
50252 +       int ret;
50254 +       /*
50255 +        * As per TRM documentation, the toggle command will be dropped by PMC
50256 +        * if there is contention with a HW-initiated toggling (i.e. CPU core
50257 +        * power-gated), the command should be retried in that case.
50258 +        */
50259 +       do {
50260 +               tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
50262 +               /* wait for PMC to execute the command */
50263 +               ret = readx_poll_timeout(tegra_powergate_state, id, status,
50264 +                                        status == new_state, 1, 10);
50265 +       } while (ret == -ETIMEDOUT && retries--);
50267 +       return ret;
50270 +static inline bool tegra_powergate_toggle_ready(struct tegra_pmc *pmc)
50272 +       return !(tegra_pmc_readl(pmc, PWRGATE_TOGGLE) & PWRGATE_TOGGLE_START);
50275 +static int tegra114_powergate_set(struct tegra_pmc *pmc, unsigned int id,
50276 +                                 bool new_state)
50278 +       bool status;
50279 +       int err;
50281 +       /* wait while PMC power gating is contended */
50282 +       err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
50283 +                                status == true, 1, 100);
50284 +       if (err)
50285 +               return err;
50287 +       tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
50289 +       /* wait for PMC to accept the command */
50290 +       err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
50291 +                                status == true, 1, 100);
50292 +       if (err)
50293 +               return err;
50295 +       /* wait for PMC to execute the command */
50296 +       err = readx_poll_timeout(tegra_powergate_state, id, status,
50297 +                                status == new_state, 10, 100000);
50298 +       if (err)
50299 +               return err;
50301 +       return 0;
50304  /**
50305   * tegra_powergate_set() - set the state of a partition
50306   * @pmc: power management controller
50307 @@ -526,7 +585,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
50308  static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
50309                                bool new_state)
50311 -       bool status;
50312         int err;
50314         if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
50315 @@ -539,10 +597,7 @@ static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
50316                 return 0;
50317         }
50319 -       tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
50321 -       err = readx_poll_timeout(tegra_powergate_state, id, status,
50322 -                                status == new_state, 10, 100000);
50323 +       err = pmc->soc->powergate_set(pmc, id, new_state);
50325         mutex_unlock(&pmc->powergates_lock);
50327 @@ -2699,6 +2754,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
50328         .regs = &tegra20_pmc_regs,
50329         .init = tegra20_pmc_init,
50330         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
50331 +       .powergate_set = tegra20_powergate_set,
50332         .reset_sources = NULL,
50333         .num_reset_sources = 0,
50334         .reset_levels = NULL,
50335 @@ -2757,6 +2813,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
50336         .regs = &tegra20_pmc_regs,
50337         .init = tegra20_pmc_init,
50338         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
50339 +       .powergate_set = tegra20_powergate_set,
50340         .reset_sources = tegra30_reset_sources,
50341         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
50342         .reset_levels = NULL,
50343 @@ -2811,6 +2868,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
50344         .regs = &tegra20_pmc_regs,
50345         .init = tegra20_pmc_init,
50346         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
50347 +       .powergate_set = tegra114_powergate_set,
50348         .reset_sources = tegra30_reset_sources,
50349         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
50350         .reset_levels = NULL,
50351 @@ -2925,6 +2983,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
50352         .regs = &tegra20_pmc_regs,
50353         .init = tegra20_pmc_init,
50354         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
50355 +       .powergate_set = tegra114_powergate_set,
50356         .reset_sources = tegra30_reset_sources,
50357         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
50358         .reset_levels = NULL,
50359 @@ -3048,6 +3107,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
50360         .regs = &tegra20_pmc_regs,
50361         .init = tegra20_pmc_init,
50362         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
50363 +       .powergate_set = tegra114_powergate_set,
50364         .irq_set_wake = tegra210_pmc_irq_set_wake,
50365         .irq_set_type = tegra210_pmc_irq_set_type,
50366         .reset_sources = tegra210_reset_sources,
50367 diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
50368 index 7f21f31de09d..0e776b20f625 100644
50369 --- a/drivers/soc/tegra/regulators-tegra30.c
50370 +++ b/drivers/soc/tegra/regulators-tegra30.c
50371 @@ -178,7 +178,7 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
50372          * survive the voltage drop if it's running on a higher frequency.
50373          */
50374         if (!cpu_min_uV_consumers)
50375 -               cpu_min_uV = cpu_uV;
50376 +               cpu_min_uV = max(cpu_uV, cpu_min_uV);
50378         /*
50379          * Bootloader shall set up voltages correctly, but if it
50380 diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
50381 index 46885429928a..4ec29338ce9a 100644
50382 --- a/drivers/soundwire/bus.c
50383 +++ b/drivers/soundwire/bus.c
50384 @@ -705,7 +705,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
50385         struct sdw_slave *slave, *_s;
50386         struct sdw_slave_id id;
50387         struct sdw_msg msg;
50388 -       bool found = false;
50389 +       bool found;
50390         int count = 0, ret;
50391         u64 addr;
50393 @@ -737,6 +737,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
50395                 sdw_extract_slave_id(bus, addr, &id);
50397 +               found = false;
50398                 /* Now compare with entries */
50399                 list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
50400                         if (sdw_compare_devid(slave, id) == 0) {
50401 diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
50402 index d05442e646a3..57c59a33ce61 100644
50403 --- a/drivers/soundwire/cadence_master.c
50404 +++ b/drivers/soundwire/cadence_master.c
50405 @@ -1450,10 +1450,12 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
50406         }
50408         /* Prepare slaves for clock stop */
50409 -       ret = sdw_bus_prep_clk_stop(&cdns->bus);
50410 -       if (ret < 0) {
50411 -               dev_err(cdns->dev, "prepare clock stop failed %d", ret);
50412 -               return ret;
50413 +       if (slave_present) {
50414 +               ret = sdw_bus_prep_clk_stop(&cdns->bus);
50415 +               if (ret < 0 && ret != -ENODATA) {
50416 +                       dev_err(cdns->dev, "prepare clock stop failed %d\n", ret);
50417 +                       return ret;
50418 +               }
50419         }
50421         /*
50422 diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
50423 index 1099b5d1262b..a418c3c7001c 100644
50424 --- a/drivers/soundwire/stream.c
50425 +++ b/drivers/soundwire/stream.c
50426 @@ -1375,8 +1375,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
50427         }
50429         ret = sdw_config_stream(&slave->dev, stream, stream_config, true);
50430 -       if (ret)
50431 +       if (ret) {
50432 +               /*
50433 +                * sdw_release_master_stream will release s_rt in slave_rt_list in
50434 +                * stream_error case, but s_rt is only added to slave_rt_list
50435 +                * when sdw_config_stream is successful, so free s_rt explicitly
50436 +                * when sdw_config_stream is failed.
50437 +                */
50438 +               kfree(s_rt);
50439                 goto stream_error;
50440 +       }
50442         list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list);
50444 diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
50445 index eb9a243e9526..98ace748cd98 100644
50446 --- a/drivers/spi/spi-ath79.c
50447 +++ b/drivers/spi/spi-ath79.c
50448 @@ -156,8 +156,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
50450         master->use_gpio_descriptors = true;
50451         master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
50452 -       master->setup = spi_bitbang_setup;
50453 -       master->cleanup = spi_bitbang_cleanup;
50454 +       master->flags = SPI_MASTER_GPIO_SS;
50455         if (pdata) {
50456                 master->bus_num = pdata->bus_num;
50457                 master->num_chipselect = pdata->num_chipselect;
50458 diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
50459 index 75b33d7d14b0..9a4d942fafcf 100644
50460 --- a/drivers/spi/spi-dln2.c
50461 +++ b/drivers/spi/spi-dln2.c
50462 @@ -780,7 +780,7 @@ static int dln2_spi_probe(struct platform_device *pdev)
50464  static int dln2_spi_remove(struct platform_device *pdev)
50466 -       struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
50467 +       struct spi_master *master = platform_get_drvdata(pdev);
50468         struct dln2_spi *dln2 = spi_master_get_devdata(master);
50470         pm_runtime_disable(&pdev->dev);
50471 diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
50472 index a2886ee44e4c..5d98611dd999 100644
50473 --- a/drivers/spi/spi-fsl-lpspi.c
50474 +++ b/drivers/spi/spi-fsl-lpspi.c
50475 @@ -200,7 +200,7 @@ static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
50476                                 spi_controller_get_devdata(controller);
50477         int ret;
50479 -       ret = pm_runtime_get_sync(fsl_lpspi->dev);
50480 +       ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
50481         if (ret < 0) {
50482                 dev_err(fsl_lpspi->dev, "failed to enable clock\n");
50483                 return ret;
50484 diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
50485 index e4a8d203f940..d0e5aa18b7ba 100644
50486 --- a/drivers/spi/spi-fsl-spi.c
50487 +++ b/drivers/spi/spi-fsl-spi.c
50488 @@ -707,6 +707,11 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
50489         struct resource mem;
50490         int irq, type;
50491         int ret;
50492 +       bool spisel_boot = false;
50493 +#if IS_ENABLED(CONFIG_FSL_SOC)
50494 +       struct mpc8xxx_spi_probe_info *pinfo = NULL;
50495 +#endif
50498         ret = of_mpc8xxx_spi_probe(ofdev);
50499         if (ret)
50500 @@ -715,9 +720,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
50501         type = fsl_spi_get_type(&ofdev->dev);
50502         if (type == TYPE_FSL) {
50503                 struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
50504 -               bool spisel_boot = false;
50505  #if IS_ENABLED(CONFIG_FSL_SOC)
50506 -               struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
50507 +               pinfo = to_of_pinfo(pdata);
50509                 spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
50510                 if (spisel_boot) {
50511 @@ -746,15 +750,24 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
50513         ret = of_address_to_resource(np, 0, &mem);
50514         if (ret)
50515 -               return ret;
50516 +               goto unmap_out;
50518         irq = platform_get_irq(ofdev, 0);
50519 -       if (irq < 0)
50520 -               return irq;
50521 +       if (irq < 0) {
50522 +               ret = irq;
50523 +               goto unmap_out;
50524 +       }
50526         master = fsl_spi_probe(dev, &mem, irq);
50528         return PTR_ERR_OR_ZERO(master);
50530 +unmap_out:
50531 +#if IS_ENABLED(CONFIG_FSL_SOC)
50532 +       if (spisel_boot)
50533 +               iounmap(pinfo->immr_spi_cs);
50534 +#endif
50535 +       return ret;
50538  static int of_fsl_spi_remove(struct platform_device *ofdev)
50539 diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
50540 index 36a4922a134a..ccd817ee4917 100644
50541 --- a/drivers/spi/spi-omap-100k.c
50542 +++ b/drivers/spi/spi-omap-100k.c
50543 @@ -424,7 +424,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
50545  static int omap1_spi100k_remove(struct platform_device *pdev)
50547 -       struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
50548 +       struct spi_master *master = platform_get_drvdata(pdev);
50549         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
50551         pm_runtime_disable(&pdev->dev);
50552 @@ -438,7 +438,7 @@ static int omap1_spi100k_remove(struct platform_device *pdev)
50553  #ifdef CONFIG_PM
50554  static int omap1_spi100k_runtime_suspend(struct device *dev)
50556 -       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
50557 +       struct spi_master *master = dev_get_drvdata(dev);
50558         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
50560         clk_disable_unprepare(spi100k->ick);
50561 @@ -449,7 +449,7 @@ static int omap1_spi100k_runtime_suspend(struct device *dev)
50563  static int omap1_spi100k_runtime_resume(struct device *dev)
50565 -       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
50566 +       struct spi_master *master = dev_get_drvdata(dev);
50567         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
50568         int ret;
50570 diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
50571 index 8dcb2e70735c..d39dec6d1c91 100644
50572 --- a/drivers/spi/spi-qup.c
50573 +++ b/drivers/spi/spi-qup.c
50574 @@ -1263,7 +1263,7 @@ static int spi_qup_remove(struct platform_device *pdev)
50575         struct spi_qup *controller = spi_master_get_devdata(master);
50576         int ret;
50578 -       ret = pm_runtime_get_sync(&pdev->dev);
50579 +       ret = pm_runtime_resume_and_get(&pdev->dev);
50580         if (ret < 0)
50581                 return ret;
50583 diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
50584 index 936ef54e0903..0d75080da648 100644
50585 --- a/drivers/spi/spi-rockchip.c
50586 +++ b/drivers/spi/spi-rockchip.c
50587 @@ -476,7 +476,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
50588         return 1;
50591 -static void rockchip_spi_config(struct rockchip_spi *rs,
50592 +static int rockchip_spi_config(struct rockchip_spi *rs,
50593                 struct spi_device *spi, struct spi_transfer *xfer,
50594                 bool use_dma, bool slave_mode)
50596 @@ -521,7 +521,9 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
50597                  * ctlr->bits_per_word_mask, so this shouldn't
50598                  * happen
50599                  */
50600 -               unreachable();
50601 +               dev_err(rs->dev, "unknown bits per word: %d\n",
50602 +                       xfer->bits_per_word);
50603 +               return -EINVAL;
50604         }
50606         if (use_dma) {
50607 @@ -554,6 +556,8 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
50608          */
50609         writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
50610                         rs->regs + ROCKCHIP_SPI_BAUDR);
50612 +       return 0;
50615  static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
50616 @@ -577,6 +581,7 @@ static int rockchip_spi_transfer_one(
50617                 struct spi_transfer *xfer)
50619         struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
50620 +       int ret;
50621         bool use_dma;
50623         WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
50624 @@ -596,7 +601,9 @@ static int rockchip_spi_transfer_one(
50626         use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
50628 -       rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
50629 +       ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
50630 +       if (ret)
50631 +               return ret;
50633         if (use_dma)
50634                 return rockchip_spi_prepare_dma(rs, ctlr, xfer);
50635 diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
50636 index 947e6b9dc9f4..2786470a5201 100644
50637 --- a/drivers/spi/spi-stm32-qspi.c
50638 +++ b/drivers/spi/spi-stm32-qspi.c
50639 @@ -727,21 +727,31 @@ static int __maybe_unused stm32_qspi_suspend(struct device *dev)
50641         pinctrl_pm_select_sleep_state(dev);
50643 -       return 0;
50644 +       return pm_runtime_force_suspend(dev);
50647  static int __maybe_unused stm32_qspi_resume(struct device *dev)
50649         struct stm32_qspi *qspi = dev_get_drvdata(dev);
50650 +       int ret;
50652 +       ret = pm_runtime_force_resume(dev);
50653 +       if (ret < 0)
50654 +               return ret;
50656         pinctrl_pm_select_default_state(dev);
50657 -       clk_prepare_enable(qspi->clk);
50659 +       ret = pm_runtime_get_sync(dev);
50660 +       if (ret < 0) {
50661 +               pm_runtime_put_noidle(dev);
50662 +               return ret;
50663 +       }
50665         writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
50666         writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
50668 -       pm_runtime_mark_last_busy(qspi->dev);
50669 -       pm_runtime_put_autosuspend(qspi->dev);
50670 +       pm_runtime_mark_last_busy(dev);
50671 +       pm_runtime_put_autosuspend(dev);
50673         return 0;
50675 diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
50676 index 25c076461011..7f0244a246e9 100644
50677 --- a/drivers/spi/spi-stm32.c
50678 +++ b/drivers/spi/spi-stm32.c
50679 @@ -1803,7 +1803,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
50680         struct reset_control *rst;
50681         int ret;
50683 -       master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
50684 +       master = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
50685         if (!master) {
50686                 dev_err(&pdev->dev, "spi master allocation failed\n");
50687                 return -ENOMEM;
50688 @@ -1821,18 +1821,16 @@ static int stm32_spi_probe(struct platform_device *pdev)
50690         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
50691         spi->base = devm_ioremap_resource(&pdev->dev, res);
50692 -       if (IS_ERR(spi->base)) {
50693 -               ret = PTR_ERR(spi->base);
50694 -               goto err_master_put;
50695 -       }
50696 +       if (IS_ERR(spi->base))
50697 +               return PTR_ERR(spi->base);
50699         spi->phys_addr = (dma_addr_t)res->start;
50701         spi->irq = platform_get_irq(pdev, 0);
50702 -       if (spi->irq <= 0) {
50703 -               ret = dev_err_probe(&pdev->dev, spi->irq, "failed to get irq\n");
50704 -               goto err_master_put;
50705 -       }
50706 +       if (spi->irq <= 0)
50707 +               return dev_err_probe(&pdev->dev, spi->irq,
50708 +                                    "failed to get irq\n");
50710         ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
50711                                         spi->cfg->irq_handler_event,
50712                                         spi->cfg->irq_handler_thread,
50713 @@ -1840,20 +1838,20 @@ static int stm32_spi_probe(struct platform_device *pdev)
50714         if (ret) {
50715                 dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
50716                         ret);
50717 -               goto err_master_put;
50718 +               return ret;
50719         }
50721         spi->clk = devm_clk_get(&pdev->dev, NULL);
50722         if (IS_ERR(spi->clk)) {
50723                 ret = PTR_ERR(spi->clk);
50724                 dev_err(&pdev->dev, "clk get failed: %d\n", ret);
50725 -               goto err_master_put;
50726 +               return ret;
50727         }
50729         ret = clk_prepare_enable(spi->clk);
50730         if (ret) {
50731                 dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
50732 -               goto err_master_put;
50733 +               return ret;
50734         }
50735         spi->clk_rate = clk_get_rate(spi->clk);
50736         if (!spi->clk_rate) {
50737 @@ -1929,7 +1927,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
50738         pm_runtime_set_active(&pdev->dev);
50739         pm_runtime_enable(&pdev->dev);
50741 -       ret = devm_spi_register_master(&pdev->dev, master);
50742 +       ret = spi_register_master(master);
50743         if (ret) {
50744                 dev_err(&pdev->dev, "spi master registration failed: %d\n",
50745                         ret);
50746 @@ -1949,8 +1947,6 @@ static int stm32_spi_probe(struct platform_device *pdev)
50747                 dma_release_channel(spi->dma_rx);
50748  err_clk_disable:
50749         clk_disable_unprepare(spi->clk);
50750 -err_master_put:
50751 -       spi_master_put(master);
50753         return ret;
50755 @@ -1960,6 +1956,7 @@ static int stm32_spi_remove(struct platform_device *pdev)
50756         struct spi_master *master = platform_get_drvdata(pdev);
50757         struct stm32_spi *spi = spi_master_get_devdata(master);
50759 +       spi_unregister_master(master);
50760         spi->cfg->disable(spi);
50762         if (master->dma_tx)
50763 diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
50764 index 9417385c0921..e06aafe169e0 100644
50765 --- a/drivers/spi/spi-ti-qspi.c
50766 +++ b/drivers/spi/spi-ti-qspi.c
50767 @@ -733,6 +733,17 @@ static int ti_qspi_runtime_resume(struct device *dev)
50768         return 0;
50771 +static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
50773 +       if (qspi->rx_bb_addr)
50774 +               dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
50775 +                                 qspi->rx_bb_addr,
50776 +                                 qspi->rx_bb_dma_addr);
50778 +       if (qspi->rx_chan)
50779 +               dma_release_channel(qspi->rx_chan);
50782  static const struct of_device_id ti_qspi_match[] = {
50783         {.compatible = "ti,dra7xxx-qspi" },
50784         {.compatible = "ti,am4372-qspi" },
50785 @@ -886,6 +897,8 @@ static int ti_qspi_probe(struct platform_device *pdev)
50786         if (!ret)
50787                 return 0;
50789 +       ti_qspi_dma_cleanup(qspi);
50791         pm_runtime_disable(&pdev->dev);
50792  free_master:
50793         spi_master_put(master);
50794 @@ -904,12 +917,7 @@ static int ti_qspi_remove(struct platform_device *pdev)
50795         pm_runtime_put_sync(&pdev->dev);
50796         pm_runtime_disable(&pdev->dev);
50798 -       if (qspi->rx_bb_addr)
50799 -               dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
50800 -                                 qspi->rx_bb_addr,
50801 -                                 qspi->rx_bb_dma_addr);
50802 -       if (qspi->rx_chan)
50803 -               dma_release_channel(qspi->rx_chan);
50804 +       ti_qspi_dma_cleanup(qspi);
50806         return 0;
50808 diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
50809 index c8fa6ee18ae7..7162387b9f96 100644
50810 --- a/drivers/spi/spi-zynqmp-gqspi.c
50811 +++ b/drivers/spi/spi-zynqmp-gqspi.c
50812 @@ -157,6 +157,7 @@ enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
50813   * @data_completion:   completion structure
50814   */
50815  struct zynqmp_qspi {
50816 +       struct spi_controller *ctlr;
50817         void __iomem *regs;
50818         struct clk *refclk;
50819         struct clk *pclk;
50820 @@ -173,6 +174,7 @@ struct zynqmp_qspi {
50821         u32 genfifoentry;
50822         enum mode_type mode;
50823         struct completion data_completion;
50824 +       struct mutex op_lock;
50825  };
50827  /**
50828 @@ -486,24 +488,10 @@ static int zynqmp_qspi_setup_op(struct spi_device *qspi)
50830         struct spi_controller *ctlr = qspi->master;
50831         struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
50832 -       struct device *dev = &ctlr->dev;
50833 -       int ret;
50835         if (ctlr->busy)
50836                 return -EBUSY;
50838 -       ret = clk_enable(xqspi->refclk);
50839 -       if (ret) {
50840 -               dev_err(dev, "Cannot enable device clock.\n");
50841 -               return ret;
50842 -       }
50844 -       ret = clk_enable(xqspi->pclk);
50845 -       if (ret) {
50846 -               dev_err(dev, "Cannot enable APB clock.\n");
50847 -               clk_disable(xqspi->refclk);
50848 -               return ret;
50849 -       }
50850         zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
50852         return 0;
50853 @@ -520,7 +508,7 @@ static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
50855         u32 count = 0, intermediate;
50857 -       while ((xqspi->bytes_to_transfer > 0) && (count < size)) {
50858 +       while ((xqspi->bytes_to_transfer > 0) && (count < size) && (xqspi->txbuf)) {
50859                 memcpy(&intermediate, xqspi->txbuf, 4);
50860                 zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
50862 @@ -579,7 +567,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
50863                 genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
50864                 genfifoentry |= GQSPI_GENFIFO_TX;
50865                 transfer_len = xqspi->bytes_to_transfer;
50866 -       } else {
50867 +       } else if (xqspi->rxbuf) {
50868                 genfifoentry &= ~GQSPI_GENFIFO_TX;
50869                 genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
50870                 genfifoentry |= GQSPI_GENFIFO_RX;
50871 @@ -587,6 +575,11 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
50872                         transfer_len = xqspi->dma_rx_bytes;
50873                 else
50874                         transfer_len = xqspi->bytes_to_receive;
50875 +       } else {
50876 +               /* Sending dummy circles here */
50877 +               genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
50878 +               genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
50879 +               transfer_len = xqspi->bytes_to_transfer;
50880         }
50881         genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
50882         xqspi->genfifoentry = genfifoentry;
50883 @@ -738,7 +731,7 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
50884   * zynqmp_qspi_setuprxdma - This function sets up the RX DMA operation
50885   * @xqspi:     xqspi is a pointer to the GQSPI instance.
50886   */
50887 -static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
50888 +static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
50890         u32 rx_bytes, rx_rem, config_reg;
50891         dma_addr_t addr;
50892 @@ -752,7 +745,7 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
50893                 zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
50894                 xqspi->mode = GQSPI_MODE_IO;
50895                 xqspi->dma_rx_bytes = 0;
50896 -               return;
50897 +               return 0;
50898         }
50900         rx_rem = xqspi->bytes_to_receive % 4;
50901 @@ -760,8 +753,10 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
50903         addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
50904                               rx_bytes, DMA_FROM_DEVICE);
50905 -       if (dma_mapping_error(xqspi->dev, addr))
50906 +       if (dma_mapping_error(xqspi->dev, addr)) {
50907                 dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
50908 +               return -ENOMEM;
50909 +       }
50911         xqspi->dma_rx_bytes = rx_bytes;
50912         xqspi->dma_addr = addr;
50913 @@ -782,6 +777,8 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
50915         /* Write the number of bytes to transfer */
50916         zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
50918 +       return 0;
50921  /**
50922 @@ -818,11 +815,17 @@ static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
50923   * @genfifoentry:      genfifoentry is pointer to the variable in which
50924   *                     GENFIFO mask is returned to calling function
50925   */
50926 -static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
50927 +static int zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
50928                                 u32 genfifoentry)
50930 +       int ret;
50932 +       ret = zynqmp_qspi_setuprxdma(xqspi);
50933 +       if (ret)
50934 +               return ret;
50935         zynqmp_qspi_fillgenfifo(xqspi, rx_nbits, genfifoentry);
50936 -       zynqmp_qspi_setuprxdma(xqspi);
50938 +       return 0;
50941  /**
50942 @@ -835,10 +838,13 @@ static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
50943   */
50944  static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
50946 -       struct spi_controller *ctlr = dev_get_drvdata(dev);
50947 -       struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
50948 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
50949 +       struct spi_controller *ctlr = xqspi->ctlr;
50950 +       int ret;
50952 -       spi_controller_suspend(ctlr);
50953 +       ret = spi_controller_suspend(ctlr);
50954 +       if (ret)
50955 +               return ret;
50957         zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
50959 @@ -856,27 +862,13 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
50960   */
50961  static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
50963 -       struct spi_controller *ctlr = dev_get_drvdata(dev);
50964 -       struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
50965 -       int ret = 0;
50967 -       ret = clk_enable(xqspi->pclk);
50968 -       if (ret) {
50969 -               dev_err(dev, "Cannot enable APB clock.\n");
50970 -               return ret;
50971 -       }
50972 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
50973 +       struct spi_controller *ctlr = xqspi->ctlr;
50975 -       ret = clk_enable(xqspi->refclk);
50976 -       if (ret) {
50977 -               dev_err(dev, "Cannot enable device clock.\n");
50978 -               clk_disable(xqspi->pclk);
50979 -               return ret;
50980 -       }
50981 +       zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
50983         spi_controller_resume(ctlr);
50985 -       clk_disable(xqspi->refclk);
50986 -       clk_disable(xqspi->pclk);
50987         return 0;
50990 @@ -890,10 +882,10 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
50991   */
50992  static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
50994 -       struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
50995 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
50997 -       clk_disable(xqspi->refclk);
50998 -       clk_disable(xqspi->pclk);
50999 +       clk_disable_unprepare(xqspi->refclk);
51000 +       clk_disable_unprepare(xqspi->pclk);
51002         return 0;
51004 @@ -908,19 +900,19 @@ static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
51005   */
51006  static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
51008 -       struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
51009 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
51010         int ret;
51012 -       ret = clk_enable(xqspi->pclk);
51013 +       ret = clk_prepare_enable(xqspi->pclk);
51014         if (ret) {
51015                 dev_err(dev, "Cannot enable APB clock.\n");
51016                 return ret;
51017         }
51019 -       ret = clk_enable(xqspi->refclk);
51020 +       ret = clk_prepare_enable(xqspi->refclk);
51021         if (ret) {
51022                 dev_err(dev, "Cannot enable device clock.\n");
51023 -               clk_disable(xqspi->pclk);
51024 +               clk_disable_unprepare(xqspi->pclk);
51025                 return ret;
51026         }
51028 @@ -944,25 +936,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
51029         struct zynqmp_qspi *xqspi = spi_controller_get_devdata
51030                                     (mem->spi->master);
51031         int err = 0, i;
51032 -       u8 *tmpbuf;
51033         u32 genfifoentry = 0;
51034 +       u16 opcode = op->cmd.opcode;
51035 +       u64 opaddr;
51037         dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
51038                 op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
51039                 op->dummy.buswidth, op->data.buswidth);
51041 +       mutex_lock(&xqspi->op_lock);
51042         zynqmp_qspi_config_op(xqspi, mem->spi);
51043         zynqmp_qspi_chipselect(mem->spi, false);
51044         genfifoentry |= xqspi->genfifocs;
51045         genfifoentry |= xqspi->genfifobus;
51047         if (op->cmd.opcode) {
51048 -               tmpbuf = kzalloc(op->cmd.nbytes, GFP_KERNEL | GFP_DMA);
51049 -               if (!tmpbuf)
51050 -                       return -ENOMEM;
51051 -               tmpbuf[0] = op->cmd.opcode;
51052                 reinit_completion(&xqspi->data_completion);
51053 -               xqspi->txbuf = tmpbuf;
51054 +               xqspi->txbuf = &opcode;
51055                 xqspi->rxbuf = NULL;
51056                 xqspi->bytes_to_transfer = op->cmd.nbytes;
51057                 xqspi->bytes_to_receive = 0;
51058 @@ -973,16 +963,15 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
51059                 zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
51060                                    GQSPI_IER_GENFIFOEMPTY_MASK |
51061                                    GQSPI_IER_TXNOT_FULL_MASK);
51062 -               if (!wait_for_completion_interruptible_timeout
51063 +               if (!wait_for_completion_timeout
51064                     (&xqspi->data_completion, msecs_to_jiffies(1000))) {
51065                         err = -ETIMEDOUT;
51066 -                       kfree(tmpbuf);
51067                         goto return_err;
51068                 }
51069 -               kfree(tmpbuf);
51070         }
51072         if (op->addr.nbytes) {
51073 +               xqspi->txbuf = &opaddr;
51074                 for (i = 0; i < op->addr.nbytes; i++) {
51075                         *(((u8 *)xqspi->txbuf) + i) = op->addr.val >>
51076                                         (8 * (op->addr.nbytes - i - 1));
51077 @@ -1001,7 +990,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
51078                                    GQSPI_IER_TXEMPTY_MASK |
51079                                    GQSPI_IER_GENFIFOEMPTY_MASK |
51080                                    GQSPI_IER_TXNOT_FULL_MASK);
51081 -               if (!wait_for_completion_interruptible_timeout
51082 +               if (!wait_for_completion_timeout
51083                     (&xqspi->data_completion, msecs_to_jiffies(1000))) {
51084                         err = -ETIMEDOUT;
51085                         goto return_err;
51086 @@ -1009,32 +998,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
51087         }
51089         if (op->dummy.nbytes) {
51090 -               tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL | GFP_DMA);
51091 -               if (!tmpbuf)
51092 -                       return -ENOMEM;
51093 -               memset(tmpbuf, 0xff, op->dummy.nbytes);
51094 -               reinit_completion(&xqspi->data_completion);
51095 -               xqspi->txbuf = tmpbuf;
51096 +               xqspi->txbuf = NULL;
51097                 xqspi->rxbuf = NULL;
51098 -               xqspi->bytes_to_transfer = op->dummy.nbytes;
51099 +               /*
51100 +                * xqspi->bytes_to_transfer here represents the dummy circles
51101 +                * which need to be sent.
51102 +                */
51103 +               xqspi->bytes_to_transfer = op->dummy.nbytes * 8 / op->dummy.buswidth;
51104                 xqspi->bytes_to_receive = 0;
51105 -               zynqmp_qspi_write_op(xqspi, op->dummy.buswidth,
51106 +               /*
51107 +                * Using op->data.buswidth instead of op->dummy.buswidth here because
51108 +                * we need to use it to configure the correct SPI mode.
51109 +                */
51110 +               zynqmp_qspi_write_op(xqspi, op->data.buswidth,
51111                                      genfifoentry);
51112                 zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
51113                                    zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
51114                                    GQSPI_CFG_START_GEN_FIFO_MASK);
51115 -               zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
51116 -                                  GQSPI_IER_TXEMPTY_MASK |
51117 -                                  GQSPI_IER_GENFIFOEMPTY_MASK |
51118 -                                  GQSPI_IER_TXNOT_FULL_MASK);
51119 -               if (!wait_for_completion_interruptible_timeout
51120 -                   (&xqspi->data_completion, msecs_to_jiffies(1000))) {
51121 -                       err = -ETIMEDOUT;
51122 -                       kfree(tmpbuf);
51123 -                       goto return_err;
51124 -               }
51126 -               kfree(tmpbuf);
51127         }
51129         if (op->data.nbytes) {
51130 @@ -1059,8 +1039,11 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
51131                         xqspi->rxbuf = (u8 *)op->data.buf.in;
51132                         xqspi->bytes_to_receive = op->data.nbytes;
51133                         xqspi->bytes_to_transfer = 0;
51134 -                       zynqmp_qspi_read_op(xqspi, op->data.buswidth,
51135 +                       err = zynqmp_qspi_read_op(xqspi, op->data.buswidth,
51136                                             genfifoentry);
51137 +                       if (err)
51138 +                               goto return_err;
51140                         zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
51141                                            zynqmp_gqspi_read
51142                                            (xqspi, GQSPI_CONFIG_OFST) |
51143 @@ -1076,7 +1059,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
51144                                                    GQSPI_IER_RXEMPTY_MASK);
51145                         }
51146                 }
51147 -               if (!wait_for_completion_interruptible_timeout
51148 +               if (!wait_for_completion_timeout
51149                     (&xqspi->data_completion, msecs_to_jiffies(1000)))
51150                         err = -ETIMEDOUT;
51151         }
51152 @@ -1084,6 +1067,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
51153  return_err:
51155         zynqmp_qspi_chipselect(mem->spi, true);
51156 +       mutex_unlock(&xqspi->op_lock);
51158         return err;
51160 @@ -1120,6 +1104,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
51162         xqspi = spi_controller_get_devdata(ctlr);
51163         xqspi->dev = dev;
51164 +       xqspi->ctlr = ctlr;
51165         platform_set_drvdata(pdev, xqspi);
51167         xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
51168 @@ -1135,13 +1120,11 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
51169                 goto remove_master;
51170         }
51172 -       init_completion(&xqspi->data_completion);
51174         xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
51175         if (IS_ERR(xqspi->refclk)) {
51176                 dev_err(dev, "ref_clk clock not found.\n");
51177                 ret = PTR_ERR(xqspi->refclk);
51178 -               goto clk_dis_pclk;
51179 +               goto remove_master;
51180         }
51182         ret = clk_prepare_enable(xqspi->pclk);
51183 @@ -1156,15 +1139,24 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
51184                 goto clk_dis_pclk;
51185         }
51187 +       init_completion(&xqspi->data_completion);
51189 +       mutex_init(&xqspi->op_lock);
51191         pm_runtime_use_autosuspend(&pdev->dev);
51192         pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
51193         pm_runtime_set_active(&pdev->dev);
51194         pm_runtime_enable(&pdev->dev);
51196 +       ret = pm_runtime_get_sync(&pdev->dev);
51197 +       if (ret < 0) {
51198 +               dev_err(&pdev->dev, "Failed to pm_runtime_get_sync: %d\n", ret);
51199 +               goto clk_dis_all;
51200 +       }
51202         /* QSPI controller initializations */
51203         zynqmp_qspi_init_hw(xqspi);
51205 -       pm_runtime_mark_last_busy(&pdev->dev);
51206 -       pm_runtime_put_autosuspend(&pdev->dev);
51207         xqspi->irq = platform_get_irq(pdev, 0);
51208         if (xqspi->irq <= 0) {
51209                 ret = -ENXIO;
51210 @@ -1178,6 +1170,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
51211                 goto clk_dis_all;
51212         }
51214 +       dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
51215         ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
51216         ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
51217         ctlr->mem_ops = &zynqmp_qspi_mem_ops;
51218 @@ -1187,6 +1180,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
51219         ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
51220                             SPI_TX_DUAL | SPI_TX_QUAD;
51221         ctlr->dev.of_node = np;
51222 +       ctlr->auto_runtime_pm = true;
51224         ret = devm_spi_register_controller(&pdev->dev, ctlr);
51225         if (ret) {
51226 @@ -1194,9 +1188,13 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
51227                 goto clk_dis_all;
51228         }
51230 +       pm_runtime_mark_last_busy(&pdev->dev);
51231 +       pm_runtime_put_autosuspend(&pdev->dev);
51233         return 0;
51235  clk_dis_all:
51236 +       pm_runtime_put_sync(&pdev->dev);
51237         pm_runtime_set_suspended(&pdev->dev);
51238         pm_runtime_disable(&pdev->dev);
51239         clk_disable_unprepare(xqspi->refclk);
51240 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
51241 index b08efe88ccd6..8da4fe475b84 100644
51242 --- a/drivers/spi/spi.c
51243 +++ b/drivers/spi/spi.c
51244 @@ -795,7 +795,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
51246  /*-------------------------------------------------------------------------*/
51248 -static void spi_set_cs(struct spi_device *spi, bool enable)
51249 +static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
51251         bool enable1 = enable;
51253 @@ -803,7 +803,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
51254          * Avoid calling into the driver (or doing delays) if the chip select
51255          * isn't actually changing from the last time this was called.
51256          */
51257 -       if ((spi->controller->last_cs_enable == enable) &&
51258 +       if (!force && (spi->controller->last_cs_enable == enable) &&
51259             (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
51260                 return;
51262 @@ -1253,7 +1253,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
51263         struct spi_statistics *statm = &ctlr->statistics;
51264         struct spi_statistics *stats = &msg->spi->statistics;
51266 -       spi_set_cs(msg->spi, true);
51267 +       spi_set_cs(msg->spi, true, false);
51269         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
51270         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
51271 @@ -1321,9 +1321,9 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
51272                                          &msg->transfers)) {
51273                                 keep_cs = true;
51274                         } else {
51275 -                               spi_set_cs(msg->spi, false);
51276 +                               spi_set_cs(msg->spi, false, false);
51277                                 _spi_transfer_cs_change_delay(msg, xfer);
51278 -                               spi_set_cs(msg->spi, true);
51279 +                               spi_set_cs(msg->spi, true, false);
51280                         }
51281                 }
51283 @@ -1332,7 +1332,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
51285  out:
51286         if (ret != 0 || !keep_cs)
51287 -               spi_set_cs(msg->spi, false);
51288 +               spi_set_cs(msg->spi, false, false);
51290         if (msg->status == -EINPROGRESS)
51291                 msg->status = ret;
51292 @@ -2496,6 +2496,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
51294         ctlr = __spi_alloc_controller(dev, size, slave);
51295         if (ctlr) {
51296 +               ctlr->devm_allocated = true;
51297                 *ptr = ctlr;
51298                 devres_add(dev, ptr);
51299         } else {
51300 @@ -2842,11 +2843,6 @@ int devm_spi_register_controller(struct device *dev,
51302  EXPORT_SYMBOL_GPL(devm_spi_register_controller);
51304 -static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
51306 -       return *(struct spi_controller **)res == ctlr;
51309  static int __unregister(struct device *dev, void *null)
51311         spi_unregister_device(to_spi_device(dev));
51312 @@ -2893,8 +2889,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
51313         /* Release the last reference on the controller if its driver
51314          * has not yet been converted to devm_spi_alloc_master/slave().
51315          */
51316 -       if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
51317 -                        devm_spi_match_controller, ctlr))
51318 +       if (!ctlr->devm_allocated)
51319                 put_device(&ctlr->dev);
51321         /* free bus id */
51322 @@ -3423,11 +3418,11 @@ int spi_setup(struct spi_device *spi)
51323                  */
51324                 status = 0;
51326 -               spi_set_cs(spi, false);
51327 +               spi_set_cs(spi, false, true);
51328                 pm_runtime_mark_last_busy(spi->controller->dev.parent);
51329                 pm_runtime_put_autosuspend(spi->controller->dev.parent);
51330         } else {
51331 -               spi_set_cs(spi, false);
51332 +               spi_set_cs(spi, false, true);
51333         }
51335         mutex_unlock(&spi->controller->io_mutex);
51336 diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
51337 index 70498adb1575..5c35653ed36d 100644
51338 --- a/drivers/staging/android/Kconfig
51339 +++ b/drivers/staging/android/Kconfig
51340 @@ -4,7 +4,7 @@ menu "Android"
51341  if ANDROID
51343  config ASHMEM
51344 -       bool "Enable the Anonymous Shared Memory Subsystem"
51345 +       tristate "Enable the Anonymous Shared Memory Subsystem"
51346         depends on SHMEM
51347         help
51348           The ashmem subsystem is a new shared memory allocator, similar to
51349 diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
51350 index e9a55a5e6529..3d794218dd4b 100644
51351 --- a/drivers/staging/android/Makefile
51352 +++ b/drivers/staging/android/Makefile
51353 @@ -1,4 +1,5 @@
51354  # SPDX-License-Identifier: GPL-2.0
51355  ccflags-y += -I$(src)                  # needed for trace events
51357 -obj-$(CONFIG_ASHMEM)                   += ashmem.o
51358 +ashmem_linux-y                         += ashmem.o
51359 +obj-$(CONFIG_ASHMEM)                   += ashmem_linux.o
51360 diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
51361 index d66a64e42273..e28d9a2ce7f1 100644
51362 --- a/drivers/staging/android/ashmem.c
51363 +++ b/drivers/staging/android/ashmem.c
51364 @@ -19,6 +19,7 @@
51365  #include <linux/security.h>
51366  #include <linux/mm.h>
51367  #include <linux/mman.h>
51368 +#include <linux/module.h>
51369  #include <linux/uaccess.h>
51370  #include <linux/personality.h>
51371  #include <linux/bitops.h>
51372 @@ -964,4 +965,18 @@ static int __init ashmem_init(void)
51373  out:
51374         return ret;
51376 -device_initcall(ashmem_init);
51378 +static void __exit ashmem_exit(void)
51380 +       misc_deregister(&ashmem_misc);
51381 +       unregister_shrinker(&ashmem_shrinker);
51382 +       kmem_cache_destroy(ashmem_range_cachep);
51383 +       kmem_cache_destroy(ashmem_area_cachep);
51386 +module_init(ashmem_init);
51387 +module_exit(ashmem_exit);
51389 +MODULE_AUTHOR("Google, Inc.");
51390 +MODULE_DESCRIPTION("Driver for Android shared memory device");
51391 +MODULE_LICENSE("GPL v2");
51392 diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
51393 index 4f80a4991f95..c164c8524909 100644
51394 --- a/drivers/staging/comedi/drivers/ni_mio_common.c
51395 +++ b/drivers/staging/comedi/drivers/ni_mio_common.c
51396 @@ -4747,7 +4747,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
51397                 if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
51398                         break;
51399                 set_current_state(TASK_INTERRUPTIBLE);
51400 -               if (schedule_timeout(1))
51401 +               if (schedule_min_hrtimeout())
51402                         return -EIO;
51403         }
51404         if (i == timeout) {
51405 diff --git a/drivers/staging/comedi/drivers/tests/ni_routes_test.c b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
51406 index 4061b3b5f8e9..68defeb53de4 100644
51407 --- a/drivers/staging/comedi/drivers/tests/ni_routes_test.c
51408 +++ b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
51409 @@ -217,7 +217,8 @@ void test_ni_assign_device_routes(void)
51410         const u8 *table, *oldtable;
51412         init_pci_6070e();
51413 -       ni_assign_device_routes(ni_eseries, pci_6070e, &private.routing_tables);
51414 +       ni_assign_device_routes(ni_eseries, pci_6070e, NULL,
51415 +                               &private.routing_tables);
51416         devroutes = private.routing_tables.valid_routes;
51417         table = private.routing_tables.route_values;
51419 @@ -253,7 +254,8 @@ void test_ni_assign_device_routes(void)
51420         olddevroutes = devroutes;
51421         oldtable = table;
51422         init_pci_6220();
51423 -       ni_assign_device_routes(ni_mseries, pci_6220, &private.routing_tables);
51424 +       ni_assign_device_routes(ni_mseries, pci_6220, NULL,
51425 +                               &private.routing_tables);
51426         devroutes = private.routing_tables.valid_routes;
51427         table = private.routing_tables.route_values;
51429 diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
51430 index c368082aae1a..0f4655d7d520 100644
51431 --- a/drivers/staging/fwserial/fwserial.c
51432 +++ b/drivers/staging/fwserial/fwserial.c
51433 @@ -1218,13 +1218,12 @@ static int get_serial_info(struct tty_struct *tty,
51434         struct fwtty_port *port = tty->driver_data;
51436         mutex_lock(&port->port.mutex);
51437 -       ss->type =  PORT_UNKNOWN;
51438 -       ss->line =  port->port.tty->index;
51439 -       ss->flags = port->port.flags;
51440 -       ss->xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN;
51441 +       ss->line = port->index;
51442         ss->baud_base = 400000000;
51443 -       ss->close_delay = port->port.close_delay;
51444 +       ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10;
51445 +       ss->closing_wait = 3000;
51446         mutex_unlock(&port->port.mutex);
51448         return 0;
51451 @@ -1232,20 +1231,20 @@ static int set_serial_info(struct tty_struct *tty,
51452                            struct serial_struct *ss)
51454         struct fwtty_port *port = tty->driver_data;
51455 +       unsigned int cdelay;
51457 -       if (ss->irq != 0 || ss->port != 0 || ss->custom_divisor != 0 ||
51458 -           ss->baud_base != 400000000)
51459 -               return -EPERM;
51460 +       cdelay = msecs_to_jiffies(ss->close_delay * 10);
51462         mutex_lock(&port->port.mutex);
51463         if (!capable(CAP_SYS_ADMIN)) {
51464 -               if (((ss->flags & ~ASYNC_USR_MASK) !=
51465 +               if (cdelay != port->port.close_delay ||
51466 +                   ((ss->flags & ~ASYNC_USR_MASK) !=
51467                      (port->port.flags & ~ASYNC_USR_MASK))) {
51468                         mutex_unlock(&port->port.mutex);
51469                         return -EPERM;
51470                 }
51471         }
51472 -       port->port.close_delay = ss->close_delay * HZ / 100;
51473 +       port->port.close_delay = cdelay;
51474         mutex_unlock(&port->port.mutex);
51476         return 0;
51477 diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
51478 index 607378bfebb7..a520f7f213db 100644
51479 --- a/drivers/staging/greybus/uart.c
51480 +++ b/drivers/staging/greybus/uart.c
51481 @@ -614,10 +614,12 @@ static int get_serial_info(struct tty_struct *tty,
51482         ss->line = gb_tty->minor;
51483         ss->xmit_fifo_size = 16;
51484         ss->baud_base = 9600;
51485 -       ss->close_delay = gb_tty->port.close_delay / 10;
51486 +       ss->close_delay = jiffies_to_msecs(gb_tty->port.close_delay) / 10;
51487         ss->closing_wait =
51488                 gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
51489 -               ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10;
51490 +               ASYNC_CLOSING_WAIT_NONE :
51491 +               jiffies_to_msecs(gb_tty->port.closing_wait) / 10;
51493         return 0;
51496 @@ -629,17 +631,16 @@ static int set_serial_info(struct tty_struct *tty,
51497         unsigned int close_delay;
51498         int retval = 0;
51500 -       close_delay = ss->close_delay * 10;
51501 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
51502         closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
51503 -                       ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
51504 +                       ASYNC_CLOSING_WAIT_NONE :
51505 +                       msecs_to_jiffies(ss->closing_wait * 10);
51507         mutex_lock(&gb_tty->port.mutex);
51508         if (!capable(CAP_SYS_ADMIN)) {
51509                 if ((close_delay != gb_tty->port.close_delay) ||
51510                     (closing_wait != gb_tty->port.closing_wait))
51511                         retval = -EPERM;
51512 -               else
51513 -                       retval = -EOPNOTSUPP;
51514         } else {
51515                 gb_tty->port.close_delay = close_delay;
51516                 gb_tty->port.closing_wait = closing_wait;
51517 diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
51518 index 7ca7378b1859..0ab67b2aec67 100644
51519 --- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
51520 +++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
51521 @@ -843,8 +843,10 @@ static int lm3554_probe(struct i2c_client *client)
51522                 return -ENOMEM;
51524         flash->pdata = lm3554_platform_data_func(client);
51525 -       if (IS_ERR(flash->pdata))
51526 -               return PTR_ERR(flash->pdata);
51527 +       if (IS_ERR(flash->pdata)) {
51528 +               err = PTR_ERR(flash->pdata);
51529 +               goto fail1;
51530 +       }
51532         v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
51533         flash->sd.internal_ops = &lm3554_internal_ops;
51534 @@ -856,7 +858,7 @@ static int lm3554_probe(struct i2c_client *client)
51535                                    ARRAY_SIZE(lm3554_controls));
51536         if (ret) {
51537                 dev_err(&client->dev, "error initialize a ctrl_handler.\n");
51538 -               goto fail2;
51539 +               goto fail3;
51540         }
51542         for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++)
51543 @@ -865,14 +867,14 @@ static int lm3554_probe(struct i2c_client *client)
51545         if (flash->ctrl_handler.error) {
51546                 dev_err(&client->dev, "ctrl_handler error.\n");
51547 -               goto fail2;
51548 +               goto fail3;
51549         }
51551         flash->sd.ctrl_handler = &flash->ctrl_handler;
51552         err = media_entity_pads_init(&flash->sd.entity, 0, NULL);
51553         if (err) {
51554                 dev_err(&client->dev, "error initialize a media entity.\n");
51555 -               goto fail1;
51556 +               goto fail2;
51557         }
51559         flash->sd.entity.function = MEDIA_ENT_F_FLASH;
51560 @@ -884,14 +886,15 @@ static int lm3554_probe(struct i2c_client *client)
51561         err = lm3554_gpio_init(client);
51562         if (err) {
51563                 dev_err(&client->dev, "gpio request/direction_output fail");
51564 -               goto fail2;
51565 +               goto fail3;
51566         }
51567         return atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
51568 -fail2:
51569 +fail3:
51570         media_entity_cleanup(&flash->sd.entity);
51571         v4l2_ctrl_handler_free(&flash->ctrl_handler);
51572 -fail1:
51573 +fail2:
51574         v4l2_device_unregister_subdev(&flash->sd);
51575 +fail1:
51576         kfree(flash);
51578         return err;
51579 diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
51580 index 453bb6913550..f1e6b2597853 100644
51581 --- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
51582 +++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
51583 @@ -221,6 +221,9 @@ int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
51584         unsigned long irqflags;
51585         int err = 0;
51587 +       if (WARN_ON(css_pipe_id >= IA_CSS_PIPE_ID_NUM))
51588 +               return -EINVAL;
51590         while (pipe->buffers_in_css < ATOMISP_CSS_Q_DEPTH) {
51591                 struct videobuf_buffer *vb;
51593 diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
51594 index 2ae50decfc8b..9da82855552d 100644
51595 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
51596 +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
51597 @@ -948,10 +948,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
51598                 dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
51599                 while (count--) {
51600                         dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
51601 -                       if (!dis_buf) {
51602 -                               kfree(s3a_buf);
51603 +                       if (!dis_buf)
51604                                 goto error;
51605 -                       }
51606                         if (atomisp_css_allocate_stat_buffers(
51607                                 asd, stream_id, NULL, dis_buf, NULL)) {
51608                                 kfree(dis_buf);
51609 diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
51610 index f13af2329f48..0168f9839c90 100644
51611 --- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
51612 +++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
51613 @@ -857,16 +857,17 @@ static void free_private_pages(struct hmm_buffer_object *bo,
51614         kfree(bo->page_obj);
51617 -static void free_user_pages(struct hmm_buffer_object *bo)
51618 +static void free_user_pages(struct hmm_buffer_object *bo,
51619 +                           unsigned int page_nr)
51621         int i;
51623         hmm_mem_stat.usr_size -= bo->pgnr;
51625         if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
51626 -               unpin_user_pages(bo->pages, bo->pgnr);
51627 +               unpin_user_pages(bo->pages, page_nr);
51628         } else {
51629 -               for (i = 0; i < bo->pgnr; i++)
51630 +               for (i = 0; i < page_nr; i++)
51631                         put_page(bo->pages[i]);
51632         }
51633         kfree(bo->pages);
51634 @@ -942,6 +943,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
51635                 dev_err(atomisp_dev,
51636                         "get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
51637                         bo->pgnr, page_nr);
51638 +               if (page_nr < 0)
51639 +                       page_nr = 0;
51640                 goto out_of_mem;
51641         }
51643 @@ -954,7 +957,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
51645  out_of_mem:
51647 -       free_user_pages(bo);
51648 +       free_user_pages(bo, page_nr);
51650         return -ENOMEM;
51652 @@ -1037,7 +1040,7 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
51653         if (bo->type == HMM_BO_PRIVATE)
51654                 free_private_pages(bo, &dynamic_pool, &reserved_pool);
51655         else if (bo->type == HMM_BO_USER)
51656 -               free_user_pages(bo);
51657 +               free_user_pages(bo, bo->pgnr);
51658         else
51659                 dev_err(atomisp_dev, "invalid buffer type.\n");
51660         mutex_unlock(&bo->mutex);
51661 diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
51662 index e10ce103a5b4..94a0467d673b 100644
51663 --- a/drivers/staging/media/imx/imx-media-capture.c
51664 +++ b/drivers/staging/media/imx/imx-media-capture.c
51665 @@ -557,7 +557,7 @@ static int capture_validate_fmt(struct capture_priv *priv)
51666                 priv->vdev.fmt.fmt.pix.height != f.fmt.pix.height ||
51667                 priv->vdev.cc->cs != cc->cs ||
51668                 priv->vdev.compose.width != compose.width ||
51669 -               priv->vdev.compose.height != compose.height) ? -EINVAL : 0;
51670 +               priv->vdev.compose.height != compose.height) ? -EPIPE : 0;
51673  static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
51674 diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
51675 index 60aa02eb7d2a..6d9c49b39531 100644
51676 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c
51677 +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
51678 @@ -686,6 +686,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
51680         dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id);
51682 +       css_q = imgu_node_to_queue(node);
51683         for (i = 0; i < IPU3_CSS_QUEUES; i++) {
51684                 unsigned int inode = imgu_map_node(imgu, i);
51686 @@ -693,6 +694,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
51687                 if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
51688                         continue;
51690 +               /* CSS expects some format on OUT queue */
51691 +               if (i != IPU3_CSS_QUEUE_OUT &&
51692 +                   !imgu_pipe->nodes[inode].enabled) {
51693 +                       fmts[i] = NULL;
51694 +                       continue;
51695 +               }
51697 +               if (i == css_q) {
51698 +                       fmts[i] = &f->fmt.pix_mp;
51699 +                       continue;
51700 +               }
51702                 if (try) {
51703                         fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp,
51704                                           sizeof(struct v4l2_pix_format_mplane),
51705 @@ -705,10 +718,6 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
51706                         fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
51707                 }
51709 -               /* CSS expects some format on OUT queue */
51710 -               if (i != IPU3_CSS_QUEUE_OUT &&
51711 -                   !imgu_pipe->nodes[inode].enabled)
51712 -                       fmts[i] = NULL;
51713         }
51715         if (!try) {
51716 @@ -725,16 +734,10 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
51717                 rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
51718         }
51720 -       /*
51721 -        * imgu doesn't set the node to the value given by user
51722 -        * before we return success from this function, so set it here.
51723 -        */
51724 -       css_q = imgu_node_to_queue(node);
51725         if (!fmts[css_q]) {
51726                 ret = -EINVAL;
51727                 goto out;
51728         }
51729 -       *fmts[css_q] = f->fmt.pix_mp;
51731         if (try)
51732                 ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
51733 @@ -745,15 +748,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
51734         if (ret < 0)
51735                 goto out;
51737 -       if (try)
51738 -               f->fmt.pix_mp = *fmts[css_q];
51739 -       else
51740 -               f->fmt = imgu_pipe->nodes[node].vdev_fmt.fmt;
51741 +       /*
51742 +        * imgu doesn't set the node to the value given by user
51743 +        * before we return success from this function, so set it here.
51744 +        */
51745 +       if (!try)
51746 +               imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp;
51748  out:
51749         if (try) {
51750                 for (i = 0; i < IPU3_CSS_QUEUES; i++)
51751 -                       kfree(fmts[i]);
51752 +                       if (i != css_q)
51753 +                               kfree(fmts[i]);
51754         }
51756         return ret;
51757 diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
51758 index dae9073e7d3c..085397045b36 100644
51759 --- a/drivers/staging/media/omap4iss/iss.c
51760 +++ b/drivers/staging/media/omap4iss/iss.c
51761 @@ -1236,8 +1236,10 @@ static int iss_probe(struct platform_device *pdev)
51762         if (ret < 0)
51763                 goto error;
51765 -       if (!omap4iss_get(iss))
51766 +       if (!omap4iss_get(iss)) {
51767 +               ret = -EINVAL;
51768                 goto error;
51769 +       }
51771         ret = iss_reset(iss);
51772         if (ret < 0)
51773 diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
51774 index d3eb81ee8dc2..d821661d30f3 100644
51775 --- a/drivers/staging/media/rkvdec/rkvdec.c
51776 +++ b/drivers/staging/media/rkvdec/rkvdec.c
51777 @@ -55,16 +55,13 @@ static const struct v4l2_ctrl_ops rkvdec_ctrl_ops = {
51779  static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
51780         {
51781 -               .mandatory = true,
51782                 .cfg.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
51783         },
51784         {
51785 -               .mandatory = true,
51786                 .cfg.id = V4L2_CID_STATELESS_H264_SPS,
51787                 .cfg.ops = &rkvdec_ctrl_ops,
51788         },
51789         {
51790 -               .mandatory = true,
51791                 .cfg.id = V4L2_CID_STATELESS_H264_PPS,
51792         },
51793         {
51794 @@ -585,25 +582,7 @@ static const struct vb2_ops rkvdec_queue_ops = {
51796  static int rkvdec_request_validate(struct media_request *req)
51798 -       struct media_request_object *obj;
51799 -       const struct rkvdec_ctrls *ctrls;
51800 -       struct v4l2_ctrl_handler *hdl;
51801 -       struct rkvdec_ctx *ctx = NULL;
51802 -       unsigned int count, i;
51803 -       int ret;
51805 -       list_for_each_entry(obj, &req->objects, list) {
51806 -               if (vb2_request_object_is_buffer(obj)) {
51807 -                       struct vb2_buffer *vb;
51809 -                       vb = container_of(obj, struct vb2_buffer, req_obj);
51810 -                       ctx = vb2_get_drv_priv(vb->vb2_queue);
51811 -                       break;
51812 -               }
51813 -       }
51815 -       if (!ctx)
51816 -               return -EINVAL;
51817 +       unsigned int count;
51819         count = vb2_request_buffer_cnt(req);
51820         if (!count)
51821 @@ -611,31 +590,6 @@ static int rkvdec_request_validate(struct media_request *req)
51822         else if (count > 1)
51823                 return -EINVAL;
51825 -       hdl = v4l2_ctrl_request_hdl_find(req, &ctx->ctrl_hdl);
51826 -       if (!hdl)
51827 -               return -ENOENT;
51829 -       ret = 0;
51830 -       ctrls = ctx->coded_fmt_desc->ctrls;
51831 -       for (i = 0; ctrls && i < ctrls->num_ctrls; i++) {
51832 -               u32 id = ctrls->ctrls[i].cfg.id;
51833 -               struct v4l2_ctrl *ctrl;
51835 -               if (!ctrls->ctrls[i].mandatory)
51836 -                       continue;
51838 -               ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, id);
51839 -               if (!ctrl) {
51840 -                       ret = -ENOENT;
51841 -                       break;
51842 -               }
51843 -       }
51845 -       v4l2_ctrl_request_hdl_put(hdl);
51847 -       if (ret)
51848 -               return ret;
51850         return vb2_request_validate(req);
51853 @@ -1118,7 +1072,7 @@ static struct platform_driver rkvdec_driver = {
51854         .remove = rkvdec_remove,
51855         .driver = {
51856                    .name = "rkvdec",
51857 -                  .of_match_table = of_match_ptr(of_rkvdec_match),
51858 +                  .of_match_table = of_rkvdec_match,
51859                    .pm = &rkvdec_pm_ops,
51860         },
51861  };
51862 diff --git a/drivers/staging/media/rkvdec/rkvdec.h b/drivers/staging/media/rkvdec/rkvdec.h
51863 index 77a137cca88e..52ac3874c5e5 100644
51864 --- a/drivers/staging/media/rkvdec/rkvdec.h
51865 +++ b/drivers/staging/media/rkvdec/rkvdec.h
51866 @@ -25,7 +25,6 @@
51867  struct rkvdec_ctx;
51869  struct rkvdec_ctrl_desc {
51870 -       u32 mandatory : 1;
51871         struct v4l2_ctrl_config cfg;
51872  };
51874 diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
51875 index 7718c561823f..92ace87c1c7d 100644
51876 --- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
51877 +++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
51878 @@ -443,16 +443,17 @@
51879  #define VE_DEC_H265_STATUS_STCD_BUSY           BIT(21)
51880  #define VE_DEC_H265_STATUS_WB_BUSY             BIT(20)
51881  #define VE_DEC_H265_STATUS_BS_DMA_BUSY         BIT(19)
51882 -#define VE_DEC_H265_STATUS_IQIT_BUSY           BIT(18)
51883 +#define VE_DEC_H265_STATUS_IT_BUSY             BIT(18)
51884  #define VE_DEC_H265_STATUS_INTER_BUSY          BIT(17)
51885  #define VE_DEC_H265_STATUS_MORE_DATA           BIT(16)
51886 -#define VE_DEC_H265_STATUS_VLD_BUSY            BIT(14)
51887 -#define VE_DEC_H265_STATUS_DEBLOCKING_BUSY     BIT(13)
51888 -#define VE_DEC_H265_STATUS_DEBLOCKING_DRAM_BUSY        BIT(12)
51889 -#define VE_DEC_H265_STATUS_INTRA_BUSY          BIT(11)
51890 -#define VE_DEC_H265_STATUS_SAO_BUSY            BIT(10)
51891 -#define VE_DEC_H265_STATUS_MVP_BUSY            BIT(9)
51892 -#define VE_DEC_H265_STATUS_SWDEC_BUSY          BIT(8)
51893 +#define VE_DEC_H265_STATUS_DBLK_BUSY           BIT(15)
51894 +#define VE_DEC_H265_STATUS_IREC_BUSY           BIT(14)
51895 +#define VE_DEC_H265_STATUS_INTRA_BUSY          BIT(13)
51896 +#define VE_DEC_H265_STATUS_MCRI_BUSY           BIT(12)
51897 +#define VE_DEC_H265_STATUS_IQIT_BUSY           BIT(11)
51898 +#define VE_DEC_H265_STATUS_MVP_BUSY            BIT(10)
51899 +#define VE_DEC_H265_STATUS_IS_BUSY             BIT(9)
51900 +#define VE_DEC_H265_STATUS_VLD_BUSY            BIT(8)
51901  #define VE_DEC_H265_STATUS_OVER_TIME           BIT(3)
51902  #define VE_DEC_H265_STATUS_VLD_DATA_REQ                BIT(2)
51903  #define VE_DEC_H265_STATUS_ERROR               BIT(1)
51904 diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
51905 index 5516be3af898..c1d52190e1bd 100644
51906 --- a/drivers/staging/qlge/qlge_main.c
51907 +++ b/drivers/staging/qlge/qlge_main.c
51908 @@ -4550,7 +4550,7 @@ static int qlge_probe(struct pci_dev *pdev,
51909         struct net_device *ndev = NULL;
51910         struct devlink *devlink;
51911         static int cards_found;
51912 -       int err = 0;
51913 +       int err;
51915         devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter));
51916         if (!devlink)
51917 @@ -4561,8 +4561,10 @@ static int qlge_probe(struct pci_dev *pdev,
51918         ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
51919                                  min(MAX_CPUS,
51920                                      netif_get_num_default_rss_queues()));
51921 -       if (!ndev)
51922 +       if (!ndev) {
51923 +               err = -ENOMEM;
51924                 goto devlink_free;
51925 +       }
51927         ndev_priv = netdev_priv(ndev);
51928         ndev_priv->qdev = qdev;
51929 diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
51930 index 9fc4adc83d77..b5a313649f44 100644
51931 --- a/drivers/staging/rtl8192u/r8192U_core.c
51932 +++ b/drivers/staging/rtl8192u/r8192U_core.c
51933 @@ -3210,7 +3210,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
51934                              u32 *TotalRxDataNum)
51936         u16                     SlotIndex;
51937 -       u8                      i;
51938 +       u16                     i;
51940         *TotalRxBcnNum = 0;
51941         *TotalRxDataNum = 0;
51942 diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
51943 index 898add4d1fc8..0aa9dd467349 100644
51944 --- a/drivers/staging/rts5208/rtsx.c
51945 +++ b/drivers/staging/rts5208/rtsx.c
51946 @@ -477,7 +477,7 @@ static int rtsx_polling_thread(void *__dev)
51948         for (;;) {
51949                 set_current_state(TASK_INTERRUPTIBLE);
51950 -               schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
51951 +               schedule_msec_hrtimeout((POLLING_INTERVAL));
51953                 /* lock the device pointers */
51954                 mutex_lock(&dev->dev_mutex);
51955 diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
51956 index 0433536930a9..d8726f28843f 100644
51957 --- a/drivers/staging/unisys/visornic/visornic_main.c
51958 +++ b/drivers/staging/unisys/visornic/visornic_main.c
51959 @@ -549,7 +549,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
51960                 }
51961                 set_current_state(TASK_INTERRUPTIBLE);
51962                 spin_unlock_irqrestore(&devdata->priv_lock, flags);
51963 -               wait += schedule_timeout(msecs_to_jiffies(10));
51964 +               wait += schedule_msec_hrtimeout((10));
51965                 spin_lock_irqsave(&devdata->priv_lock, flags);
51966         }
51968 @@ -560,7 +560,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
51969                 while (1) {
51970                         set_current_state(TASK_INTERRUPTIBLE);
51971                         spin_unlock_irqrestore(&devdata->priv_lock, flags);
51972 -                       schedule_timeout(msecs_to_jiffies(10));
51973 +                       schedule_msec_hrtimeout((10));
51974                         spin_lock_irqsave(&devdata->priv_lock, flags);
51975                         if (atomic_read(&devdata->usage))
51976                                 break;
51977 @@ -714,7 +714,7 @@ static int visornic_enable_with_timeout(struct net_device *netdev,
51978                 }
51979                 set_current_state(TASK_INTERRUPTIBLE);
51980                 spin_unlock_irqrestore(&devdata->priv_lock, flags);
51981 -               wait += schedule_timeout(msecs_to_jiffies(10));
51982 +               wait += schedule_msec_hrtimeout((10));
51983                 spin_lock_irqsave(&devdata->priv_lock, flags);
51984         }
51986 diff --git a/drivers/staging/wimax/i2400m/op-rfkill.c b/drivers/staging/wimax/i2400m/op-rfkill.c
51987 index fbddf2e18c14..44698a1aae87 100644
51988 --- a/drivers/staging/wimax/i2400m/op-rfkill.c
51989 +++ b/drivers/staging/wimax/i2400m/op-rfkill.c
51990 @@ -86,7 +86,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
51991         if (cmd == NULL)
51992                 goto error_alloc;
51993         cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
51994 -       cmd->hdr.length = sizeof(cmd->sw_rf);
51995 +       cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf));
51996         cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
51997         cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
51998         cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
51999 diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
52000 index 9ee797b8cb7e..508b49b0eaf5 100644
52001 --- a/drivers/target/target_core_pscsi.c
52002 +++ b/drivers/target/target_core_pscsi.c
52003 @@ -620,8 +620,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
52004                         unsigned char *buf;
52006                         buf = transport_kmap_data_sg(cmd);
52007 -                       if (!buf)
52008 +                       if (!buf) {
52009                                 ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
52010 +                       }
52012                         if (cdb[0] == MODE_SENSE_10) {
52013                                 if (!(buf[3] & 0x80))
52014 diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
52015 index 319a1e701163..ddb8f9ecf307 100644
52016 --- a/drivers/tee/optee/core.c
52017 +++ b/drivers/tee/optee/core.c
52018 @@ -79,16 +79,6 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
52019                                 return rc;
52020                         p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
52021                         p->u.memref.shm = shm;
52023 -                       /* Check that the memref is covered by the shm object */
52024 -                       if (p->u.memref.size) {
52025 -                               size_t o = p->u.memref.shm_offs +
52026 -                                          p->u.memref.size - 1;
52028 -                               rc = tee_shm_get_pa(shm, o, NULL);
52029 -                               if (rc)
52030 -                                       return rc;
52031 -                       }
52032                         break;
52033                 case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
52034                 case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
52035 diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
52036 index 10af3341e5ea..6956581ed7a4 100644
52037 --- a/drivers/thermal/cpufreq_cooling.c
52038 +++ b/drivers/thermal/cpufreq_cooling.c
52039 @@ -125,7 +125,7 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
52041         int i;
52043 -       for (i = cpufreq_cdev->max_level; i >= 0; i--) {
52044 +       for (i = cpufreq_cdev->max_level; i > 0; i--) {
52045                 if (power >= cpufreq_cdev->em->table[i].power)
52046                         break;
52047         }
52048 diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
52049 index aaa07180ab48..645432ce6365 100644
52050 --- a/drivers/thermal/gov_fair_share.c
52051 +++ b/drivers/thermal/gov_fair_share.c
52052 @@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
52053         int total_instance = 0;
52054         int cur_trip_level = get_trip_level(tz);
52056 +       mutex_lock(&tz->lock);
52058         list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
52059                 if (instance->trip != trip)
52060                         continue;
52061 @@ -110,6 +112,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
52062                 mutex_unlock(&instance->cdev->lock);
52063                 thermal_cdev_update(cdev);
52064         }
52066 +       mutex_unlock(&tz->lock);
52067         return 0;
52070 diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
52071 index d8ce3a687b80..3c4c0516e58a 100644
52072 --- a/drivers/thermal/qcom/tsens.c
52073 +++ b/drivers/thermal/qcom/tsens.c
52074 @@ -755,8 +755,10 @@ int __init init_common(struct tsens_priv *priv)
52075                 for (i = VER_MAJOR; i <= VER_STEP; i++) {
52076                         priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map,
52077                                                               priv->fields[i]);
52078 -                       if (IS_ERR(priv->rf[i]))
52079 -                               return PTR_ERR(priv->rf[i]);
52080 +                       if (IS_ERR(priv->rf[i])) {
52081 +                               ret = PTR_ERR(priv->rf[i]);
52082 +                               goto err_put_device;
52083 +                       }
52084                 }
52085                 ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor);
52086                 if (ret)
52087 diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
52088 index 69ef12f852b7..5b76f9a1280d 100644
52089 --- a/drivers/thermal/thermal_of.c
52090 +++ b/drivers/thermal/thermal_of.c
52091 @@ -704,14 +704,17 @@ static int thermal_of_populate_bind_params(struct device_node *np,
52093         count = of_count_phandle_with_args(np, "cooling-device",
52094                                            "#cooling-cells");
52095 -       if (!count) {
52096 +       if (count <= 0) {
52097                 pr_err("Add a cooling_device property with at least one device\n");
52098 +               ret = -ENOENT;
52099                 goto end;
52100         }
52102         __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
52103 -       if (!__tcbp)
52104 +       if (!__tcbp) {
52105 +               ret = -ENOMEM;
52106                 goto end;
52107 +       }
52109         for (i = 0; i < count; i++) {
52110                 ret = of_parse_phandle_with_args(np, "cooling-device",
52111 diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
52112 index 18b78ea110ef..ecda5e18d23f 100644
52113 --- a/drivers/tty/amiserial.c
52114 +++ b/drivers/tty/amiserial.c
52115 @@ -970,6 +970,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
52116         if (!serial_isroot()) {
52117                 if ((ss->baud_base != state->baud_base) ||
52118                     (ss->close_delay != port->close_delay) ||
52119 +                   (ss->closing_wait != port->closing_wait) ||
52120                     (ss->xmit_fifo_size != state->xmit_fifo_size) ||
52121                     ((ss->flags & ~ASYNC_USR_MASK) !=
52122                      (port->flags & ~ASYNC_USR_MASK))) {
52123 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
52124 index 9f13f7d49dd7..f9f14104bd2c 100644
52125 --- a/drivers/tty/moxa.c
52126 +++ b/drivers/tty/moxa.c
52127 @@ -2040,7 +2040,7 @@ static int moxa_get_serial_info(struct tty_struct *tty,
52128         ss->line = info->port.tty->index,
52129         ss->flags = info->port.flags,
52130         ss->baud_base = 921600,
52131 -       ss->close_delay = info->port.close_delay;
52132 +       ss->close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
52133         mutex_unlock(&info->port.mutex);
52134         return 0;
52136 @@ -2050,6 +2050,7 @@ static int moxa_set_serial_info(struct tty_struct *tty,
52137                 struct serial_struct *ss)
52139         struct moxa_port *info = tty->driver_data;
52140 +       unsigned int close_delay;
52142         if (tty->index == MAX_PORTS)
52143                 return -EINVAL;
52144 @@ -2061,19 +2062,24 @@ static int moxa_set_serial_info(struct tty_struct *tty,
52145                         ss->baud_base != 921600)
52146                 return -EPERM;
52148 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
52150         mutex_lock(&info->port.mutex);
52151         if (!capable(CAP_SYS_ADMIN)) {
52152 -               if (((ss->flags & ~ASYNC_USR_MASK) !=
52153 +               if (close_delay != info->port.close_delay ||
52154 +                   ss->type != info->type ||
52155 +                   ((ss->flags & ~ASYNC_USR_MASK) !=
52156                      (info->port.flags & ~ASYNC_USR_MASK))) {
52157                         mutex_unlock(&info->port.mutex);
52158                         return -EPERM;
52159                 }
52160 -       }
52161 -       info->port.close_delay = ss->close_delay * HZ / 100;
52162 +       } else {
52163 +               info->port.close_delay = close_delay;
52165 -       MoxaSetFifo(info, ss->type == PORT_16550A);
52166 +               MoxaSetFifo(info, ss->type == PORT_16550A);
52168 -       info->type = ss->type;
52169 +               info->type = ss->type;
52170 +       }
52171         mutex_unlock(&info->port.mutex);
52172         return 0;
52174 diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
52175 index 4203b64bccdb..2d8e76263a25 100644
52176 --- a/drivers/tty/mxser.c
52177 +++ b/drivers/tty/mxser.c
52178 @@ -1208,19 +1208,26 @@ static int mxser_get_serial_info(struct tty_struct *tty,
52180         struct mxser_port *info = tty->driver_data;
52181         struct tty_port *port = &info->port;
52182 +       unsigned int closing_wait, close_delay;
52184         if (tty->index == MXSER_PORTS)
52185                 return -ENOTTY;
52187         mutex_lock(&port->mutex);
52189 +       close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
52190 +       closing_wait = info->port.closing_wait;
52191 +       if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
52192 +               closing_wait = jiffies_to_msecs(closing_wait) / 10;
52194         ss->type = info->type,
52195         ss->line = tty->index,
52196         ss->port = info->ioaddr,
52197         ss->irq = info->board->irq,
52198         ss->flags = info->port.flags,
52199         ss->baud_base = info->baud_base,
52200 -       ss->close_delay = info->port.close_delay,
52201 -       ss->closing_wait = info->port.closing_wait,
52202 +       ss->close_delay = close_delay;
52203 +       ss->closing_wait = closing_wait;
52204         ss->custom_divisor = info->custom_divisor,
52205         mutex_unlock(&port->mutex);
52206         return 0;
52207 @@ -1233,7 +1240,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
52208         struct tty_port *port = &info->port;
52209         speed_t baud;
52210         unsigned long sl_flags;
52211 -       unsigned int flags;
52212 +       unsigned int flags, close_delay, closing_wait;
52213         int retval = 0;
52215         if (tty->index == MXSER_PORTS)
52216 @@ -1255,9 +1262,15 @@ static int mxser_set_serial_info(struct tty_struct *tty,
52218         flags = port->flags & ASYNC_SPD_MASK;
52220 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
52221 +       closing_wait = ss->closing_wait;
52222 +       if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
52223 +               closing_wait = msecs_to_jiffies(closing_wait * 10);
52225         if (!capable(CAP_SYS_ADMIN)) {
52226                 if ((ss->baud_base != info->baud_base) ||
52227 -                               (ss->close_delay != info->port.close_delay) ||
52228 +                               (close_delay != info->port.close_delay) ||
52229 +                               (closing_wait != info->port.closing_wait) ||
52230                                 ((ss->flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK))) {
52231                         mutex_unlock(&port->mutex);
52232                         return -EPERM;
52233 @@ -1271,8 +1284,8 @@ static int mxser_set_serial_info(struct tty_struct *tty,
52234                  */
52235                 port->flags = ((port->flags & ~ASYNC_FLAGS) |
52236                                 (ss->flags & ASYNC_FLAGS));
52237 -               port->close_delay = ss->close_delay * HZ / 100;
52238 -               port->closing_wait = ss->closing_wait * HZ / 100;
52239 +               port->close_delay = close_delay;
52240 +               port->closing_wait = closing_wait;
52241                 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
52242                                 (ss->baud_base != info->baud_base ||
52243                                 ss->custom_divisor !=
52244 @@ -1284,11 +1297,11 @@ static int mxser_set_serial_info(struct tty_struct *tty,
52245                         baud = ss->baud_base / ss->custom_divisor;
52246                         tty_encode_baud_rate(tty, baud, baud);
52247                 }
52248 -       }
52250 -       info->type = ss->type;
52251 +               info->type = ss->type;
52253 -       process_txrx_fifo(info);
52254 +               process_txrx_fifo(info);
52255 +       }
52257         if (tty_port_initialized(port)) {
52258                 if (flags != (port->flags & ASYNC_SPD_MASK)) {
52259 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
52260 index 51dafc06f541..2406653d38b7 100644
52261 --- a/drivers/tty/n_gsm.c
52262 +++ b/drivers/tty/n_gsm.c
52263 @@ -2384,8 +2384,18 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
52264                 /* Don't register device 0 - this is the control channel and not
52265                    a usable tty interface */
52266                 base = mux_num_to_base(gsm); /* Base for this MUX */
52267 -               for (i = 1; i < NUM_DLCI; i++)
52268 -                       tty_register_device(gsm_tty_driver, base + i, NULL);
52269 +               for (i = 1; i < NUM_DLCI; i++) {
52270 +                       struct device *dev;
52272 +                       dev = tty_register_device(gsm_tty_driver,
52273 +                                                       base + i, NULL);
52274 +                       if (IS_ERR(dev)) {
52275 +                               for (i--; i >= 1; i--)
52276 +                                       tty_unregister_device(gsm_tty_driver,
52277 +                                                               base + i);
52278 +                               return PTR_ERR(dev);
52279 +                       }
52280 +               }
52281         }
52282         return ret;
52284 diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
52285 index 64842f3539e1..0b06770642cb 100644
52286 --- a/drivers/tty/serial/liteuart.c
52287 +++ b/drivers/tty/serial/liteuart.c
52288 @@ -270,8 +270,8 @@ static int liteuart_probe(struct platform_device *pdev)
52290         /* get membase */
52291         port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
52292 -       if (!port->membase)
52293 -               return -ENXIO;
52294 +       if (IS_ERR(port->membase))
52295 +               return PTR_ERR(port->membase);
52297         /* values not from device tree */
52298         port->dev = &pdev->dev;
52299 diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
52300 index 76b94d0ff586..84e8158088cd 100644
52301 --- a/drivers/tty/serial/omap-serial.c
52302 +++ b/drivers/tty/serial/omap-serial.c
52303 @@ -159,6 +159,8 @@ struct uart_omap_port {
52304         u32                     calc_latency;
52305         struct work_struct      qos_work;
52306         bool                    is_suspending;
52308 +       unsigned int            rs485_tx_filter_count;
52309  };
52311  #define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
52312 @@ -302,7 +304,8 @@ static void serial_omap_stop_tx(struct uart_port *port)
52313                         serial_out(up, UART_OMAP_SCR, up->scr);
52314                         res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
52315                                 1 : 0;
52316 -                       if (gpiod_get_value(up->rts_gpiod) != res) {
52317 +                       if (up->rts_gpiod &&
52318 +                           gpiod_get_value(up->rts_gpiod) != res) {
52319                                 if (port->rs485.delay_rts_after_send > 0)
52320                                         mdelay(
52321                                         port->rs485.delay_rts_after_send);
52322 @@ -328,19 +331,6 @@ static void serial_omap_stop_tx(struct uart_port *port)
52323                 serial_out(up, UART_IER, up->ier);
52324         }
52326 -       if ((port->rs485.flags & SER_RS485_ENABLED) &&
52327 -           !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
52328 -               /*
52329 -                * Empty the RX FIFO, we are not interested in anything
52330 -                * received during the half-duplex transmission.
52331 -                */
52332 -               serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_RCVR);
52333 -               /* Re-enable RX interrupts */
52334 -               up->ier |= UART_IER_RLSI | UART_IER_RDI;
52335 -               up->port.read_status_mask |= UART_LSR_DR;
52336 -               serial_out(up, UART_IER, up->ier);
52337 -       }
52339         pm_runtime_mark_last_busy(up->dev);
52340         pm_runtime_put_autosuspend(up->dev);
52342 @@ -366,6 +356,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
52343                 serial_out(up, UART_TX, up->port.x_char);
52344                 up->port.icount.tx++;
52345                 up->port.x_char = 0;
52346 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
52347 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
52348 +                       up->rs485_tx_filter_count++;
52350                 return;
52351         }
52352         if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
52353 @@ -377,6 +371,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
52354                 serial_out(up, UART_TX, xmit->buf[xmit->tail]);
52355                 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
52356                 up->port.icount.tx++;
52357 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
52358 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
52359 +                       up->rs485_tx_filter_count++;
52361                 if (uart_circ_empty(xmit))
52362                         break;
52363         } while (--count > 0);
52364 @@ -411,7 +409,7 @@ static void serial_omap_start_tx(struct uart_port *port)
52366                 /* if rts not already enabled */
52367                 res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
52368 -               if (gpiod_get_value(up->rts_gpiod) != res) {
52369 +               if (up->rts_gpiod && gpiod_get_value(up->rts_gpiod) != res) {
52370                         gpiod_set_value(up->rts_gpiod, res);
52371                         if (port->rs485.delay_rts_before_send > 0)
52372                                 mdelay(port->rs485.delay_rts_before_send);
52373 @@ -420,7 +418,7 @@ static void serial_omap_start_tx(struct uart_port *port)
52375         if ((port->rs485.flags & SER_RS485_ENABLED) &&
52376             !(port->rs485.flags & SER_RS485_RX_DURING_TX))
52377 -               serial_omap_stop_rx(port);
52378 +               up->rs485_tx_filter_count = 0;
52380         serial_omap_enable_ier_thri(up);
52381         pm_runtime_mark_last_busy(up->dev);
52382 @@ -491,8 +489,13 @@ static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
52383          * Read one data character out to avoid stalling the receiver according
52384          * to the table 23-246 of the omap4 TRM.
52385          */
52386 -       if (likely(lsr & UART_LSR_DR))
52387 +       if (likely(lsr & UART_LSR_DR)) {
52388                 serial_in(up, UART_RX);
52389 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
52390 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
52391 +                   up->rs485_tx_filter_count)
52392 +                       up->rs485_tx_filter_count--;
52393 +       }
52395         up->port.icount.rx++;
52396         flag = TTY_NORMAL;
52397 @@ -543,6 +546,13 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
52398                 return;
52400         ch = serial_in(up, UART_RX);
52401 +       if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
52402 +           !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
52403 +           up->rs485_tx_filter_count) {
52404 +               up->rs485_tx_filter_count--;
52405 +               return;
52406 +       }
52408         flag = TTY_NORMAL;
52409         up->port.icount.rx++;
52411 @@ -1407,18 +1417,13 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
52412         /* store new config */
52413         port->rs485 = *rs485;
52415 -       /*
52416 -        * Just as a precaution, only allow rs485
52417 -        * to be enabled if the gpio pin is valid
52418 -        */
52419         if (up->rts_gpiod) {
52420                 /* enable / disable rts */
52421                 val = (port->rs485.flags & SER_RS485_ENABLED) ?
52422                         SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
52423                 val = (port->rs485.flags & val) ? 1 : 0;
52424                 gpiod_set_value(up->rts_gpiod, val);
52425 -       } else
52426 -               port->rs485.flags &= ~SER_RS485_ENABLED;
52427 +       }
52429         /* Enable interrupts */
52430         up->ier = mode;
52431 diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
52432 index f86ec2d2635b..9adb8362578c 100644
52433 --- a/drivers/tty/serial/sc16is7xx.c
52434 +++ b/drivers/tty/serial/sc16is7xx.c
52435 @@ -1196,7 +1196,7 @@ static int sc16is7xx_probe(struct device *dev,
52436         ret = regmap_read(regmap,
52437                           SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
52438         if (ret < 0)
52439 -               return ret;
52440 +               return -EPROBE_DEFER;
52442         /* Alloc port structure */
52443         s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
52444 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
52445 index ba31e97d3d96..43f02ed055d5 100644
52446 --- a/drivers/tty/serial/serial_core.c
52447 +++ b/drivers/tty/serial/serial_core.c
52448 @@ -1305,7 +1305,7 @@ static int uart_set_rs485_config(struct uart_port *port,
52449         unsigned long flags;
52451         if (!port->rs485_config)
52452 -               return -ENOIOCTLCMD;
52453 +               return -ENOTTY;
52455         if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
52456                 return -EFAULT;
52457 @@ -1329,7 +1329,7 @@ static int uart_get_iso7816_config(struct uart_port *port,
52458         struct serial_iso7816 aux;
52460         if (!port->iso7816_config)
52461 -               return -ENOIOCTLCMD;
52462 +               return -ENOTTY;
52464         spin_lock_irqsave(&port->lock, flags);
52465         aux = port->iso7816;
52466 @@ -1349,7 +1349,7 @@ static int uart_set_iso7816_config(struct uart_port *port,
52467         unsigned long flags;
52469         if (!port->iso7816_config)
52470 -               return -ENOIOCTLCMD;
52471 +               return -ENOTTY;
52473         if (copy_from_user(&iso7816, iso7816_user, sizeof(*iso7816_user)))
52474                 return -EFAULT;
52475 diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
52476 index b3675cf25a69..99dfa884cbef 100644
52477 --- a/drivers/tty/serial/stm32-usart.c
52478 +++ b/drivers/tty/serial/stm32-usart.c
52479 @@ -214,12 +214,14 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
52480         struct tty_port *tport = &port->state->port;
52481         struct stm32_port *stm32_port = to_stm32_port(port);
52482         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
52483 -       unsigned long c;
52484 +       unsigned long c, flags;
52485         u32 sr;
52486         char flag;
52488 -       if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
52489 -               pm_wakeup_event(tport->tty->dev, 0);
52490 +       if (threaded)
52491 +               spin_lock_irqsave(&port->lock, flags);
52492 +       else
52493 +               spin_lock(&port->lock);
52495         while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
52496                                       threaded)) {
52497 @@ -276,9 +278,12 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
52498                 uart_insert_char(port, sr, USART_SR_ORE, c, flag);
52499         }
52501 -       spin_unlock(&port->lock);
52502 +       if (threaded)
52503 +               spin_unlock_irqrestore(&port->lock, flags);
52504 +       else
52505 +               spin_unlock(&port->lock);
52507         tty_flip_buffer_push(tport);
52508 -       spin_lock(&port->lock);
52511  static void stm32_usart_tx_dma_complete(void *arg)
52512 @@ -286,12 +291,16 @@ static void stm32_usart_tx_dma_complete(void *arg)
52513         struct uart_port *port = arg;
52514         struct stm32_port *stm32port = to_stm32_port(port);
52515         const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
52516 +       unsigned long flags;
52518 +       dmaengine_terminate_async(stm32port->tx_ch);
52519         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
52520         stm32port->tx_dma_busy = false;
52522         /* Let's see if we have pending data to send */
52523 +       spin_lock_irqsave(&port->lock, flags);
52524         stm32_usart_transmit_chars(port);
52525 +       spin_unlock_irqrestore(&port->lock, flags);
52528  static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
52529 @@ -455,29 +464,34 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
52530  static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
52532         struct uart_port *port = ptr;
52533 +       struct tty_port *tport = &port->state->port;
52534         struct stm32_port *stm32_port = to_stm32_port(port);
52535         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
52536         u32 sr;
52538 -       spin_lock(&port->lock);
52540         sr = readl_relaxed(port->membase + ofs->isr);
52542         if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
52543                 writel_relaxed(USART_ICR_RTOCF,
52544                                port->membase + ofs->icr);
52546 -       if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG)
52547 +       if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
52548 +               /* Clear wake up flag and disable wake up interrupt */
52549                 writel_relaxed(USART_ICR_WUCF,
52550                                port->membase + ofs->icr);
52551 +               stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
52552 +               if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
52553 +                       pm_wakeup_event(tport->tty->dev, 0);
52554 +       }
52556         if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
52557                 stm32_usart_receive_chars(port, false);
52559 -       if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch))
52560 +       if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
52561 +               spin_lock(&port->lock);
52562                 stm32_usart_transmit_chars(port);
52564 -       spin_unlock(&port->lock);
52565 +               spin_unlock(&port->lock);
52566 +       }
52568         if (stm32_port->rx_ch)
52569                 return IRQ_WAKE_THREAD;
52570 @@ -490,13 +504,9 @@ static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
52571         struct uart_port *port = ptr;
52572         struct stm32_port *stm32_port = to_stm32_port(port);
52574 -       spin_lock(&port->lock);
52576         if (stm32_port->rx_ch)
52577                 stm32_usart_receive_chars(port, true);
52579 -       spin_unlock(&port->lock);
52581         return IRQ_HANDLED;
52584 @@ -505,7 +515,10 @@ static unsigned int stm32_usart_tx_empty(struct uart_port *port)
52585         struct stm32_port *stm32_port = to_stm32_port(port);
52586         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
52588 -       return readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE;
52589 +       if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
52590 +               return TIOCSER_TEMT;
52592 +       return 0;
52595  static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
52596 @@ -634,6 +647,7 @@ static int stm32_usart_startup(struct uart_port *port)
52598         struct stm32_port *stm32_port = to_stm32_port(port);
52599         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
52600 +       const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
52601         const char *name = to_platform_device(port->dev)->name;
52602         u32 val;
52603         int ret;
52604 @@ -646,21 +660,10 @@ static int stm32_usart_startup(struct uart_port *port)
52606         /* RX FIFO Flush */
52607         if (ofs->rqr != UNDEF_REG)
52608 -               stm32_usart_set_bits(port, ofs->rqr, USART_RQR_RXFRQ);
52610 -       /* Tx and RX FIFO configuration */
52611 -       if (stm32_port->fifoen) {
52612 -               val = readl_relaxed(port->membase + ofs->cr3);
52613 -               val &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
52614 -               val |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
52615 -               val |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
52616 -               writel_relaxed(val, port->membase + ofs->cr3);
52617 -       }
52618 +               writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
52620 -       /* RX FIFO enabling */
52621 -       val = stm32_port->cr1_irq | USART_CR1_RE;
52622 -       if (stm32_port->fifoen)
52623 -               val |= USART_CR1_FIFOEN;
52624 +       /* RX enabling */
52625 +       val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
52626         stm32_usart_set_bits(port, ofs->cr1, val);
52628         return 0;
52629 @@ -691,6 +694,11 @@ static void stm32_usart_shutdown(struct uart_port *port)
52630         if (ret)
52631                 dev_err(port->dev, "Transmission is not complete\n");
52633 +       /* flush RX & TX FIFO */
52634 +       if (ofs->rqr != UNDEF_REG)
52635 +               writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
52636 +                              port->membase + ofs->rqr);
52638         stm32_usart_clr_bits(port, ofs->cr1, val);
52640         free_irq(port->irq, port);
52641 @@ -737,8 +745,9 @@ static void stm32_usart_set_termios(struct uart_port *port,
52642         unsigned int baud, bits;
52643         u32 usartdiv, mantissa, fraction, oversampling;
52644         tcflag_t cflag = termios->c_cflag;
52645 -       u32 cr1, cr2, cr3;
52646 +       u32 cr1, cr2, cr3, isr;
52647         unsigned long flags;
52648 +       int ret;
52650         if (!stm32_port->hw_flow_control)
52651                 cflag &= ~CRTSCTS;
52652 @@ -747,21 +756,36 @@ static void stm32_usart_set_termios(struct uart_port *port,
52654         spin_lock_irqsave(&port->lock, flags);
52656 +       ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
52657 +                                               isr,
52658 +                                               (isr & USART_SR_TC),
52659 +                                               10, 100000);
52661 +       /* Send the TC error message only when ISR_TC is not set. */
52662 +       if (ret)
52663 +               dev_err(port->dev, "Transmission is not complete\n");
52665         /* Stop serial port and reset value */
52666         writel_relaxed(0, port->membase + ofs->cr1);
52668         /* flush RX & TX FIFO */
52669         if (ofs->rqr != UNDEF_REG)
52670 -               stm32_usart_set_bits(port, ofs->rqr,
52671 -                                    USART_RQR_TXFRQ | USART_RQR_RXFRQ);
52672 +               writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
52673 +                              port->membase + ofs->rqr);
52675         cr1 = USART_CR1_TE | USART_CR1_RE;
52676         if (stm32_port->fifoen)
52677                 cr1 |= USART_CR1_FIFOEN;
52678         cr2 = 0;
52680 +       /* Tx and RX FIFO configuration */
52681         cr3 = readl_relaxed(port->membase + ofs->cr3);
52682 -       cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTCFG_MASK | USART_CR3_RXFTIE
52683 -               | USART_CR3_TXFTCFG_MASK;
52684 +       cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
52685 +       if (stm32_port->fifoen) {
52686 +               cr3 &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
52687 +               cr3 |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
52688 +               cr3 |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
52689 +       }
52691         if (cflag & CSTOPB)
52692                 cr2 |= USART_CR2_STOP_2B;
52693 @@ -817,12 +841,6 @@ static void stm32_usart_set_termios(struct uart_port *port,
52694                 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
52695         }
52697 -       /* Handle modem control interrupts */
52698 -       if (UART_ENABLE_MS(port, termios->c_cflag))
52699 -               stm32_usart_enable_ms(port);
52700 -       else
52701 -               stm32_usart_disable_ms(port);
52703         usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
52705         /*
52706 @@ -892,12 +910,24 @@ static void stm32_usart_set_termios(struct uart_port *port,
52707                 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
52708         }
52710 +       /* Configure wake up from low power on start bit detection */
52711 +       if (stm32_port->wakeirq > 0) {
52712 +               cr3 &= ~USART_CR3_WUS_MASK;
52713 +               cr3 |= USART_CR3_WUS_START_BIT;
52714 +       }
52716         writel_relaxed(cr3, port->membase + ofs->cr3);
52717         writel_relaxed(cr2, port->membase + ofs->cr2);
52718         writel_relaxed(cr1, port->membase + ofs->cr1);
52720         stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
52721         spin_unlock_irqrestore(&port->lock, flags);
52723 +       /* Handle modem control interrupts */
52724 +       if (UART_ENABLE_MS(port, termios->c_cflag))
52725 +               stm32_usart_enable_ms(port);
52726 +       else
52727 +               stm32_usart_disable_ms(port);
52730  static const char *stm32_usart_type(struct uart_port *port)
52731 @@ -1252,10 +1282,6 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
52732                 device_set_wakeup_enable(&pdev->dev, false);
52733         }
52735 -       ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
52736 -       if (ret)
52737 -               goto err_wirq;
52739         ret = stm32_usart_of_dma_rx_probe(stm32port, pdev);
52740         if (ret)
52741                 dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");
52742 @@ -1269,11 +1295,40 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
52743         pm_runtime_get_noresume(&pdev->dev);
52744         pm_runtime_set_active(&pdev->dev);
52745         pm_runtime_enable(&pdev->dev);
52747 +       ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
52748 +       if (ret)
52749 +               goto err_port;
52751         pm_runtime_put_sync(&pdev->dev);
52753         return 0;
52755 -err_wirq:
52756 +err_port:
52757 +       pm_runtime_disable(&pdev->dev);
52758 +       pm_runtime_set_suspended(&pdev->dev);
52759 +       pm_runtime_put_noidle(&pdev->dev);
52761 +       if (stm32port->rx_ch) {
52762 +               dmaengine_terminate_async(stm32port->rx_ch);
52763 +               dma_release_channel(stm32port->rx_ch);
52764 +       }
52766 +       if (stm32port->rx_dma_buf)
52767 +               dma_free_coherent(&pdev->dev,
52768 +                                 RX_BUF_L, stm32port->rx_buf,
52769 +                                 stm32port->rx_dma_buf);
52771 +       if (stm32port->tx_ch) {
52772 +               dmaengine_terminate_async(stm32port->tx_ch);
52773 +               dma_release_channel(stm32port->tx_ch);
52774 +       }
52776 +       if (stm32port->tx_dma_buf)
52777 +               dma_free_coherent(&pdev->dev,
52778 +                                 TX_BUF_L, stm32port->tx_buf,
52779 +                                 stm32port->tx_dma_buf);
52781         if (stm32port->wakeirq > 0)
52782                 dev_pm_clear_wake_irq(&pdev->dev);
52784 @@ -1295,11 +1350,20 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
52785         int err;
52787         pm_runtime_get_sync(&pdev->dev);
52788 +       err = uart_remove_one_port(&stm32_usart_driver, port);
52789 +       if (err)
52790 +               return(err);
52792 +       pm_runtime_disable(&pdev->dev);
52793 +       pm_runtime_set_suspended(&pdev->dev);
52794 +       pm_runtime_put_noidle(&pdev->dev);
52796         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
52798 -       if (stm32_port->rx_ch)
52799 +       if (stm32_port->rx_ch) {
52800 +               dmaengine_terminate_async(stm32_port->rx_ch);
52801                 dma_release_channel(stm32_port->rx_ch);
52802 +       }
52804         if (stm32_port->rx_dma_buf)
52805                 dma_free_coherent(&pdev->dev,
52806 @@ -1308,8 +1372,10 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
52808         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
52810 -       if (stm32_port->tx_ch)
52811 +       if (stm32_port->tx_ch) {
52812 +               dmaengine_terminate_async(stm32_port->tx_ch);
52813                 dma_release_channel(stm32_port->tx_ch);
52814 +       }
52816         if (stm32_port->tx_dma_buf)
52817                 dma_free_coherent(&pdev->dev,
52818 @@ -1323,12 +1389,7 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
52820         stm32_usart_deinit_port(stm32_port);
52822 -       err = uart_remove_one_port(&stm32_usart_driver, port);
52824 -       pm_runtime_disable(&pdev->dev);
52825 -       pm_runtime_put_noidle(&pdev->dev);
52827 -       return err;
52828 +       return 0;
52831  #ifdef CONFIG_SERIAL_STM32_CONSOLE
52832 @@ -1436,23 +1497,20 @@ static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
52834         struct stm32_port *stm32_port = to_stm32_port(port);
52835         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
52836 -       const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
52837 -       u32 val;
52839         if (stm32_port->wakeirq <= 0)
52840                 return;
52842 +       /*
52843 +        * Enable low-power wake-up and wake-up irq if argument is set to
52844 +        * "enable", disable low-power wake-up and wake-up irq otherwise
52845 +        */
52846         if (enable) {
52847 -               stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
52848                 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
52849 -               val = readl_relaxed(port->membase + ofs->cr3);
52850 -               val &= ~USART_CR3_WUS_MASK;
52851 -               /* Enable Wake up interrupt from low power on start bit */
52852 -               val |= USART_CR3_WUS_START_BIT | USART_CR3_WUFIE;
52853 -               writel_relaxed(val, port->membase + ofs->cr3);
52854 -               stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
52855 +               stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
52856         } else {
52857                 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
52858 +               stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
52859         }
52862 diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
52863 index cb4f327c46db..94b568aa46bb 100644
52864 --- a/drivers/tty/serial/stm32-usart.h
52865 +++ b/drivers/tty/serial/stm32-usart.h
52866 @@ -127,9 +127,6 @@ struct stm32_usart_info stm32h7_info = {
52867  /* Dummy bits */
52868  #define USART_SR_DUMMY_RX      BIT(16)
52870 -/* USART_ICR (F7) */
52871 -#define USART_CR_TC            BIT(6)
52873  /* USART_DR */
52874  #define USART_DR_MASK          GENMASK(8, 0)
52876 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
52877 index 391bada4cedb..adbcbfa11b29 100644
52878 --- a/drivers/tty/tty_io.c
52879 +++ b/drivers/tty/tty_io.c
52880 @@ -2530,14 +2530,14 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
52881   *     @p: pointer to result
52882   *
52883   *     Obtain the modem status bits from the tty driver if the feature
52884 - *     is supported. Return -EINVAL if it is not available.
52885 + *     is supported. Return -ENOTTY if it is not available.
52886   *
52887   *     Locking: none (up to the driver)
52888   */
52890  static int tty_tiocmget(struct tty_struct *tty, int __user *p)
52892 -       int retval = -EINVAL;
52893 +       int retval = -ENOTTY;
52895         if (tty->ops->tiocmget) {
52896                 retval = tty->ops->tiocmget(tty);
52897 @@ -2555,7 +2555,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
52898   *     @p: pointer to desired bits
52899   *
52900   *     Set the modem status bits from the tty driver if the feature
52901 - *     is supported. Return -EINVAL if it is not available.
52902 + *     is supported. Return -ENOTTY if it is not available.
52903   *
52904   *     Locking: none (up to the driver)
52905   */
52906 @@ -2567,7 +2567,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
52907         unsigned int set, clear, val;
52909         if (tty->ops->tiocmset == NULL)
52910 -               return -EINVAL;
52911 +               return -ENOTTY;
52913         retval = get_user(val, p);
52914         if (retval)
52915 @@ -2607,7 +2607,7 @@ int tty_get_icount(struct tty_struct *tty,
52916         if (tty->ops->get_icount)
52917                 return tty->ops->get_icount(tty, icount);
52918         else
52919 -               return -EINVAL;
52920 +               return -ENOTTY;
52922  EXPORT_SYMBOL_GPL(tty_get_icount);
52924 diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
52925 index 4de1c6ddb8ff..803da2d111c8 100644
52926 --- a/drivers/tty/tty_ioctl.c
52927 +++ b/drivers/tty/tty_ioctl.c
52928 @@ -774,8 +774,8 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
52929         case TCSETX:
52930         case TCSETXW:
52931         case TCSETXF:
52932 -               return -EINVAL;
52933 -#endif         
52934 +               return -ENOTTY;
52935 +#endif
52936         case TIOCGSOFTCAR:
52937                 copy_termios(real_tty, &kterm);
52938                 ret = put_user((kterm.c_cflag & CLOCAL) ? 1 : 0,
52939 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
52940 index 284b07224c55..0cc360da5426 100644
52941 --- a/drivers/tty/vt/vt.c
52942 +++ b/drivers/tty/vt/vt.c
52943 @@ -1381,6 +1381,7 @@ struct vc_data *vc_deallocate(unsigned int currcons)
52944                 atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
52945                 vcs_remove_sysfs(currcons);
52946                 visual_deinit(vc);
52947 +               con_free_unimap(vc);
52948                 put_pid(vc->vt_pid);
52949                 vc_uniscr_set(vc, NULL);
52950                 kfree(vc->vc_screenbuf);
52951 diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
52952 index d7d4bdd57f46..56707b6b0f57 100644
52953 --- a/drivers/usb/cdns3/cdnsp-gadget.c
52954 +++ b/drivers/usb/cdns3/cdnsp-gadget.c
52955 @@ -727,7 +727,7 @@ int cdnsp_reset_device(struct cdnsp_device *pdev)
52956          * are in Disabled state.
52957          */
52958         for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
52959 -               pdev->eps[i].ep_state |= EP_STOPPED;
52960 +               pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED;
52962         trace_cdnsp_handle_cmd_reset_dev(slot_ctx);
52964 @@ -942,6 +942,7 @@ static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
52966         pep = to_cdnsp_ep(ep);
52967         pdev = pep->pdev;
52968 +       pep->ep_state &= ~EP_UNCONFIGURED;
52970         if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
52971                           "%s is already enabled\n", pep->name))
52972 @@ -1023,9 +1024,13 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
52973                 goto finish;
52974         }
52976 -       cdnsp_cmd_stop_ep(pdev, pep);
52977         pep->ep_state |= EP_DIS_IN_RROGRESS;
52978 -       cdnsp_cmd_flush_ep(pdev, pep);
52980 +       /* Endpoint was unconfigured by Reset Device command. */
52981 +       if (!(pep->ep_state & EP_UNCONFIGURED)) {
52982 +               cdnsp_cmd_stop_ep(pdev, pep);
52983 +               cdnsp_cmd_flush_ep(pdev, pep);
52984 +       }
52986         /* Remove all queued USB requests. */
52987         while (!list_empty(&pep->pending_list)) {
52988 @@ -1043,10 +1048,12 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
52990         cdnsp_endpoint_zero(pdev, pep);
52992 -       ret = cdnsp_update_eps_configuration(pdev, pep);
52993 +       if (!(pep->ep_state & EP_UNCONFIGURED))
52994 +               ret = cdnsp_update_eps_configuration(pdev, pep);
52996         cdnsp_free_endpoint_rings(pdev, pep);
52998 -       pep->ep_state &= ~EP_ENABLED;
52999 +       pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED);
53000         pep->ep_state |= EP_STOPPED;
53002  finish:
53003 diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
53004 index 6bbb26548c04..783ca8ffde00 100644
53005 --- a/drivers/usb/cdns3/cdnsp-gadget.h
53006 +++ b/drivers/usb/cdns3/cdnsp-gadget.h
53007 @@ -835,6 +835,7 @@ struct cdnsp_ep {
53008  #define EP_WEDGE               BIT(4)
53009  #define EP0_HALTED_STATUS      BIT(5)
53010  #define EP_HAS_STREAMS         BIT(6)
53011 +#define EP_UNCONFIGURED                BIT(7)
53013         bool skip;
53014  };
53015 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
53016 index 3fda1ec961d7..c103961c3fae 100644
53017 --- a/drivers/usb/class/cdc-acm.c
53018 +++ b/drivers/usb/class/cdc-acm.c
53019 @@ -929,8 +929,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
53021         struct acm *acm = tty->driver_data;
53023 -       ss->xmit_fifo_size = acm->writesize;
53024 -       ss->baud_base = le32_to_cpu(acm->line.dwDTERate);
53025 +       ss->line = acm->minor;
53026         ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
53027         ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
53028                                 ASYNC_CLOSING_WAIT_NONE :
53029 @@ -942,7 +941,6 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
53031         struct acm *acm = tty->driver_data;
53032         unsigned int closing_wait, close_delay;
53033 -       unsigned int old_closing_wait, old_close_delay;
53034         int retval = 0;
53036         close_delay = msecs_to_jiffies(ss->close_delay * 10);
53037 @@ -950,20 +948,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
53038                         ASYNC_CLOSING_WAIT_NONE :
53039                         msecs_to_jiffies(ss->closing_wait * 10);
53041 -       /* we must redo the rounding here, so that the values match */
53042 -       old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
53043 -       old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
53044 -                               ASYNC_CLOSING_WAIT_NONE :
53045 -                               jiffies_to_msecs(acm->port.closing_wait) / 10;
53047         mutex_lock(&acm->port.mutex);
53049         if (!capable(CAP_SYS_ADMIN)) {
53050 -               if ((ss->close_delay != old_close_delay) ||
53051 -                   (ss->closing_wait != old_closing_wait))
53052 +               if ((close_delay != acm->port.close_delay) ||
53053 +                   (closing_wait != acm->port.closing_wait))
53054                         retval = -EPERM;
53055 -               else
53056 -                       retval = -EOPNOTSUPP;
53057         } else {
53058                 acm->port.close_delay  = close_delay;
53059                 acm->port.closing_wait = closing_wait;
53060 @@ -1634,12 +1624,13 @@ static int acm_resume(struct usb_interface *intf)
53061         struct urb *urb;
53062         int rv = 0;
53064 -       acm_unpoison_urbs(acm);
53065         spin_lock_irq(&acm->write_lock);
53067         if (--acm->susp_count)
53068                 goto out;
53070 +       acm_unpoison_urbs(acm);
53072         if (tty_port_initialized(&acm->port)) {
53073                 rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
53075 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
53076 index 508b1c3f8b73..d1e4a7379beb 100644
53077 --- a/drivers/usb/class/cdc-wdm.c
53078 +++ b/drivers/usb/class/cdc-wdm.c
53079 @@ -321,12 +321,23 @@ static void wdm_int_callback(struct urb *urb)
53083 -static void kill_urbs(struct wdm_device *desc)
53084 +static void poison_urbs(struct wdm_device *desc)
53086         /* the order here is essential */
53087 -       usb_kill_urb(desc->command);
53088 -       usb_kill_urb(desc->validity);
53089 -       usb_kill_urb(desc->response);
53090 +       usb_poison_urb(desc->command);
53091 +       usb_poison_urb(desc->validity);
53092 +       usb_poison_urb(desc->response);
53095 +static void unpoison_urbs(struct wdm_device *desc)
53097 +       /*
53098 +        *  the order here is not essential
53099 +        *  it is symmetrical just to be nice
53100 +        */
53101 +       usb_unpoison_urb(desc->response);
53102 +       usb_unpoison_urb(desc->validity);
53103 +       usb_unpoison_urb(desc->command);
53106  static void free_urbs(struct wdm_device *desc)
53107 @@ -741,11 +752,12 @@ static int wdm_release(struct inode *inode, struct file *file)
53108         if (!desc->count) {
53109                 if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
53110                         dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
53111 -                       kill_urbs(desc);
53112 +                       poison_urbs(desc);
53113                         spin_lock_irq(&desc->iuspin);
53114                         desc->resp_count = 0;
53115                         spin_unlock_irq(&desc->iuspin);
53116                         desc->manage_power(desc->intf, 0);
53117 +                       unpoison_urbs(desc);
53118                 } else {
53119                         /* must avoid dev_printk here as desc->intf is invalid */
53120                         pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
53121 @@ -1037,9 +1049,9 @@ static void wdm_disconnect(struct usb_interface *intf)
53122         wake_up_all(&desc->wait);
53123         mutex_lock(&desc->rlock);
53124         mutex_lock(&desc->wlock);
53125 +       poison_urbs(desc);
53126         cancel_work_sync(&desc->rxwork);
53127         cancel_work_sync(&desc->service_outs_intr);
53128 -       kill_urbs(desc);
53129         mutex_unlock(&desc->wlock);
53130         mutex_unlock(&desc->rlock);
53132 @@ -1080,9 +1092,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
53133                 set_bit(WDM_SUSPENDING, &desc->flags);
53134                 spin_unlock_irq(&desc->iuspin);
53135                 /* callback submits work - order is essential */
53136 -               kill_urbs(desc);
53137 +               poison_urbs(desc);
53138                 cancel_work_sync(&desc->rxwork);
53139                 cancel_work_sync(&desc->service_outs_intr);
53140 +               unpoison_urbs(desc);
53141         }
53142         if (!PMSG_IS_AUTO(message)) {
53143                 mutex_unlock(&desc->wlock);
53144 @@ -1140,7 +1153,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
53145         wake_up_all(&desc->wait);
53146         mutex_lock(&desc->rlock);
53147         mutex_lock(&desc->wlock);
53148 -       kill_urbs(desc);
53149 +       poison_urbs(desc);
53150         cancel_work_sync(&desc->rxwork);
53151         cancel_work_sync(&desc->service_outs_intr);
53152         return 0;
53153 @@ -1151,6 +1164,7 @@ static int wdm_post_reset(struct usb_interface *intf)
53154         struct wdm_device *desc = wdm_find_device(intf);
53155         int rv;
53157 +       unpoison_urbs(desc);
53158         clear_bit(WDM_OVERFLOW, &desc->flags);
53159         clear_bit(WDM_RESETTING, &desc->flags);
53160         rv = recover_from_urb_loss(desc);
53161 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
53162 index 7f71218cc1e5..13fe37fbbd2c 100644
53163 --- a/drivers/usb/core/hub.c
53164 +++ b/drivers/usb/core/hub.c
53165 @@ -3556,7 +3556,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
53166         u16             portchange, portstatus;
53168         if (!test_and_set_bit(port1, hub->child_usage_bits)) {
53169 -               status = pm_runtime_get_sync(&port_dev->dev);
53170 +               status = pm_runtime_resume_and_get(&port_dev->dev);
53171                 if (status < 0) {
53172                         dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
53173                                         status);
53174 @@ -3593,9 +3593,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
53175                  * sequence.
53176                  */
53177                 status = hub_port_status(hub, port1, &portstatus, &portchange);
53179 -               /* TRSMRCY = 10 msec */
53180 -               msleep(10);
53181         }
53183   SuspendCleared:
53184 @@ -3610,6 +3607,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
53185                                 usb_clear_port_feature(hub->hdev, port1,
53186                                                 USB_PORT_FEAT_C_SUSPEND);
53187                 }
53189 +               /* TRSMRCY = 10 msec */
53190 +               msleep(10);
53191         }
53193         if (udev->persist_enabled)
53194 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
53195 index 76ac5d6555ae..21e7522655ac 100644
53196 --- a/drivers/usb/core/quirks.c
53197 +++ b/drivers/usb/core/quirks.c
53198 @@ -406,6 +406,7 @@ static const struct usb_device_id usb_quirk_list[] = {
53200         /* Realtek hub in Dell WD19 (Type-C) */
53201         { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
53202 +       { USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME },
53204         /* Generic RTL8153 based ethernet adapters */
53205         { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
53206 @@ -438,6 +439,9 @@ static const struct usb_device_id usb_quirk_list[] = {
53207         { USB_DEVICE(0x17ef, 0xa012), .driver_info =
53208                         USB_QUIRK_DISCONNECT_SUSPEND },
53210 +       /* Lenovo ThinkPad USB-C Dock Gen2 Ethernet (RTL8153 GigE) */
53211 +       { USB_DEVICE(0x17ef, 0xa387), .driver_info = USB_QUIRK_NO_LPM },
53213         /* BUILDWIN Photo Frame */
53214         { USB_DEVICE(0x1908, 0x1315), .driver_info =
53215                         USB_QUIRK_HONOR_BNUMINTERFACES },
53216 diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
53217 index 7161344c6522..641e4251cb7f 100644
53218 --- a/drivers/usb/dwc2/core.h
53219 +++ b/drivers/usb/dwc2/core.h
53220 @@ -112,6 +112,7 @@ struct dwc2_hsotg_req;
53221   * @debugfs: File entry for debugfs file for this endpoint.
53222   * @dir_in: Set to true if this endpoint is of the IN direction, which
53223   *          means that it is sending data to the Host.
53224 + * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
53225   * @index: The index for the endpoint registers.
53226   * @mc: Multi Count - number of transactions per microframe
53227   * @interval: Interval for periodic endpoints, in frames or microframes.
53228 @@ -161,6 +162,7 @@ struct dwc2_hsotg_ep {
53229         unsigned short          fifo_index;
53231         unsigned char           dir_in;
53232 +       unsigned char           map_dir;
53233         unsigned char           index;
53234         unsigned char           mc;
53235         u16                     interval;
53236 diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
53237 index 55f1d14fc414..510fd0572feb 100644
53238 --- a/drivers/usb/dwc2/core_intr.c
53239 +++ b/drivers/usb/dwc2/core_intr.c
53240 @@ -307,6 +307,7 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
53241  static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
53243         int ret;
53244 +       u32 hprt0;
53246         /* Clear interrupt */
53247         dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS);
53248 @@ -327,6 +328,13 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
53249                  * established
53250                  */
53251                 dwc2_hsotg_disconnect(hsotg);
53252 +       } else {
53253 +               /* Turn on the port power bit. */
53254 +               hprt0 = dwc2_read_hprt0(hsotg);
53255 +               hprt0 |= HPRT0_PWR;
53256 +               dwc2_writel(hsotg, hprt0, HPRT0);
53257 +               /* Connect hcd after port power is set. */
53258 +               dwc2_hcd_connect(hsotg);
53259         }
53262 @@ -652,6 +660,71 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
53263                 return 0;
53266 +/**
53267 + * dwc_handle_gpwrdn_disc_det() - Handles the gpwrdn disconnect detect.
53268 + * Exits hibernation without restoring registers.
53269 + *
53270 + * @hsotg: Programming view of DWC_otg controller
53271 + * @gpwrdn: GPWRDN register
53272 + */
53273 +static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
53274 +                                             u32 gpwrdn)
53276 +       u32 gpwrdn_tmp;
53278 +       /* Switch-on voltage to the core */
53279 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53280 +       gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
53281 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53282 +       udelay(5);
53284 +       /* Reset core */
53285 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53286 +       gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
53287 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53288 +       udelay(5);
53290 +       /* Disable Power Down Clamp */
53291 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53292 +       gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
53293 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53294 +       udelay(5);
53296 +       /* Deassert reset core */
53297 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53298 +       gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
53299 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53300 +       udelay(5);
53302 +       /* Disable PMU interrupt */
53303 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53304 +       gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
53305 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53307 +       /* De-assert Wakeup Logic */
53308 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53309 +       gpwrdn_tmp &= ~GPWRDN_PMUACTV;
53310 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53312 +       hsotg->hibernated = 0;
53313 +       hsotg->bus_suspended = 0;
53315 +       if (gpwrdn & GPWRDN_IDSTS) {
53316 +               hsotg->op_state = OTG_STATE_B_PERIPHERAL;
53317 +               dwc2_core_init(hsotg, false);
53318 +               dwc2_enable_global_interrupts(hsotg);
53319 +               dwc2_hsotg_core_init_disconnected(hsotg, false);
53320 +               dwc2_hsotg_core_connect(hsotg);
53321 +       } else {
53322 +               hsotg->op_state = OTG_STATE_A_HOST;
53324 +               /* Initialize the Core for Host mode */
53325 +               dwc2_core_init(hsotg, false);
53326 +               dwc2_enable_global_interrupts(hsotg);
53327 +               dwc2_hcd_start(hsotg);
53328 +       }
53331  /*
53332   * GPWRDN interrupt handler.
53333   *
53334 @@ -673,64 +746,14 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
53336         if ((gpwrdn & GPWRDN_DISCONN_DET) &&
53337             (gpwrdn & GPWRDN_DISCONN_DET_MSK) && !linestate) {
53338 -               u32 gpwrdn_tmp;
53340                 dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__);
53342 -               /* Switch-on voltage to the core */
53343 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53344 -               gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
53345 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53346 -               udelay(10);
53348 -               /* Reset core */
53349 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53350 -               gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
53351 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53352 -               udelay(10);
53354 -               /* Disable Power Down Clamp */
53355 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53356 -               gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
53357 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53358 -               udelay(10);
53360 -               /* Deassert reset core */
53361 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53362 -               gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
53363 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53364 -               udelay(10);
53366 -               /* Disable PMU interrupt */
53367 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53368 -               gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
53369 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53371 -               /* De-assert Wakeup Logic */
53372 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
53373 -               gpwrdn_tmp &= ~GPWRDN_PMUACTV;
53374 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
53376 -               hsotg->hibernated = 0;
53378 -               if (gpwrdn & GPWRDN_IDSTS) {
53379 -                       hsotg->op_state = OTG_STATE_B_PERIPHERAL;
53380 -                       dwc2_core_init(hsotg, false);
53381 -                       dwc2_enable_global_interrupts(hsotg);
53382 -                       dwc2_hsotg_core_init_disconnected(hsotg, false);
53383 -                       dwc2_hsotg_core_connect(hsotg);
53384 -               } else {
53385 -                       hsotg->op_state = OTG_STATE_A_HOST;
53387 -                       /* Initialize the Core for Host mode */
53388 -                       dwc2_core_init(hsotg, false);
53389 -                       dwc2_enable_global_interrupts(hsotg);
53390 -                       dwc2_hcd_start(hsotg);
53391 -               }
53392 -       }
53394 -       if ((gpwrdn & GPWRDN_LNSTSCHG) &&
53395 -           (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
53396 +               /*
53397 +                * Call disconnect detect function to exit from
53398 +                * hibernation
53399 +                */
53400 +               dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
53401 +       } else if ((gpwrdn & GPWRDN_LNSTSCHG) &&
53402 +                  (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
53403                 dev_dbg(hsotg->dev, "%s: GPWRDN_LNSTSCHG\n", __func__);
53404                 if (hsotg->hw_params.hibernation &&
53405                     hsotg->hibernated) {
53406 @@ -741,24 +764,21 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
53407                                 dwc2_exit_hibernation(hsotg, 1, 0, 1);
53408                         }
53409                 }
53410 -       }
53411 -       if ((gpwrdn & GPWRDN_RST_DET) && (gpwrdn & GPWRDN_RST_DET_MSK)) {
53412 +       } else if ((gpwrdn & GPWRDN_RST_DET) &&
53413 +                  (gpwrdn & GPWRDN_RST_DET_MSK)) {
53414                 dev_dbg(hsotg->dev, "%s: GPWRDN_RST_DET\n", __func__);
53415                 if (!linestate && (gpwrdn & GPWRDN_BSESSVLD))
53416                         dwc2_exit_hibernation(hsotg, 0, 1, 0);
53417 -       }
53418 -       if ((gpwrdn & GPWRDN_STS_CHGINT) &&
53419 -           (gpwrdn & GPWRDN_STS_CHGINT_MSK) && linestate) {
53420 +       } else if ((gpwrdn & GPWRDN_STS_CHGINT) &&
53421 +                  (gpwrdn & GPWRDN_STS_CHGINT_MSK)) {
53422                 dev_dbg(hsotg->dev, "%s: GPWRDN_STS_CHGINT\n", __func__);
53423 -               if (hsotg->hw_params.hibernation &&
53424 -                   hsotg->hibernated) {
53425 -                       if (gpwrdn & GPWRDN_IDSTS) {
53426 -                               dwc2_exit_hibernation(hsotg, 0, 0, 0);
53427 -                               call_gadget(hsotg, resume);
53428 -                       } else {
53429 -                               dwc2_exit_hibernation(hsotg, 1, 0, 1);
53430 -                       }
53431 -               }
53432 +               /*
53433 +                * As GPWRDN_STS_CHGINT exit from hibernation flow is
53434 +                * the same as in GPWRDN_DISCONN_DET flow. Call
53435 +                * disconnect detect helper function to exit from
53436 +                * hibernation.
53437 +                */
53438 +               dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
53439         }
53442 diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
53443 index ad4c94366dad..d2f623d83bf7 100644
53444 --- a/drivers/usb/dwc2/gadget.c
53445 +++ b/drivers/usb/dwc2/gadget.c
53446 @@ -422,7 +422,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
53448         struct usb_request *req = &hs_req->req;
53450 -       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
53451 +       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
53454  /*
53455 @@ -1242,6 +1242,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
53457         int ret;
53459 +       hs_ep->map_dir = hs_ep->dir_in;
53460         ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
53461         if (ret)
53462                 goto dma_error;
53463 diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
53464 index 1a9789ec5847..6af1dcbc3656 100644
53465 --- a/drivers/usb/dwc2/hcd.c
53466 +++ b/drivers/usb/dwc2/hcd.c
53467 @@ -5580,7 +5580,15 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
53468                 return ret;
53469         }
53471 -       dwc2_hcd_rem_wakeup(hsotg);
53472 +       if (rem_wakeup) {
53473 +               dwc2_hcd_rem_wakeup(hsotg);
53474 +               /*
53475 +                * Change "port_connect_status_change" flag to re-enumerate,
53476 +                * because after exit from hibernation port connection status
53477 +                * is not detected.
53478 +                */
53479 +               hsotg->flags.b.port_connect_status_change = 1;
53480 +       }
53482         hsotg->hibernated = 0;
53483         hsotg->bus_suspended = 0;
53484 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
53485 index f2448d0a9d39..126f0e10b3ef 100644
53486 --- a/drivers/usb/dwc3/core.c
53487 +++ b/drivers/usb/dwc3/core.c
53488 @@ -114,6 +114,8 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
53489         dwc->current_dr_role = mode;
53492 +static int dwc3_core_soft_reset(struct dwc3 *dwc);
53494  static void __dwc3_set_mode(struct work_struct *work)
53496         struct dwc3 *dwc = work_to_dwc(work);
53497 @@ -121,6 +123,8 @@ static void __dwc3_set_mode(struct work_struct *work)
53498         int ret;
53499         u32 reg;
53501 +       mutex_lock(&dwc->mutex);
53503         pm_runtime_get_sync(dwc->dev);
53505         if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
53506 @@ -154,6 +158,25 @@ static void __dwc3_set_mode(struct work_struct *work)
53507                 break;
53508         }
53510 +       /* For DRD host or device mode only */
53511 +       if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) {
53512 +               reg = dwc3_readl(dwc->regs, DWC3_GCTL);
53513 +               reg |= DWC3_GCTL_CORESOFTRESET;
53514 +               dwc3_writel(dwc->regs, DWC3_GCTL, reg);
53516 +               /*
53517 +                * Wait for internal clocks to synchronized. DWC_usb31 and
53518 +                * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
53519 +                * keep it consistent across different IPs, let's wait up to
53520 +                * 100ms before clearing GCTL.CORESOFTRESET.
53521 +                */
53522 +               msleep(100);
53524 +               reg = dwc3_readl(dwc->regs, DWC3_GCTL);
53525 +               reg &= ~DWC3_GCTL_CORESOFTRESET;
53526 +               dwc3_writel(dwc->regs, DWC3_GCTL, reg);
53527 +       }
53529         spin_lock_irqsave(&dwc->lock, flags);
53531         dwc3_set_prtcap(dwc, dwc->desired_dr_role);
53532 @@ -178,6 +201,8 @@ static void __dwc3_set_mode(struct work_struct *work)
53533                 }
53534                 break;
53535         case DWC3_GCTL_PRTCAP_DEVICE:
53536 +               dwc3_core_soft_reset(dwc);
53538                 dwc3_event_buffers_setup(dwc);
53540                 if (dwc->usb2_phy)
53541 @@ -200,6 +225,7 @@ static void __dwc3_set_mode(struct work_struct *work)
53542  out:
53543         pm_runtime_mark_last_busy(dwc->dev);
53544         pm_runtime_put_autosuspend(dwc->dev);
53545 +       mutex_unlock(&dwc->mutex);
53548  void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
53549 @@ -1277,6 +1303,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
53550                                 "snps,usb3_lpm_capable");
53551         dwc->usb2_lpm_disable = device_property_read_bool(dev,
53552                                 "snps,usb2-lpm-disable");
53553 +       dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
53554 +                               "snps,usb2-gadget-lpm-disable");
53555         device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
53556                                 &rx_thr_num_pkt_prd);
53557         device_property_read_u8(dev, "snps,rx-max-burst-prd",
53558 @@ -1543,6 +1571,7 @@ static int dwc3_probe(struct platform_device *pdev)
53559         dwc3_cache_hwparams(dwc);
53561         spin_lock_init(&dwc->lock);
53562 +       mutex_init(&dwc->mutex);
53564         pm_runtime_set_active(dev);
53565         pm_runtime_use_autosuspend(dev);
53566 diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
53567 index 052b20d52651..453cfebd4d04 100644
53568 --- a/drivers/usb/dwc3/core.h
53569 +++ b/drivers/usb/dwc3/core.h
53570 @@ -13,6 +13,7 @@
53572  #include <linux/device.h>
53573  #include <linux/spinlock.h>
53574 +#include <linux/mutex.h>
53575  #include <linux/ioport.h>
53576  #include <linux/list.h>
53577  #include <linux/bitops.h>
53578 @@ -946,6 +947,7 @@ struct dwc3_scratchpad_array {
53579   * @scratch_addr: dma address of scratchbuf
53580   * @ep0_in_setup: one control transfer is completed and enter setup phase
53581   * @lock: for synchronizing
53582 + * @mutex: for mode switching
53583   * @dev: pointer to our struct device
53584   * @sysdev: pointer to the DMA-capable device
53585   * @xhci: pointer to our xHCI child
53586 @@ -1034,7 +1036,8 @@ struct dwc3_scratchpad_array {
53587   * @dis_start_transfer_quirk: set if start_transfer failure SW workaround is
53588   *                     not needed for DWC_usb31 version 1.70a-ea06 and below
53589   * @usb3_lpm_capable: set if hadrware supports Link Power Management
53590 - * @usb2_lpm_disable: set to disable usb2 lpm
53591 + * @usb2_lpm_disable: set to disable usb2 lpm for host
53592 + * @usb2_gadget_lpm_disable: set to disable usb2 lpm for gadget
53593   * @disable_scramble_quirk: set if we enable the disable scramble quirk
53594   * @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
53595   * @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
53596 @@ -1085,6 +1088,9 @@ struct dwc3 {
53597         /* device lock */
53598         spinlock_t              lock;
53600 +       /* mode switching lock */
53601 +       struct mutex            mutex;
53603         struct device           *dev;
53604         struct device           *sysdev;
53606 @@ -1238,6 +1244,7 @@ struct dwc3 {
53607         unsigned                dis_start_transfer_quirk:1;
53608         unsigned                usb3_lpm_capable:1;
53609         unsigned                usb2_lpm_disable:1;
53610 +       unsigned                usb2_gadget_lpm_disable:1;
53612         unsigned                disable_scramble_quirk:1;
53613         unsigned                u2exit_lfps_quirk:1;
53614 diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
53615 index 75f0042b998b..84c1a4ac2444 100644
53616 --- a/drivers/usb/dwc3/dwc3-imx8mp.c
53617 +++ b/drivers/usb/dwc3/dwc3-imx8mp.c
53618 @@ -167,6 +167,7 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
53620         dwc3_np = of_get_child_by_name(node, "dwc3");
53621         if (!dwc3_np) {
53622 +               err = -ENODEV;
53623                 dev_err(dev, "failed to find dwc3 core child\n");
53624                 goto disable_rpm;
53625         }
53626 diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
53627 index 3db17806e92e..e196673f5c64 100644
53628 --- a/drivers/usb/dwc3/dwc3-omap.c
53629 +++ b/drivers/usb/dwc3/dwc3-omap.c
53630 @@ -437,8 +437,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
53632                 if (extcon_get_state(edev, EXTCON_USB) == true)
53633                         dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
53634 +               else
53635 +                       dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
53637                 if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
53638                         dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
53639 +               else
53640 +                       dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
53642                 omap->edev = edev;
53643         }
53644 diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
53645 index 4c5c6972124a..19789e94bbd0 100644
53646 --- a/drivers/usb/dwc3/dwc3-pci.c
53647 +++ b/drivers/usb/dwc3/dwc3-pci.c
53648 @@ -41,6 +41,7 @@
53649  #define PCI_DEVICE_ID_INTEL_TGPH               0x43ee
53650  #define PCI_DEVICE_ID_INTEL_JSP                        0x4dee
53651  #define PCI_DEVICE_ID_INTEL_ADLP               0x51ee
53652 +#define PCI_DEVICE_ID_INTEL_ADLM               0x54ee
53653  #define PCI_DEVICE_ID_INTEL_ADLS               0x7ae1
53654  #define PCI_DEVICE_ID_INTEL_TGL                        0x9a15
53656 @@ -122,6 +123,7 @@ static const struct property_entry dwc3_pci_mrfld_properties[] = {
53657         PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
53658         PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
53659         PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
53660 +       PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
53661         PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
53662         {}
53663  };
53664 @@ -388,6 +390,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
53665         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLP),
53666           (kernel_ulong_t) &dwc3_pci_intel_swnode, },
53668 +       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLM),
53669 +         (kernel_ulong_t) &dwc3_pci_intel_swnode, },
53671         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
53672           (kernel_ulong_t) &dwc3_pci_intel_swnode, },
53674 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
53675 index c7ef218e7a8c..8585b56d9f2d 100644
53676 --- a/drivers/usb/dwc3/gadget.c
53677 +++ b/drivers/usb/dwc3/gadget.c
53678 @@ -308,13 +308,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
53679         }
53681         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
53682 -               int             needs_wakeup;
53683 +               int link_state;
53685 -               needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
53686 -                               dwc->link_state == DWC3_LINK_STATE_U2 ||
53687 -                               dwc->link_state == DWC3_LINK_STATE_U3);
53689 -               if (unlikely(needs_wakeup)) {
53690 +               link_state = dwc3_gadget_get_link_state(dwc);
53691 +               if (link_state == DWC3_LINK_STATE_U1 ||
53692 +                   link_state == DWC3_LINK_STATE_U2 ||
53693 +                   link_state == DWC3_LINK_STATE_U3) {
53694                         ret = __dwc3_gadget_wakeup(dwc);
53695                         dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
53696                                         ret);
53697 @@ -608,12 +607,14 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
53698                 u8 bInterval_m1;
53700                 /*
53701 -                * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
53702 -                * must be set to 0 when the controller operates in full-speed.
53703 +                * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
53704 +                *
53705 +                * NOTE: The programming guide incorrectly stated bInterval_m1
53706 +                * must be set to 0 when operating in fullspeed. Internally the
53707 +                * controller does not have this limitation. See DWC_usb3x
53708 +                * programming guide section 3.2.2.1.
53709                  */
53710                 bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
53711 -               if (dwc->gadget->speed == USB_SPEED_FULL)
53712 -                       bInterval_m1 = 0;
53714                 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
53715                     dwc->gadget->speed == USB_SPEED_FULL)
53716 @@ -1675,7 +1676,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
53717                 }
53718         }
53720 -       return __dwc3_gadget_kick_transfer(dep);
53721 +       __dwc3_gadget_kick_transfer(dep);
53723 +       return 0;
53726  static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
53727 @@ -1973,6 +1976,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
53728         case DWC3_LINK_STATE_RESET:
53729         case DWC3_LINK_STATE_RX_DET:    /* in HS, means Early Suspend */
53730         case DWC3_LINK_STATE_U3:        /* in HS, means SUSPEND */
53731 +       case DWC3_LINK_STATE_U2:        /* in HS, means Sleep (L1) */
53732 +       case DWC3_LINK_STATE_U1:
53733         case DWC3_LINK_STATE_RESUME:
53734                 break;
53735         default:
53736 @@ -2299,6 +2304,10 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
53737         if (DWC3_VER_IS_PRIOR(DWC3, 250A))
53738                 reg |= DWC3_DEVTEN_ULSTCNGEN;
53740 +       /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
53741 +       if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
53742 +               reg |= DWC3_DEVTEN_EOPFEN;
53744         dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
53747 @@ -3322,6 +3331,15 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
53749         u32                     reg;
53751 +       /*
53752 +        * Ideally, dwc3_reset_gadget() would trigger the function
53753 +        * drivers to stop any active transfers through ep disable.
53754 +        * However, for functions which defer ep disable, such as mass
53755 +        * storage, we will need to rely on the call to stop active
53756 +        * transfers here, and avoid allowing of request queuing.
53757 +        */
53758 +       dwc->connected = false;
53760         /*
53761          * WORKAROUND: DWC3 revisions <1.88a have an issue which
53762          * would cause a missing Disconnect Event if there's a
53763 @@ -3460,6 +3478,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
53764         /* Enable USB2 LPM Capability */
53766         if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
53767 +           !dwc->usb2_gadget_lpm_disable &&
53768             (speed != DWC3_DSTS_SUPERSPEED) &&
53769             (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
53770                 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
53771 @@ -3486,6 +3505,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
53773                 dwc3_gadget_dctl_write_safe(dwc, reg);
53774         } else {
53775 +               if (dwc->usb2_gadget_lpm_disable) {
53776 +                       reg = dwc3_readl(dwc->regs, DWC3_DCFG);
53777 +                       reg &= ~DWC3_DCFG_LPM_CAP;
53778 +                       dwc3_writel(dwc->regs, DWC3_DCFG, reg);
53779 +               }
53781                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
53782                 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
53783                 dwc3_gadget_dctl_write_safe(dwc, reg);
53784 @@ -3934,7 +3959,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
53785         dwc->gadget->ssp_rate           = USB_SSP_GEN_UNKNOWN;
53786         dwc->gadget->sg_supported       = true;
53787         dwc->gadget->name               = "dwc3-gadget";
53788 -       dwc->gadget->lpm_capable        = true;
53789 +       dwc->gadget->lpm_capable        = !dwc->usb2_gadget_lpm_disable;
53791         /*
53792          * FIXME We might be setting max_speed to <SUPER, however versions
53793 @@ -4005,8 +4030,9 @@ int dwc3_gadget_init(struct dwc3 *dwc)
53795  void dwc3_gadget_exit(struct dwc3 *dwc)
53797 -       usb_del_gadget_udc(dwc->gadget);
53798 +       usb_del_gadget(dwc->gadget);
53799         dwc3_gadget_free_endpoints(dwc);
53800 +       usb_put_gadget(dwc->gadget);
53801         dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
53802                           dwc->bounce_addr);
53803         kfree(dwc->setup_buf);
53804 diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
53805 index 2d115353424c..8bb25773b61e 100644
53806 --- a/drivers/usb/gadget/config.c
53807 +++ b/drivers/usb/gadget/config.c
53808 @@ -194,9 +194,13 @@ EXPORT_SYMBOL_GPL(usb_assign_descriptors);
53809  void usb_free_all_descriptors(struct usb_function *f)
53811         usb_free_descriptors(f->fs_descriptors);
53812 +       f->fs_descriptors = NULL;
53813         usb_free_descriptors(f->hs_descriptors);
53814 +       f->hs_descriptors = NULL;
53815         usb_free_descriptors(f->ss_descriptors);
53816 +       f->ss_descriptors = NULL;
53817         usb_free_descriptors(f->ssp_descriptors);
53818 +       f->ssp_descriptors = NULL;
53820  EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
53822 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
53823 index 801a8b668a35..10a5d9f0f2b9 100644
53824 --- a/drivers/usb/gadget/function/f_fs.c
53825 +++ b/drivers/usb/gadget/function/f_fs.c
53826 @@ -2640,6 +2640,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
53828         do { /* lang_count > 0 so we can use do-while */
53829                 unsigned needed = needed_count;
53830 +               u32 str_per_lang = str_count;
53832                 if (len < 3)
53833                         goto error_free;
53834 @@ -2675,7 +2676,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
53836                         data += length + 1;
53837                         len -= length + 1;
53838 -               } while (--str_count);
53839 +               } while (--str_per_lang);
53841                 s->id = 0;   /* terminator */
53842                 s->s = NULL;
53843 diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
53844 index 560382e0a8f3..e65f474ad7b3 100644
53845 --- a/drivers/usb/gadget/function/f_uac1.c
53846 +++ b/drivers/usb/gadget/function/f_uac1.c
53847 @@ -19,6 +19,9 @@
53848  #include "u_audio.h"
53849  #include "u_uac1.h"
53851 +/* UAC1 spec: 3.7.2.3 Audio Channel Cluster Format */
53852 +#define UAC1_CHANNEL_MASK 0x0FFF
53854  struct f_uac1 {
53855         struct g_audio g_audio;
53856         u8 ac_intf, as_in_intf, as_out_intf;
53857 @@ -30,6 +33,11 @@ static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
53858         return container_of(f, struct f_uac1, g_audio.func);
53861 +static inline struct f_uac1_opts *g_audio_to_uac1_opts(struct g_audio *audio)
53863 +       return container_of(audio->func.fi, struct f_uac1_opts, func_inst);
53866  /*
53867   * DESCRIPTORS ... most are static, but strings and full
53868   * configuration descriptors are built on demand.
53869 @@ -505,11 +513,42 @@ static void f_audio_disable(struct usb_function *f)
53871  /*-------------------------------------------------------------------------*/
53873 +static int f_audio_validate_opts(struct g_audio *audio, struct device *dev)
53875 +       struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
53877 +       if (!opts->p_chmask && !opts->c_chmask) {
53878 +               dev_err(dev, "Error: no playback and capture channels\n");
53879 +               return -EINVAL;
53880 +       } else if (opts->p_chmask & ~UAC1_CHANNEL_MASK) {
53881 +               dev_err(dev, "Error: unsupported playback channels mask\n");
53882 +               return -EINVAL;
53883 +       } else if (opts->c_chmask & ~UAC1_CHANNEL_MASK) {
53884 +               dev_err(dev, "Error: unsupported capture channels mask\n");
53885 +               return -EINVAL;
53886 +       } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
53887 +               dev_err(dev, "Error: incorrect playback sample size\n");
53888 +               return -EINVAL;
53889 +       } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
53890 +               dev_err(dev, "Error: incorrect capture sample size\n");
53891 +               return -EINVAL;
53892 +       } else if (!opts->p_srate) {
53893 +               dev_err(dev, "Error: incorrect playback sampling rate\n");
53894 +               return -EINVAL;
53895 +       } else if (!opts->c_srate) {
53896 +               dev_err(dev, "Error: incorrect capture sampling rate\n");
53897 +               return -EINVAL;
53898 +       }
53900 +       return 0;
53903  /* audio function driver setup/binding */
53904  static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
53906         struct usb_composite_dev        *cdev = c->cdev;
53907         struct usb_gadget               *gadget = cdev->gadget;
53908 +       struct device                   *dev = &gadget->dev;
53909         struct f_uac1                   *uac1 = func_to_uac1(f);
53910         struct g_audio                  *audio = func_to_g_audio(f);
53911         struct f_uac1_opts              *audio_opts;
53912 @@ -519,6 +558,10 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
53913         int                             rate;
53914         int                             status;
53916 +       status = f_audio_validate_opts(audio, dev);
53917 +       if (status)
53918 +               return status;
53920         audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
53922         us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
53923 diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
53924 index 6f03e944e0e3..dd960cea642f 100644
53925 --- a/drivers/usb/gadget/function/f_uac2.c
53926 +++ b/drivers/usb/gadget/function/f_uac2.c
53927 @@ -14,6 +14,9 @@
53928  #include "u_audio.h"
53929  #include "u_uac2.h"
53931 +/* UAC2 spec: 4.1 Audio Channel Cluster Descriptor */
53932 +#define UAC2_CHANNEL_MASK 0x07FFFFFF
53934  /*
53935   * The driver implements a simple UAC_2 topology.
53936   * USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture
53937 @@ -604,6 +607,36 @@ static void setup_descriptor(struct f_uac2_opts *opts)
53938         hs_audio_desc[i] = NULL;
53941 +static int afunc_validate_opts(struct g_audio *agdev, struct device *dev)
53943 +       struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
53945 +       if (!opts->p_chmask && !opts->c_chmask) {
53946 +               dev_err(dev, "Error: no playback and capture channels\n");
53947 +               return -EINVAL;
53948 +       } else if (opts->p_chmask & ~UAC2_CHANNEL_MASK) {
53949 +               dev_err(dev, "Error: unsupported playback channels mask\n");
53950 +               return -EINVAL;
53951 +       } else if (opts->c_chmask & ~UAC2_CHANNEL_MASK) {
53952 +               dev_err(dev, "Error: unsupported capture channels mask\n");
53953 +               return -EINVAL;
53954 +       } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
53955 +               dev_err(dev, "Error: incorrect playback sample size\n");
53956 +               return -EINVAL;
53957 +       } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
53958 +               dev_err(dev, "Error: incorrect capture sample size\n");
53959 +               return -EINVAL;
53960 +       } else if (!opts->p_srate) {
53961 +               dev_err(dev, "Error: incorrect playback sampling rate\n");
53962 +               return -EINVAL;
53963 +       } else if (!opts->c_srate) {
53964 +               dev_err(dev, "Error: incorrect capture sampling rate\n");
53965 +               return -EINVAL;
53966 +       }
53968 +       return 0;
53971  static int
53972  afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
53974 @@ -612,11 +645,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
53975         struct usb_composite_dev *cdev = cfg->cdev;
53976         struct usb_gadget *gadget = cdev->gadget;
53977         struct device *dev = &gadget->dev;
53978 -       struct f_uac2_opts *uac2_opts;
53979 +       struct f_uac2_opts *uac2_opts = g_audio_to_uac2_opts(agdev);
53980         struct usb_string *us;
53981         int ret;
53983 -       uac2_opts = container_of(fn->fi, struct f_uac2_opts, func_inst);
53984 +       ret = afunc_validate_opts(agdev, dev);
53985 +       if (ret)
53986 +               return ret;
53988         us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn));
53989         if (IS_ERR(us))
53990 diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
53991 index 44b4352a2676..f48a00e49794 100644
53992 --- a/drivers/usb/gadget/function/f_uvc.c
53993 +++ b/drivers/usb/gadget/function/f_uvc.c
53994 @@ -633,7 +633,12 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
53996         uvc_hs_streaming_ep.wMaxPacketSize =
53997                 cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11));
53998 -       uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
54000 +       /* A high-bandwidth endpoint must specify a bInterval value of 1 */
54001 +       if (max_packet_mult > 1)
54002 +               uvc_hs_streaming_ep.bInterval = 1;
54003 +       else
54004 +               uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
54006         uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size);
54007         uvc_ss_streaming_ep.bInterval = opts->streaming_interval;
54008 @@ -817,6 +822,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
54009         pd->bmControls[0]               = 1;
54010         pd->bmControls[1]               = 0;
54011         pd->iProcessing                 = 0;
54012 +       pd->bmVideoStandards            = 0;
54014         od = &opts->uvc_output_terminal;
54015         od->bLength                     = UVC_DT_OUTPUT_TERMINAL_SIZE;
54016 diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
54017 index a9f8eb8e1c76..2c9eab2b863d 100644
54018 --- a/drivers/usb/gadget/legacy/webcam.c
54019 +++ b/drivers/usb/gadget/legacy/webcam.c
54020 @@ -125,6 +125,7 @@ static const struct uvc_processing_unit_descriptor uvc_processing = {
54021         .bmControls[0]          = 1,
54022         .bmControls[1]          = 0,
54023         .iProcessing            = 0,
54024 +       .bmVideoStandards       = 0,
54025  };
54027  static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
54028 diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
54029 index be7bb64e3594..d11d3d14313f 100644
54030 --- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
54031 +++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
54032 @@ -36,6 +36,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
54033                    int status)
54035         bool internal = req->internal;
54036 +       struct ast_vhub *vhub = ep->vhub;
54038         EPVDBG(ep, "completing request @%p, status %d\n", req, status);
54040 @@ -46,7 +47,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
54042         if (req->req.dma) {
54043                 if (!WARN_ON(!ep->dev))
54044 -                       usb_gadget_unmap_request(&ep->dev->gadget,
54045 +                       usb_gadget_unmap_request_by_dev(&vhub->pdev->dev,
54046                                                  &req->req, ep->epn.is_in);
54047                 req->req.dma = 0;
54048         }
54049 diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
54050 index 02d8bfae58fb..cb164c615e6f 100644
54051 --- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
54052 +++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
54053 @@ -376,7 +376,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
54054         if (ep->epn.desc_mode ||
54055             ((((unsigned long)u_req->buf & 7) == 0) &&
54056              (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
54057 -               rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
54058 +               rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
54059                                             ep->epn.is_in);
54060                 if (rc) {
54061                         dev_warn(&vhub->pdev->dev,
54062 diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
54063 index 57067763b100..5f474ffe2be1 100644
54064 --- a/drivers/usb/gadget/udc/dummy_hcd.c
54065 +++ b/drivers/usb/gadget/udc/dummy_hcd.c
54066 @@ -903,6 +903,21 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value)
54067         spin_lock_irqsave(&dum->lock, flags);
54068         dum->pullup = (value != 0);
54069         set_link_state(dum_hcd);
54070 +       if (value == 0) {
54071 +               /*
54072 +                * Emulate synchronize_irq(): wait for callbacks to finish.
54073 +                * This seems to be the best place to emulate the call to
54074 +                * synchronize_irq() that's in usb_gadget_remove_driver().
54075 +                * Doing it in dummy_udc_stop() would be too late since it
54076 +                * is called after the unbind callback and unbind shouldn't
54077 +                * be invoked until all the other callbacks are finished.
54078 +                */
54079 +               while (dum->callback_usage > 0) {
54080 +                       spin_unlock_irqrestore(&dum->lock, flags);
54081 +                       usleep_range(1000, 2000);
54082 +                       spin_lock_irqsave(&dum->lock, flags);
54083 +               }
54084 +       }
54085         spin_unlock_irqrestore(&dum->lock, flags);
54087         usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
54088 @@ -1004,14 +1019,6 @@ static int dummy_udc_stop(struct usb_gadget *g)
54089         spin_lock_irq(&dum->lock);
54090         dum->ints_enabled = 0;
54091         stop_activity(dum);
54093 -       /* emulate synchronize_irq(): wait for callbacks to finish */
54094 -       while (dum->callback_usage > 0) {
54095 -               spin_unlock_irq(&dum->lock);
54096 -               usleep_range(1000, 2000);
54097 -               spin_lock_irq(&dum->lock);
54098 -       }
54100         dum->driver = NULL;
54101         spin_unlock_irq(&dum->lock);
54103 diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
54104 index d6ca50f01985..75bf446f4a66 100644
54105 --- a/drivers/usb/gadget/udc/fotg210-udc.c
54106 +++ b/drivers/usb/gadget/udc/fotg210-udc.c
54107 @@ -338,15 +338,16 @@ static void fotg210_start_dma(struct fotg210_ep *ep,
54108                 } else {
54109                         buffer = req->req.buf + req->req.actual;
54110                         length = ioread32(ep->fotg210->reg +
54111 -                                       FOTG210_FIBCR(ep->epnum - 1));
54112 -                       length &= FIBCR_BCFX;
54113 +                                       FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
54114 +                       if (length > req->req.length - req->req.actual)
54115 +                               length = req->req.length - req->req.actual;
54116                 }
54117         } else {
54118                 buffer = req->req.buf + req->req.actual;
54119                 if (req->req.length - req->req.actual > ep->ep.maxpacket)
54120                         length = ep->ep.maxpacket;
54121                 else
54122 -                       length = req->req.length;
54123 +                       length = req->req.length - req->req.actual;
54124         }
54126         d = dma_map_single(dev, buffer, length,
54127 @@ -379,8 +380,7 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep,
54128         }
54129         if (ep->dir_in) { /* if IN */
54130                 fotg210_start_dma(ep, req);
54131 -               if ((req->req.length == req->req.actual) ||
54132 -                   (req->req.actual < ep->ep.maxpacket))
54133 +               if (req->req.length == req->req.actual)
54134                         fotg210_done(ep, req, 0);
54135         } else { /* OUT */
54136                 u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
54137 @@ -820,7 +820,7 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210)
54138                 if (req->req.length)
54139                         fotg210_start_dma(ep, req);
54141 -               if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
54142 +               if (req->req.actual == req->req.length)
54143                         fotg210_done(ep, req, 0);
54144         } else {
54145                 fotg210_set_cxdone(fotg210);
54146 @@ -849,12 +849,16 @@ static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
54148         struct fotg210_request *req = list_entry(ep->queue.next,
54149                                                  struct fotg210_request, queue);
54150 +       int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);
54152         fotg210_start_dma(ep, req);
54154 -       /* finish out transfer */
54155 +       /* Complete the request when it's full or a short packet arrived.
54156 +        * Like other drivers, short_not_ok isn't handled.
54157 +        */
54159         if (req->req.length == req->req.actual ||
54160 -           req->req.actual < ep->ep.maxpacket)
54161 +           (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
54162                 fotg210_done(ep, req, 0);
54165 @@ -1027,6 +1031,12 @@ static void fotg210_init(struct fotg210_udc *fotg210)
54166         value &= ~DMCR_GLINT_EN;
54167         iowrite32(value, fotg210->reg + FOTG210_DMCR);
54169 +       /* enable only grp2 irqs we handle */
54170 +       iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
54171 +                   | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
54172 +                   | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
54173 +                 fotg210->reg + FOTG210_DMISGR2);
54175         /* disable all fifo interrupt */
54176         iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
54178 diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
54179 index a3c1fc924268..fd3656d0f760 100644
54180 --- a/drivers/usb/gadget/udc/pch_udc.c
54181 +++ b/drivers/usb/gadget/udc/pch_udc.c
54182 @@ -7,12 +7,14 @@
54183  #include <linux/module.h>
54184  #include <linux/pci.h>
54185  #include <linux/delay.h>
54186 +#include <linux/dmi.h>
54187  #include <linux/errno.h>
54188 +#include <linux/gpio/consumer.h>
54189 +#include <linux/gpio/machine.h>
54190  #include <linux/list.h>
54191  #include <linux/interrupt.h>
54192  #include <linux/usb/ch9.h>
54193  #include <linux/usb/gadget.h>
54194 -#include <linux/gpio/consumer.h>
54195  #include <linux/irq.h>
54197  #define PCH_VBUS_PERIOD                3000    /* VBUS polling period (msec) */
54198 @@ -596,18 +598,22 @@ static void pch_udc_reconnect(struct pch_udc_dev *dev)
54199  static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
54200                                           int is_active)
54202 +       unsigned long           iflags;
54204 +       spin_lock_irqsave(&dev->lock, iflags);
54205         if (is_active) {
54206                 pch_udc_reconnect(dev);
54207                 dev->vbus_session = 1;
54208         } else {
54209                 if (dev->driver && dev->driver->disconnect) {
54210 -                       spin_lock(&dev->lock);
54211 +                       spin_unlock_irqrestore(&dev->lock, iflags);
54212                         dev->driver->disconnect(&dev->gadget);
54213 -                       spin_unlock(&dev->lock);
54214 +                       spin_lock_irqsave(&dev->lock, iflags);
54215                 }
54216                 pch_udc_set_disconnect(dev);
54217                 dev->vbus_session = 0;
54218         }
54219 +       spin_unlock_irqrestore(&dev->lock, iflags);
54222  /**
54223 @@ -1166,20 +1172,25 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
54224  static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
54226         struct pch_udc_dev      *dev;
54227 +       unsigned long           iflags;
54229         if (!gadget)
54230                 return -EINVAL;
54232         dev = container_of(gadget, struct pch_udc_dev, gadget);
54234 +       spin_lock_irqsave(&dev->lock, iflags);
54235         if (is_on) {
54236                 pch_udc_reconnect(dev);
54237         } else {
54238                 if (dev->driver && dev->driver->disconnect) {
54239 -                       spin_lock(&dev->lock);
54240 +                       spin_unlock_irqrestore(&dev->lock, iflags);
54241                         dev->driver->disconnect(&dev->gadget);
54242 -                       spin_unlock(&dev->lock);
54243 +                       spin_lock_irqsave(&dev->lock, iflags);
54244                 }
54245                 pch_udc_set_disconnect(dev);
54246         }
54247 +       spin_unlock_irqrestore(&dev->lock, iflags);
54249         return 0;
54251 @@ -1350,6 +1361,43 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
54252         return IRQ_HANDLED;
54255 +static struct gpiod_lookup_table minnowboard_udc_gpios = {
54256 +       .dev_id         = "0000:02:02.4",
54257 +       .table          = {
54258 +               GPIO_LOOKUP("sch_gpio.33158", 12, NULL, GPIO_ACTIVE_HIGH),
54259 +               {}
54260 +       },
54263 +static const struct dmi_system_id pch_udc_gpio_dmi_table[] = {
54264 +       {
54265 +               .ident = "MinnowBoard",
54266 +               .matches = {
54267 +                       DMI_MATCH(DMI_BOARD_NAME, "MinnowBoard"),
54268 +               },
54269 +               .driver_data = &minnowboard_udc_gpios,
54270 +       },
54271 +       { }
54274 +static void pch_vbus_gpio_remove_table(void *table)
54276 +       gpiod_remove_lookup_table(table);
54279 +static int pch_vbus_gpio_add_table(struct pch_udc_dev *dev)
54281 +       struct device *d = &dev->pdev->dev;
54282 +       const struct dmi_system_id *dmi;
54284 +       dmi = dmi_first_match(pch_udc_gpio_dmi_table);
54285 +       if (!dmi)
54286 +               return 0;
54288 +       gpiod_add_lookup_table(dmi->driver_data);
54289 +       return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, dmi->driver_data);
54292  /**
54293   * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
54294   * @dev:               Reference to the driver structure
54295 @@ -1360,6 +1408,7 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
54296   */
54297  static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
54299 +       struct device *d = &dev->pdev->dev;
54300         int err;
54301         int irq_num = 0;
54302         struct gpio_desc *gpiod;
54303 @@ -1367,8 +1416,12 @@ static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
54304         dev->vbus_gpio.port = NULL;
54305         dev->vbus_gpio.intr = 0;
54307 +       err = pch_vbus_gpio_add_table(dev);
54308 +       if (err)
54309 +               return err;
54311         /* Retrieve the GPIO line from the USB gadget device */
54312 -       gpiod = devm_gpiod_get(dev->gadget.dev.parent, NULL, GPIOD_IN);
54313 +       gpiod = devm_gpiod_get_optional(d, NULL, GPIOD_IN);
54314         if (IS_ERR(gpiod))
54315                 return PTR_ERR(gpiod);
54316         gpiod_set_consumer_name(gpiod, "pch_vbus");
54317 @@ -1756,7 +1809,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
54318         }
54319         /* prevent from using desc. - set HOST BUSY */
54320         dma_desc->status |= PCH_UDC_BS_HST_BSY;
54321 -       dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
54322 +       dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
54323         req->td_data = dma_desc;
54324         req->td_data_last = dma_desc;
54325         req->chain_len = 1;
54326 @@ -2298,6 +2351,21 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
54327                 pch_udc_set_dma(dev, DMA_DIR_RX);
54330 +static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
54331 +       __must_hold(&dev->lock)
54333 +       int rc;
54335 +       /* In some cases we can get an interrupt before driver gets setup */
54336 +       if (!dev->driver)
54337 +               return -ESHUTDOWN;
54339 +       spin_unlock(&dev->lock);
54340 +       rc = dev->driver->setup(&dev->gadget, &dev->setup_data);
54341 +       spin_lock(&dev->lock);
54342 +       return rc;
54345  /**
54346   * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
54347   * @dev:       Reference to the device structure
54348 @@ -2369,15 +2437,12 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
54349                         dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
54350                 else /* OUT */
54351                         dev->gadget.ep0 = &ep->ep;
54352 -               spin_lock(&dev->lock);
54353                 /* If Mass storage Reset */
54354                 if ((dev->setup_data.bRequestType == 0x21) &&
54355                     (dev->setup_data.bRequest == 0xFF))
54356                         dev->prot_stall = 0;
54357                 /* call gadget with setup data received */
54358 -               setup_supported = dev->driver->setup(&dev->gadget,
54359 -                                                    &dev->setup_data);
54360 -               spin_unlock(&dev->lock);
54361 +               setup_supported = pch_udc_gadget_setup(dev);
54363                 if (dev->setup_data.bRequestType & USB_DIR_IN) {
54364                         ep->td_data->status = (ep->td_data->status &
54365 @@ -2625,9 +2690,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
54366                 dev->ep[i].halted = 0;
54367         }
54368         dev->stall = 0;
54369 -       spin_unlock(&dev->lock);
54370 -       dev->driver->setup(&dev->gadget, &dev->setup_data);
54371 -       spin_lock(&dev->lock);
54372 +       pch_udc_gadget_setup(dev);
54375  /**
54376 @@ -2662,9 +2725,7 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
54377         dev->stall = 0;
54379         /* call gadget zero with setup data received */
54380 -       spin_unlock(&dev->lock);
54381 -       dev->driver->setup(&dev->gadget, &dev->setup_data);
54382 -       spin_lock(&dev->lock);
54383 +       pch_udc_gadget_setup(dev);
54386  /**
54387 @@ -2870,14 +2931,20 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
54388   * @dev:       Reference to the driver structure
54389   *
54390   * Return codes:
54391 - *     0: Success
54392 + *     0:              Success
54393 + *     -%ERRNO:        All kind of errors when retrieving VBUS GPIO
54394   */
54395  static int pch_udc_pcd_init(struct pch_udc_dev *dev)
54397 +       int ret;
54399         pch_udc_init(dev);
54400         pch_udc_pcd_reinit(dev);
54401 -       pch_vbus_gpio_init(dev);
54402 -       return 0;
54404 +       ret = pch_vbus_gpio_init(dev);
54405 +       if (ret)
54406 +               pch_udc_exit(dev);
54407 +       return ret;
54410  /**
54411 @@ -2938,7 +3005,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
54412         dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
54413                                        UDC_EP0OUT_BUFF_SIZE * 4,
54414                                        DMA_FROM_DEVICE);
54415 -       return 0;
54416 +       return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
54419  static int pch_udc_start(struct usb_gadget *g,
54420 @@ -3063,6 +3130,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
54421         if (retval)
54422                 return retval;
54424 +       dev->pdev = pdev;
54425         pci_set_drvdata(pdev, dev);
54427         /* Determine BAR based on PCI ID */
54428 @@ -3078,16 +3146,10 @@ static int pch_udc_probe(struct pci_dev *pdev,
54430         dev->base_addr = pcim_iomap_table(pdev)[bar];
54432 -       /*
54433 -        * FIXME: add a GPIO descriptor table to pdev.dev using
54434 -        * gpiod_add_descriptor_table() from <linux/gpio/machine.h> based on
54435 -        * the PCI subsystem ID. The system-dependent GPIO is necessary for
54436 -        * VBUS operation.
54437 -        */
54439         /* initialize the hardware */
54440 -       if (pch_udc_pcd_init(dev))
54441 -               return -ENODEV;
54442 +       retval = pch_udc_pcd_init(dev);
54443 +       if (retval)
54444 +               return retval;
54446         pci_enable_msi(pdev);
54448 @@ -3104,7 +3166,6 @@ static int pch_udc_probe(struct pci_dev *pdev,
54450         /* device struct setup */
54451         spin_lock_init(&dev->lock);
54452 -       dev->pdev = pdev;
54453         dev->gadget.ops = &pch_udc_ops;
54455         retval = init_dma_pools(dev);
54456 diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
54457 index 896c1a016d55..65cae4883454 100644
54458 --- a/drivers/usb/gadget/udc/r8a66597-udc.c
54459 +++ b/drivers/usb/gadget/udc/r8a66597-udc.c
54460 @@ -1849,6 +1849,8 @@ static int r8a66597_probe(struct platform_device *pdev)
54461                 return PTR_ERR(reg);
54463         ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
54464 +       if (!ires)
54465 +               return -EINVAL;
54466         irq = ires->start;
54467         irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
54469 diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
54470 index 1d3ebb07ccd4..b154b62abefa 100644
54471 --- a/drivers/usb/gadget/udc/s3c2410_udc.c
54472 +++ b/drivers/usb/gadget/udc/s3c2410_udc.c
54473 @@ -54,8 +54,6 @@ static struct clk             *udc_clock;
54474  static struct clk              *usb_bus_clock;
54475  static void __iomem            *base_addr;
54476  static int                     irq_usbd;
54477 -static u64                     rsrc_start;
54478 -static u64                     rsrc_len;
54479  static struct dentry           *s3c2410_udc_debugfs_root;
54481  static inline u32 udc_read(u32 reg)
54482 @@ -1752,7 +1750,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
54483         udc_clock = clk_get(NULL, "usb-device");
54484         if (IS_ERR(udc_clock)) {
54485                 dev_err(dev, "failed to get udc clock source\n");
54486 -               return PTR_ERR(udc_clock);
54487 +               retval = PTR_ERR(udc_clock);
54488 +               goto err_usb_bus_clk;
54489         }
54491         clk_prepare_enable(udc_clock);
54492 @@ -1775,7 +1774,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
54493         base_addr = devm_platform_ioremap_resource(pdev, 0);
54494         if (IS_ERR(base_addr)) {
54495                 retval = PTR_ERR(base_addr);
54496 -               goto err_mem;
54497 +               goto err_udc_clk;
54498         }
54500         the_controller = udc;
54501 @@ -1793,7 +1792,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
54502         if (retval != 0) {
54503                 dev_err(dev, "cannot get irq %i, err %d\n", irq_usbd, retval);
54504                 retval = -EBUSY;
54505 -               goto err_map;
54506 +               goto err_udc_clk;
54507         }
54509         dev_dbg(dev, "got irq %i\n", irq_usbd);
54510 @@ -1864,10 +1863,14 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
54511                 gpio_free(udc_info->vbus_pin);
54512  err_int:
54513         free_irq(irq_usbd, udc);
54514 -err_map:
54515 -       iounmap(base_addr);
54516 -err_mem:
54517 -       release_mem_region(rsrc_start, rsrc_len);
54518 +err_udc_clk:
54519 +       clk_disable_unprepare(udc_clock);
54520 +       clk_put(udc_clock);
54521 +       udc_clock = NULL;
54522 +err_usb_bus_clk:
54523 +       clk_disable_unprepare(usb_bus_clock);
54524 +       clk_put(usb_bus_clock);
54525 +       usb_bus_clock = NULL;
54527         return retval;
54529 @@ -1899,9 +1902,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
54531         free_irq(irq_usbd, udc);
54533 -       iounmap(base_addr);
54534 -       release_mem_region(rsrc_start, rsrc_len);
54536         if (!IS_ERR(udc_clock) && udc_clock != NULL) {
54537                 clk_disable_unprepare(udc_clock);
54538                 clk_put(udc_clock);
54539 diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
54540 index 32f1d3e90c26..99805d60a7ab 100644
54541 --- a/drivers/usb/gadget/udc/snps_udc_plat.c
54542 +++ b/drivers/usb/gadget/udc/snps_udc_plat.c
54543 @@ -114,8 +114,8 @@ static int udc_plat_probe(struct platform_device *pdev)
54545         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
54546         udc->virt_addr = devm_ioremap_resource(dev, res);
54547 -       if (IS_ERR(udc->regs))
54548 -               return PTR_ERR(udc->regs);
54549 +       if (IS_ERR(udc->virt_addr))
54550 +               return PTR_ERR(udc->virt_addr);
54552         /* udc csr registers base */
54553         udc->csr = udc->virt_addr + UDC_CSR_ADDR;
54554 diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
54555 index 580bef8eb4cb..2319c9737c2b 100644
54556 --- a/drivers/usb/gadget/udc/tegra-xudc.c
54557 +++ b/drivers/usb/gadget/udc/tegra-xudc.c
54558 @@ -3883,7 +3883,7 @@ static int tegra_xudc_remove(struct platform_device *pdev)
54560         pm_runtime_get_sync(xudc->dev);
54562 -       cancel_delayed_work(&xudc->plc_reset_work);
54563 +       cancel_delayed_work_sync(&xudc->plc_reset_work);
54564         cancel_work_sync(&xudc->usb_role_sw_work);
54566         usb_del_gadget_udc(&xudc->gadget);
54567 diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
54568 index b94f2a070c05..df9428f1dc5e 100644
54569 --- a/drivers/usb/host/Kconfig
54570 +++ b/drivers/usb/host/Kconfig
54571 @@ -272,6 +272,7 @@ config USB_EHCI_TEGRA
54572         select USB_CHIPIDEA
54573         select USB_CHIPIDEA_HOST
54574         select USB_CHIPIDEA_TEGRA
54575 +       select USB_GADGET
54576         help
54577           This option is deprecated now and the driver was removed, use
54578           USB_CHIPIDEA_TEGRA instead.
54579 diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
54580 index 5617ef30530a..f0e4a315cc81 100644
54581 --- a/drivers/usb/host/fotg210-hcd.c
54582 +++ b/drivers/usb/host/fotg210-hcd.c
54583 @@ -5568,7 +5568,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
54584         struct usb_hcd *hcd;
54585         struct resource *res;
54586         int irq;
54587 -       int retval = -ENODEV;
54588 +       int retval;
54589         struct fotg210_hcd *fotg210;
54591         if (usb_disabled())
54592 @@ -5588,7 +5588,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
54593         hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
54594                         dev_name(dev));
54595         if (!hcd) {
54596 -               dev_err(dev, "failed to create hcd with err %d\n", retval);
54597 +               dev_err(dev, "failed to create hcd\n");
54598                 retval = -ENOMEM;
54599                 goto fail_create_hcd;
54600         }
54601 diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
54602 index fa59b242cd51..e8af0a125f84 100644
54603 --- a/drivers/usb/host/xhci-ext-caps.h
54604 +++ b/drivers/usb/host/xhci-ext-caps.h
54605 @@ -7,8 +7,9 @@
54606   * Author: Sarah Sharp
54607   * Some code borrowed from the Linux EHCI driver.
54608   */
54609 -/* Up to 16 ms to halt an HC */
54610 -#define XHCI_MAX_HALT_USEC     (16*1000)
54612 +/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
54613 +#define XHCI_MAX_HALT_USEC     (32 * 1000)
54614  /* HC not running - set to 1 when run/stop bit is cleared. */
54615  #define XHCI_STS_HALT          (1<<0)
54617 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
54618 index f2c4ee7c4786..717c122f9449 100644
54619 --- a/drivers/usb/host/xhci-mem.c
54620 +++ b/drivers/usb/host/xhci-mem.c
54621 @@ -2129,6 +2129,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
54623         if (major_revision == 0x03) {
54624                 rhub = &xhci->usb3_rhub;
54625 +               /*
54626 +                * Some hosts incorrectly use sub-minor version for minor
54627 +                * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
54628 +                * for bcdUSB 0x310). Since there is no USB release with sub
54629 +                * minor version 0x301 to 0x309, we can assume that they are
54630 +                * incorrect and fix it here.
54631 +                */
54632 +               if (minor_revision > 0x00 && minor_revision < 0x10)
54633 +                       minor_revision <<= 4;
54634         } else if (major_revision <= 0x02) {
54635                 rhub = &xhci->usb2_rhub;
54636         } else {
54637 @@ -2240,6 +2249,9 @@ static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
54638                 return;
54639         rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
54640                         flags, dev_to_node(dev));
54641 +       if (!rhub->ports)
54642 +               return;
54644         for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
54645                 if (xhci->hw_ports[i].rhub != rhub ||
54646                     xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
54647 diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
54648 index b45e5bf08997..8950d1f10a7f 100644
54649 --- a/drivers/usb/host/xhci-mtk-sch.c
54650 +++ b/drivers/usb/host/xhci-mtk-sch.c
54651 @@ -378,6 +378,31 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
54652         sch_ep->allocated = used;
54655 +static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
54657 +       struct mu3h_sch_tt *tt = sch_ep->sch_tt;
54658 +       u32 num_esit, tmp;
54659 +       int base;
54660 +       int i, j;
54662 +       num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
54663 +       for (i = 0; i < num_esit; i++) {
54664 +               base = offset + i * sch_ep->esit;
54666 +               /*
54667 +                * Compared with hs bus, no matter what ep type,
54668 +                * the hub will always delay one uframe to send data
54669 +                */
54670 +               for (j = 0; j < sch_ep->cs_count; j++) {
54671 +                       tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe;
54672 +                       if (tmp > FS_PAYLOAD_MAX)
54673 +                               return -ERANGE;
54674 +               }
54675 +       }
54677 +       return 0;
54680  static int check_sch_tt(struct usb_device *udev,
54681         struct mu3h_sch_ep_info *sch_ep, u32 offset)
54683 @@ -402,7 +427,7 @@ static int check_sch_tt(struct usb_device *udev,
54684                         return -ERANGE;
54686                 for (i = 0; i < sch_ep->cs_count; i++)
54687 -                       if (test_bit(offset + i, tt->split_bit_map))
54688 +                       if (test_bit(offset + i, tt->ss_bit_map))
54689                                 return -ERANGE;
54691         } else {
54692 @@ -432,7 +457,7 @@ static int check_sch_tt(struct usb_device *udev,
54693                         cs_count = 7; /* HW limit */
54695                 for (i = 0; i < cs_count + 2; i++) {
54696 -                       if (test_bit(offset + i, tt->split_bit_map))
54697 +                       if (test_bit(offset + i, tt->ss_bit_map))
54698                                 return -ERANGE;
54699                 }
54701 @@ -448,24 +473,44 @@ static int check_sch_tt(struct usb_device *udev,
54702                         sch_ep->num_budget_microframes = sch_ep->esit;
54703         }
54705 -       return 0;
54706 +       return check_fs_bus_bw(sch_ep, offset);
54709  static void update_sch_tt(struct usb_device *udev,
54710 -       struct mu3h_sch_ep_info *sch_ep)
54711 +       struct mu3h_sch_ep_info *sch_ep, bool used)
54713         struct mu3h_sch_tt *tt = sch_ep->sch_tt;
54714         u32 base, num_esit;
54715 +       int bw_updated;
54716 +       int bits;
54717         int i, j;
54719         num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
54720 +       bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
54722 +       if (used)
54723 +               bw_updated = sch_ep->bw_cost_per_microframe;
54724 +       else
54725 +               bw_updated = -sch_ep->bw_cost_per_microframe;
54727         for (i = 0; i < num_esit; i++) {
54728                 base = sch_ep->offset + i * sch_ep->esit;
54729 -               for (j = 0; j < sch_ep->num_budget_microframes; j++)
54730 -                       set_bit(base + j, tt->split_bit_map);
54732 +               for (j = 0; j < bits; j++) {
54733 +                       if (used)
54734 +                               set_bit(base + j, tt->ss_bit_map);
54735 +                       else
54736 +                               clear_bit(base + j, tt->ss_bit_map);
54737 +               }
54739 +               for (j = 0; j < sch_ep->cs_count; j++)
54740 +                       tt->fs_bus_bw[base + j] += bw_updated;
54741         }
54743 -       list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
54744 +       if (used)
54745 +               list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
54746 +       else
54747 +               list_del(&sch_ep->tt_endpoint);
54750  static int check_sch_bw(struct usb_device *udev,
54751 @@ -535,7 +580,7 @@ static int check_sch_bw(struct usb_device *udev,
54752                 if (!tt_offset_ok)
54753                         return -ERANGE;
54755 -               update_sch_tt(udev, sch_ep);
54756 +               update_sch_tt(udev, sch_ep, 1);
54757         }
54759         /* update bus bandwidth info */
54760 @@ -548,15 +593,16 @@ static void destroy_sch_ep(struct usb_device *udev,
54761         struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
54763         /* only release ep bw check passed by check_sch_bw() */
54764 -       if (sch_ep->allocated)
54765 +       if (sch_ep->allocated) {
54766                 update_bus_bw(sch_bw, sch_ep, 0);
54767 +               if (sch_ep->sch_tt)
54768 +                       update_sch_tt(udev, sch_ep, 0);
54769 +       }
54771 -       list_del(&sch_ep->endpoint);
54773 -       if (sch_ep->sch_tt) {
54774 -               list_del(&sch_ep->tt_endpoint);
54775 +       if (sch_ep->sch_tt)
54776                 drop_tt(udev);
54777 -       }
54779 +       list_del(&sch_ep->endpoint);
54780         kfree(sch_ep);
54783 @@ -643,7 +689,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
54784                  */
54785                 if (usb_endpoint_xfer_int(&ep->desc)
54786                         || usb_endpoint_xfer_isoc(&ep->desc))
54787 -                       ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
54788 +                       ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1));
54790                 return 0;
54791         }
54792 @@ -730,10 +776,10 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
54793                 list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
54795                 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
54796 -               ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
54797 +               ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts)
54798                         | EP_BCSCOUNT(sch_ep->cs_count)
54799                         | EP_BBM(sch_ep->burst_mode));
54800 -               ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
54801 +               ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(sch_ep->offset)
54802                         | EP_BREPEAT(sch_ep->repeat));
54804                 xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
54805 diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
54806 index 2f27dc0d9c6b..1c331577fca9 100644
54807 --- a/drivers/usb/host/xhci-mtk.c
54808 +++ b/drivers/usb/host/xhci-mtk.c
54809 @@ -397,6 +397,8 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
54810         xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
54811         if (mtk->lpm_support)
54812                 xhci->quirks |= XHCI_LPM_SUPPORT;
54813 +       if (mtk->u2_lpm_disable)
54814 +               xhci->quirks |= XHCI_HW_LPM_DISABLE;
54816         /*
54817          * MTK xHCI 0.96: PSA is 1 by default even if doesn't support stream,
54818 @@ -469,6 +471,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
54819                 return ret;
54821         mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
54822 +       mtk->u2_lpm_disable = of_property_read_bool(node, "usb2-lpm-disable");
54823         /* optional property, ignore the error if it does not exist */
54824         of_property_read_u32(node, "mediatek,u3p-dis-msk",
54825                              &mtk->u3p_dis_msk);
54826 diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
54827 index cbb09dfea62e..2fc0568ba054 100644
54828 --- a/drivers/usb/host/xhci-mtk.h
54829 +++ b/drivers/usb/host/xhci-mtk.h
54830 @@ -20,13 +20,15 @@
54831  #define XHCI_MTK_MAX_ESIT      64
54833  /**
54834 - * @split_bit_map: used to avoid split microframes overlay
54835 + * @ss_bit_map: used to avoid start split microframes overlay
54836 + * @fs_bus_bw: array to keep track of bandwidth already used for FS
54837   * @ep_list: Endpoints using this TT
54838   * @usb_tt: usb TT related
54839   * @tt_port: TT port number
54840   */
54841  struct mu3h_sch_tt {
54842 -       DECLARE_BITMAP(split_bit_map, XHCI_MTK_MAX_ESIT);
54843 +       DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
54844 +       u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
54845         struct list_head ep_list;
54846         struct usb_tt *usb_tt;
54847         int tt_port;
54848 @@ -150,6 +152,7 @@ struct xhci_hcd_mtk {
54849         struct phy **phys;
54850         int num_phys;
54851         bool lpm_support;
54852 +       bool u2_lpm_disable;
54853         /* usb remote wakeup */
54854         bool uwk_en;
54855         struct regmap *uwk;
54856 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
54857 index 5bbccc9a0179..7bc18cf8042c 100644
54858 --- a/drivers/usb/host/xhci-pci.c
54859 +++ b/drivers/usb/host/xhci-pci.c
54860 @@ -57,6 +57,7 @@
54861  #define PCI_DEVICE_ID_INTEL_CML_XHCI                   0xa3af
54862  #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI            0x9a13
54863  #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI           0x1138
54864 +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI            0x461e
54866  #define PCI_DEVICE_ID_AMD_PROMONTORYA_4                        0x43b9
54867  #define PCI_DEVICE_ID_AMD_PROMONTORYA_3                        0x43ba
54868 @@ -166,8 +167,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
54869             (pdev->device == 0x15e0 || pdev->device == 0x15e1))
54870                 xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
54872 -       if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
54873 +       if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
54874                 xhci->quirks |= XHCI_DISABLE_SPARSE;
54875 +               xhci->quirks |= XHCI_RESET_ON_RESUME;
54876 +       }
54878         if (pdev->vendor == PCI_VENDOR_ID_AMD)
54879                 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
54880 @@ -243,7 +246,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
54881              pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
54882              pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
54883              pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
54884 -            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
54885 +            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
54886 +            pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
54887                 xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
54889         if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
54890 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
54891 index ce38076901e2..6cdea0d00d19 100644
54892 --- a/drivers/usb/host/xhci-ring.c
54893 +++ b/drivers/usb/host/xhci-ring.c
54894 @@ -863,7 +863,7 @@ static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
54895         return ret;
54898 -static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
54899 +static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
54900                                 struct xhci_virt_ep *ep, unsigned int stream_id,
54901                                 struct xhci_td *td,
54902                                 enum xhci_ep_reset_type reset_type)
54903 @@ -876,7 +876,7 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
54904          * Device will be reset soon to recover the link so don't do anything
54905          */
54906         if (ep->vdev->flags & VDEV_PORT_ERROR)
54907 -               return;
54908 +               return -ENODEV;
54910         /* add td to cancelled list and let reset ep handler take care of it */
54911         if (reset_type == EP_HARD_RESET) {
54912 @@ -889,16 +889,18 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
54914         if (ep->ep_state & EP_HALTED) {
54915                 xhci_dbg(xhci, "Reset ep command already pending\n");
54916 -               return;
54917 +               return 0;
54918         }
54920         err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
54921         if (err)
54922 -               return;
54923 +               return err;
54925         ep->ep_state |= EP_HALTED;
54927         xhci_ring_cmd_db(xhci);
54929 +       return 0;
54932  /*
54933 @@ -1015,6 +1017,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
54934         struct xhci_td *td = NULL;
54935         enum xhci_ep_reset_type reset_type;
54936         struct xhci_command *command;
54937 +       int err;
54939         if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
54940                 if (!xhci->devs[slot_id])
54941 @@ -1059,7 +1062,10 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
54942                                         td->status = -EPROTO;
54943                         }
54944                         /* reset ep, reset handler cleans up cancelled tds */
54945 -                       xhci_handle_halted_endpoint(xhci, ep, 0, td, reset_type);
54946 +                       err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
54947 +                                                         reset_type);
54948 +                       if (err)
54949 +                               break;
54950                         xhci_stop_watchdog_timer_in_irq(xhci, ep);
54951                         return;
54952                 case EP_STATE_RUNNING:
54953 @@ -2129,16 +2135,13 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
54954         return 0;
54957 -static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
54958 -       struct xhci_transfer_event *event, struct xhci_virt_ep *ep)
54959 +static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
54960 +                    struct xhci_ring *ep_ring, struct xhci_td *td,
54961 +                    u32 trb_comp_code)
54963         struct xhci_ep_ctx *ep_ctx;
54964 -       struct xhci_ring *ep_ring;
54965 -       u32 trb_comp_code;
54967 -       ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
54968         ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
54969 -       trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
54971         switch (trb_comp_code) {
54972         case COMP_STOPPED_LENGTH_INVALID:
54973 @@ -2234,9 +2237,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
54974  /*
54975   * Process control tds, update urb status and actual_length.
54976   */
54977 -static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
54978 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
54979 -       struct xhci_virt_ep *ep)
54980 +static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
54981 +               struct xhci_ring *ep_ring,  struct xhci_td *td,
54982 +                          union xhci_trb *ep_trb, struct xhci_transfer_event *event)
54984         struct xhci_ep_ctx *ep_ctx;
54985         u32 trb_comp_code;
54986 @@ -2324,15 +2327,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
54987                 td->urb->actual_length = requested;
54989  finish_td:
54990 -       return finish_td(xhci, td, event, ep);
54991 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
54994  /*
54995   * Process isochronous tds, update urb packet status and actual_length.
54996   */
54997 -static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
54998 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
54999 -       struct xhci_virt_ep *ep)
55000 +static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
55001 +               struct xhci_ring *ep_ring, struct xhci_td *td,
55002 +               union xhci_trb *ep_trb, struct xhci_transfer_event *event)
55004         struct urb_priv *urb_priv;
55005         int idx;
55006 @@ -2409,7 +2412,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
55008         td->urb->actual_length += frame->actual_length;
55010 -       return finish_td(xhci, td, event, ep);
55011 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
55014  static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
55015 @@ -2441,17 +2444,15 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
55016  /*
55017   * Process bulk and interrupt tds, update urb status and actual_length.
55018   */
55019 -static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
55020 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
55021 -       struct xhci_virt_ep *ep)
55022 +static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
55023 +               struct xhci_ring *ep_ring, struct xhci_td *td,
55024 +               union xhci_trb *ep_trb, struct xhci_transfer_event *event)
55026         struct xhci_slot_ctx *slot_ctx;
55027 -       struct xhci_ring *ep_ring;
55028         u32 trb_comp_code;
55029         u32 remaining, requested, ep_trb_len;
55031         slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
55032 -       ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
55033         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
55034         remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
55035         ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
55036 @@ -2511,7 +2512,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
55037                           remaining);
55038                 td->urb->actual_length = 0;
55039         }
55040 -       return finish_td(xhci, td, event, ep);
55042 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
55045  /*
55046 @@ -2854,11 +2856,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
55048                 /* update the urb's actual_length and give back to the core */
55049                 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
55050 -                       process_ctrl_td(xhci, td, ep_trb, event, ep);
55051 +                       process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
55052                 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
55053 -                       process_isoc_td(xhci, td, ep_trb, event, ep);
55054 +                       process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
55055                 else
55056 -                       process_bulk_intr_td(xhci, td, ep_trb, event, ep);
55057 +                       process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
55058  cleanup:
55059                 handling_skipped_tds = ep->skip &&
55060                         trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
55061 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
55062 index 1975016f46bf..0d2f1c37ab74 100644
55063 --- a/drivers/usb/host/xhci.c
55064 +++ b/drivers/usb/host/xhci.c
55065 @@ -228,6 +228,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
55066         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
55067         int err, i;
55068         u64 val;
55069 +       u32 intrs;
55071         /*
55072          * Some Renesas controllers get into a weird state if they are
55073 @@ -266,7 +267,10 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
55074         if (upper_32_bits(val))
55075                 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
55077 -       for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
55078 +       intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
55079 +                     ARRAY_SIZE(xhci->run_regs->ir_set));
55081 +       for (i = 0; i < intrs; i++) {
55082                 struct xhci_intr_reg __iomem *ir;
55084                 ir = &xhci->run_regs->ir_set[i];
55085 @@ -1510,7 +1514,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
55086   * we need to issue an evaluate context command and wait on it.
55087   */
55088  static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
55089 -               unsigned int ep_index, struct urb *urb)
55090 +               unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
55092         struct xhci_container_ctx *out_ctx;
55093         struct xhci_input_control_ctx *ctrl_ctx;
55094 @@ -1541,7 +1545,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
55095                  * changes max packet sizes.
55096                  */
55098 -               command = xhci_alloc_command(xhci, true, GFP_KERNEL);
55099 +               command = xhci_alloc_command(xhci, true, mem_flags);
55100                 if (!command)
55101                         return -ENOMEM;
55103 @@ -1635,7 +1639,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
55104                  */
55105                 if (urb->dev->speed == USB_SPEED_FULL) {
55106                         ret = xhci_check_maxpacket(xhci, slot_id,
55107 -                                       ep_index, urb);
55108 +                                       ep_index, urb, mem_flags);
55109                         if (ret < 0) {
55110                                 xhci_urb_free_priv(urb_priv);
55111                                 urb->hcpriv = NULL;
55112 @@ -3269,6 +3273,14 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
55114         /* config ep command clears toggle if add and drop ep flags are set */
55115         ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
55116 +       if (!ctrl_ctx) {
55117 +               spin_unlock_irqrestore(&xhci->lock, flags);
55118 +               xhci_free_command(xhci, cfg_cmd);
55119 +               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
55120 +                               __func__);
55121 +               goto cleanup;
55122 +       }
55124         xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
55125                                            ctrl_ctx, ep_flag, ep_flag);
55126         xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
55127 diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
55128 index eebeadd26946..6b92d037d8fc 100644
55129 --- a/drivers/usb/musb/mediatek.c
55130 +++ b/drivers/usb/musb/mediatek.c
55131 @@ -518,8 +518,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
55133         glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
55134         if (IS_ERR(glue->xceiv)) {
55135 -               dev_err(dev, "fail to getting usb-phy %d\n", ret);
55136                 ret = PTR_ERR(glue->xceiv);
55137 +               dev_err(dev, "fail to getting usb-phy %d\n", ret);
55138                 goto err_unregister_usb_phy;
55139         }
55141 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
55142 index fc0457db62e1..8f09a387b773 100644
55143 --- a/drivers/usb/musb/musb_core.c
55144 +++ b/drivers/usb/musb/musb_core.c
55145 @@ -2070,7 +2070,7 @@ static void musb_irq_work(struct work_struct *data)
55146         struct musb *musb = container_of(data, struct musb, irq_work.work);
55147         int error;
55149 -       error = pm_runtime_get_sync(musb->controller);
55150 +       error = pm_runtime_resume_and_get(musb->controller);
55151         if (error < 0) {
55152                 dev_err(musb->controller, "Could not enable: %i\n", error);
55154 diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
55155 index 97f37077b7f9..33b637d0d8d9 100644
55156 --- a/drivers/usb/roles/class.c
55157 +++ b/drivers/usb/roles/class.c
55158 @@ -189,6 +189,8 @@ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
55159                 return NULL;
55161         dev = class_find_device_by_fwnode(role_class, fwnode);
55162 +       if (dev)
55163 +               WARN_ON(!try_module_get(dev->parent->driver->owner));
55165         return dev ? to_role_switch(dev) : NULL;
55167 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
55168 index 7252b0ce75a6..fe1c13a8849c 100644
55169 --- a/drivers/usb/serial/ti_usb_3410_5052.c
55170 +++ b/drivers/usb/serial/ti_usb_3410_5052.c
55171 @@ -1418,14 +1418,19 @@ static int ti_set_serial_info(struct tty_struct *tty,
55172         struct serial_struct *ss)
55174         struct usb_serial_port *port = tty->driver_data;
55175 -       struct ti_port *tport = usb_get_serial_port_data(port);
55176 +       struct tty_port *tport = &port->port;
55177         unsigned cwait;
55179         cwait = ss->closing_wait;
55180         if (cwait != ASYNC_CLOSING_WAIT_NONE)
55181                 cwait = msecs_to_jiffies(10 * ss->closing_wait);
55183 -       tport->tp_port->port.closing_wait = cwait;
55184 +       if (!capable(CAP_SYS_ADMIN)) {
55185 +               if (cwait != tport->closing_wait)
55186 +                       return -EPERM;
55187 +       }
55189 +       tport->closing_wait = cwait;
55191         return 0;
55193 diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
55194 index 46d46a4f99c9..4e9c994a972a 100644
55195 --- a/drivers/usb/serial/usb_wwan.c
55196 +++ b/drivers/usb/serial/usb_wwan.c
55197 @@ -140,10 +140,10 @@ int usb_wwan_get_serial_info(struct tty_struct *tty,
55198         ss->line            = port->minor;
55199         ss->port            = port->port_number;
55200         ss->baud_base       = tty_get_baud_rate(port->port.tty);
55201 -       ss->close_delay     = port->port.close_delay / 10;
55202 +       ss->close_delay     = jiffies_to_msecs(port->port.close_delay) / 10;
55203         ss->closing_wait    = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
55204                                  ASYNC_CLOSING_WAIT_NONE :
55205 -                                port->port.closing_wait / 10;
55206 +                                jiffies_to_msecs(port->port.closing_wait) / 10;
55207         return 0;
55209  EXPORT_SYMBOL(usb_wwan_get_serial_info);
55210 @@ -155,9 +155,10 @@ int usb_wwan_set_serial_info(struct tty_struct *tty,
55211         unsigned int closing_wait, close_delay;
55212         int retval = 0;
55214 -       close_delay = ss->close_delay * 10;
55215 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
55216         closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
55217 -                       ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
55218 +                       ASYNC_CLOSING_WAIT_NONE :
55219 +                       msecs_to_jiffies(ss->closing_wait * 10);
55221         mutex_lock(&port->port.mutex);
55223 diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
55224 index 0ca04906da4b..c59c8b47a120 100644
55225 --- a/drivers/usb/serial/xr_serial.c
55226 +++ b/drivers/usb/serial/xr_serial.c
55227 @@ -467,6 +467,11 @@ static void xr_set_termios(struct tty_struct *tty,
55228                 termios->c_cflag &= ~CSIZE;
55229                 if (old_termios)
55230                         termios->c_cflag |= old_termios->c_cflag & CSIZE;
55231 +               else
55232 +                       termios->c_cflag |= CS8;
55234 +               if (C_CSIZE(tty) == CS7)
55235 +                       bits |= XR21V141X_UART_DATA_7;
55236                 else
55237                         bits |= XR21V141X_UART_DATA_8;
55238                 break;
55239 diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
55240 index d21750bbbb44..6eaeba9b096e 100644
55241 --- a/drivers/usb/typec/stusb160x.c
55242 +++ b/drivers/usb/typec/stusb160x.c
55243 @@ -682,8 +682,8 @@ static int stusb160x_probe(struct i2c_client *client)
55244         }
55246         fwnode = device_get_named_child_node(chip->dev, "connector");
55247 -       if (IS_ERR(fwnode))
55248 -               return PTR_ERR(fwnode);
55249 +       if (!fwnode)
55250 +               return -ENODEV;
55252         /*
55253          * When both VDD and VSYS power supplies are present, the low power
55254 diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
55255 index a27deb0b5f03..027afd7dfdce 100644
55256 --- a/drivers/usb/typec/tcpm/tcpci.c
55257 +++ b/drivers/usb/typec/tcpm/tcpci.c
55258 @@ -24,6 +24,15 @@
55259  #define        AUTO_DISCHARGE_PD_HEADROOM_MV           850
55260  #define        AUTO_DISCHARGE_PPS_HEADROOM_MV          1250
55262 +#define tcpc_presenting_cc1_rd(reg) \
55263 +       (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
55264 +        (((reg) & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) == \
55265 +         (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT)))
55266 +#define tcpc_presenting_cc2_rd(reg) \
55267 +       (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
55268 +        (((reg) & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) == \
55269 +         (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT)))
55271  struct tcpci {
55272         struct device *dev;
55274 @@ -178,19 +187,25 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc,
55275                         enum typec_cc_status *cc1, enum typec_cc_status *cc2)
55277         struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
55278 -       unsigned int reg;
55279 +       unsigned int reg, role_control;
55280         int ret;
55282 +       ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
55283 +       if (ret < 0)
55284 +               return ret;
55286         ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
55287         if (ret < 0)
55288                 return ret;
55290         *cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
55291                                  TCPC_CC_STATUS_CC1_MASK,
55292 -                                reg & TCPC_CC_STATUS_TERM);
55293 +                                reg & TCPC_CC_STATUS_TERM ||
55294 +                                tcpc_presenting_cc1_rd(role_control));
55295         *cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
55296                                  TCPC_CC_STATUS_CC2_MASK,
55297 -                                reg & TCPC_CC_STATUS_TERM);
55298 +                                reg & TCPC_CC_STATUS_TERM ||
55299 +                                tcpc_presenting_cc2_rd(role_control));
55301         return 0;
55303 diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
55304 index ce7af398c7c1..52acc884a61f 100644
55305 --- a/drivers/usb/typec/tcpm/tcpm.c
55306 +++ b/drivers/usb/typec/tcpm/tcpm.c
55307 @@ -268,12 +268,27 @@ struct pd_mode_data {
55308         struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
55309  };
55312 + * @min_volt: Actual min voltage at the local port
55313 + * @req_min_volt: Requested min voltage to the port partner
55314 + * @max_volt: Actual max voltage at the local port
55315 + * @req_max_volt: Requested max voltage to the port partner
55316 + * @max_curr: Actual max current at the local port
55317 + * @req_max_curr: Requested max current of the port partner
55318 + * @req_out_volt: Requested output voltage to the port partner
55319 + * @req_op_curr: Requested operating current to the port partner
55320 + * @supported: Partner has at least one APDO hence supports PPS
55321 + * @active: PPS mode is active
55322 + */
55323  struct pd_pps_data {
55324         u32 min_volt;
55325 +       u32 req_min_volt;
55326         u32 max_volt;
55327 +       u32 req_max_volt;
55328         u32 max_curr;
55329 -       u32 out_volt;
55330 -       u32 op_curr;
55331 +       u32 req_max_curr;
55332 +       u32 req_out_volt;
55333 +       u32 req_op_curr;
55334         bool supported;
55335         bool active;
55336  };
55337 @@ -389,7 +404,10 @@ struct tcpm_port {
55338         unsigned int operating_snk_mw;
55339         bool update_sink_caps;
55341 -       /* Requested current / voltage */
55342 +       /* Requested current / voltage to the port partner */
55343 +       u32 req_current_limit;
55344 +       u32 req_supply_voltage;
55345 +       /* Actual current / voltage limit of the local port */
55346         u32 current_limit;
55347         u32 supply_voltage;
55349 @@ -438,6 +456,9 @@ struct tcpm_port {
55350         enum tcpm_ams next_ams;
55351         bool in_ams;
55353 +       /* Auto vbus discharge status */
55354 +       bool auto_vbus_discharge_enabled;
55356  #ifdef CONFIG_DEBUG_FS
55357         struct dentry *dentry;
55358         struct mutex logbuffer_lock;    /* log buffer access lock */
55359 @@ -507,6 +528,9 @@ static const char * const pd_rev[] = {
55360         (tcpm_port_is_sink(port) && \
55361         ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
55363 +#define tcpm_wait_for_discharge(port) \
55364 +       (((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
55366  static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
55368         if (port->port_type == TYPEC_PORT_DRP) {
55369 @@ -1853,7 +1877,6 @@ static void vdm_run_state_machine(struct tcpm_port *port)
55370                         }
55372                         if (res < 0) {
55373 -                               port->vdm_sm_running = false;
55374                                 return;
55375                         }
55376                 }
55377 @@ -1869,6 +1892,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
55378                 port->vdo_data[0] = port->vdo_retry;
55379                 port->vdo_count = 1;
55380                 port->vdm_state = VDM_STATE_READY;
55381 +               tcpm_ams_finish(port);
55382                 break;
55383         case VDM_STATE_BUSY:
55384                 port->vdm_state = VDM_STATE_ERR_TMOUT;
55385 @@ -1934,7 +1958,7 @@ static void vdm_state_machine_work(struct kthread_work *work)
55386                  port->vdm_state != VDM_STATE_BUSY &&
55387                  port->vdm_state != VDM_STATE_SEND_MESSAGE);
55389 -       if (port->vdm_state == VDM_STATE_ERR_TMOUT)
55390 +       if (port->vdm_state < VDM_STATE_READY)
55391                 port->vdm_sm_running = false;
55393         mutex_unlock(&port->lock);
55394 @@ -2363,7 +2387,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
55395                 port->nr_sink_caps = cnt;
55396                 port->sink_cap_done = true;
55397                 if (port->ams == GET_SINK_CAPABILITIES)
55398 -                       tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
55399 +                       tcpm_set_state(port, ready_state(port), 0);
55400                 /* Unexpected Sink Capabilities */
55401                 else
55402                         tcpm_pd_handle_msg(port,
55403 @@ -2432,8 +2456,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
55404                 case SNK_TRANSITION_SINK:
55405                         if (port->vbus_present) {
55406                                 tcpm_set_current_limit(port,
55407 -                                                      port->current_limit,
55408 -                                                      port->supply_voltage);
55409 +                                                      port->req_current_limit,
55410 +                                                      port->req_supply_voltage);
55411                                 port->explicit_contract = true;
55412                                 tcpm_set_auto_vbus_discharge_threshold(port,
55413                                                                        TYPEC_PWR_MODE_PD,
55414 @@ -2492,8 +2516,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
55415                         break;
55416                 case SNK_NEGOTIATE_PPS_CAPABILITIES:
55417                         /* Revert data back from any requested PPS updates */
55418 -                       port->pps_data.out_volt = port->supply_voltage;
55419 -                       port->pps_data.op_curr = port->current_limit;
55420 +                       port->pps_data.req_out_volt = port->supply_voltage;
55421 +                       port->pps_data.req_op_curr = port->current_limit;
55422                         port->pps_status = (type == PD_CTRL_WAIT ?
55423                                             -EAGAIN : -EOPNOTSUPP);
55425 @@ -2525,6 +2549,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
55426                         port->sink_cap_done = true;
55427                         tcpm_set_state(port, ready_state(port), 0);
55428                         break;
55429 +               case SRC_READY:
55430 +               case SNK_READY:
55431 +                       if (port->vdm_state > VDM_STATE_READY) {
55432 +                               port->vdm_state = VDM_STATE_DONE;
55433 +                               if (tcpm_vdm_ams(port))
55434 +                                       tcpm_ams_finish(port);
55435 +                               mod_vdm_delayed_work(port, 0);
55436 +                               break;
55437 +                       }
55438 +                       fallthrough;
55439                 default:
55440                         tcpm_pd_handle_state(port,
55441                                              port->pwr_role == TYPEC_SOURCE ?
55442 @@ -2542,8 +2576,12 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
55443                         break;
55444                 case SNK_NEGOTIATE_PPS_CAPABILITIES:
55445                         port->pps_data.active = true;
55446 -                       port->supply_voltage = port->pps_data.out_volt;
55447 -                       port->current_limit = port->pps_data.op_curr;
55448 +                       port->pps_data.min_volt = port->pps_data.req_min_volt;
55449 +                       port->pps_data.max_volt = port->pps_data.req_max_volt;
55450 +                       port->pps_data.max_curr = port->pps_data.req_max_curr;
55451 +                       port->req_supply_voltage = port->pps_data.req_out_volt;
55452 +                       port->req_current_limit = port->pps_data.req_op_curr;
55453 +                       power_supply_changed(port->psy);
55454                         tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
55455                         break;
55456                 case SOFT_RESET_SEND:
55457 @@ -3102,17 +3140,16 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
55458                 src = port->source_caps[src_pdo];
55459                 snk = port->snk_pdo[snk_pdo];
55461 -               port->pps_data.min_volt = max(pdo_pps_apdo_min_voltage(src),
55462 -                                             pdo_pps_apdo_min_voltage(snk));
55463 -               port->pps_data.max_volt = min(pdo_pps_apdo_max_voltage(src),
55464 -                                             pdo_pps_apdo_max_voltage(snk));
55465 -               port->pps_data.max_curr = min_pps_apdo_current(src, snk);
55466 -               port->pps_data.out_volt = min(port->pps_data.max_volt,
55467 -                                             max(port->pps_data.min_volt,
55468 -                                                 port->pps_data.out_volt));
55469 -               port->pps_data.op_curr = min(port->pps_data.max_curr,
55470 -                                            port->pps_data.op_curr);
55471 -               power_supply_changed(port->psy);
55472 +               port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src),
55473 +                                                 pdo_pps_apdo_min_voltage(snk));
55474 +               port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
55475 +                                                 pdo_pps_apdo_max_voltage(snk));
55476 +               port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
55477 +               port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
55478 +                                                 max(port->pps_data.req_min_volt,
55479 +                                                     port->pps_data.req_out_volt));
55480 +               port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
55481 +                                                port->pps_data.req_op_curr);
55482         }
55484         return src_pdo;
55485 @@ -3192,8 +3229,8 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
55486                          flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
55487         }
55489 -       port->current_limit = ma;
55490 -       port->supply_voltage = mv;
55491 +       port->req_current_limit = ma;
55492 +       port->req_supply_voltage = mv;
55494         return 0;
55496 @@ -3239,10 +3276,10 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
55497                         tcpm_log(port, "Invalid APDO selected!");
55498                         return -EINVAL;
55499                 }
55500 -               max_mv = port->pps_data.max_volt;
55501 -               max_ma = port->pps_data.max_curr;
55502 -               out_mv = port->pps_data.out_volt;
55503 -               op_ma = port->pps_data.op_curr;
55504 +               max_mv = port->pps_data.req_max_volt;
55505 +               max_ma = port->pps_data.req_max_curr;
55506 +               out_mv = port->pps_data.req_out_volt;
55507 +               op_ma = port->pps_data.req_op_curr;
55508                 break;
55509         default:
55510                 tcpm_log(port, "Invalid PDO selected!");
55511 @@ -3289,8 +3326,8 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
55512         tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
55513                  src_pdo_index, out_mv, op_ma);
55515 -       port->pps_data.op_curr = op_ma;
55516 -       port->pps_data.out_volt = out_mv;
55517 +       port->pps_data.req_op_curr = op_ma;
55518 +       port->pps_data.req_out_volt = out_mv;
55520         return 0;
55522 @@ -3418,6 +3455,8 @@ static int tcpm_src_attach(struct tcpm_port *port)
55523         if (port->tcpc->enable_auto_vbus_discharge) {
55524                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
55525                 tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
55526 +               if (!ret)
55527 +                       port->auto_vbus_discharge_enabled = true;
55528         }
55530         ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
55531 @@ -3500,6 +3539,8 @@ static void tcpm_reset_port(struct tcpm_port *port)
55532         if (port->tcpc->enable_auto_vbus_discharge) {
55533                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, false);
55534                 tcpm_log_force(port, "Disable vbus discharge ret:%d", ret);
55535 +               if (!ret)
55536 +                       port->auto_vbus_discharge_enabled = false;
55537         }
55538         port->in_ams = false;
55539         port->ams = NONE_AMS;
55540 @@ -3533,8 +3574,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
55541         port->sink_cap_done = false;
55542         if (port->tcpc->enable_frs)
55543                 port->tcpc->enable_frs(port->tcpc, false);
55545 -       power_supply_changed(port->psy);
55548  static void tcpm_detach(struct tcpm_port *port)
55549 @@ -3574,6 +3613,8 @@ static int tcpm_snk_attach(struct tcpm_port *port)
55550                 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
55551                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
55552                 tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
55553 +               if (!ret)
55554 +                       port->auto_vbus_discharge_enabled = true;
55555         }
55557         ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
55558 @@ -4103,6 +4144,23 @@ static void run_state_machine(struct tcpm_port *port)
55559                 }
55560                 break;
55561         case SNK_TRANSITION_SINK:
55562 +               /* From the USB PD spec:
55563 +                * "The Sink Shall transition to Sink Standby before a positive or
55564 +                * negative voltage transition of VBUS. During Sink Standby
55565 +                * the Sink Shall reduce its power draw to pSnkStdby."
55566 +                *
55567 +                * This is not applicable to PPS though as the port can continue
55568 +                * to draw negotiated power without switching to standby.
55569 +                */
55570 +               if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
55571 +                   port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
55572 +                       u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
55574 +                       tcpm_log(port, "Setting standby current %u mV @ %u mA",
55575 +                                port->supply_voltage, stdby_ma);
55576 +                       tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
55577 +               }
55578 +               fallthrough;
55579         case SNK_TRANSITION_SINK_VBUS:
55580                 tcpm_set_state(port, hard_reset_state(port),
55581                                PD_T_PS_TRANSITION);
55582 @@ -4676,9 +4734,9 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
55583                 if (tcpm_port_is_disconnected(port) ||
55584                     !tcpm_port_is_source(port)) {
55585                         if (port->port_type == TYPEC_PORT_SRC)
55586 -                               tcpm_set_state(port, SRC_UNATTACHED, 0);
55587 +                               tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
55588                         else
55589 -                               tcpm_set_state(port, SNK_UNATTACHED, 0);
55590 +                               tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
55591                 }
55592                 break;
55593         case SNK_UNATTACHED:
55594 @@ -4709,7 +4767,23 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
55595                         tcpm_set_state(port, SNK_DEBOUNCED, 0);
55596                 break;
55597         case SNK_READY:
55598 -               if (tcpm_port_is_disconnected(port))
55599 +               /*
55600 +                * EXIT condition is based primarily on vbus disconnect and CC is secondary.
55601 +                * "A port that has entered into USB PD communications with the Source and
55602 +                * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
55603 +                * cable disconnect in addition to monitoring VBUS.
55604 +                *
55605 +                * A port that is monitoring the CC voltage for disconnect (but is not in
55606 +                * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
55607 +                * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
55608 +                * vRd-USB for tPDDebounce."
55609 +                *
55610 +                * When set_auto_vbus_discharge_threshold is enabled, CC pins go
55611 +                * away before vbus decays to disconnect threshold. Allow
55612 +                * disconnect to be driven by vbus disconnect when auto vbus
55613 +                * discharge is enabled.
55614 +                */
55615 +               if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
55616                         tcpm_set_state(port, unattached_state(port), 0);
55617                 else if (!port->pd_capable &&
55618                          (cc1 != old_cc1 || cc2 != old_cc2))
55619 @@ -4808,9 +4882,13 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
55620                  * Ignore CC changes here.
55621                  */
55622                 break;
55624         default:
55625 -               if (tcpm_port_is_disconnected(port))
55626 +               /*
55627 +                * While acting as sink and auto vbus discharge is enabled, allow disconnect
55628 +                * to be driven by vbus disconnect.
55629 +                */
55630 +               if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
55631 +                                                        port->auto_vbus_discharge_enabled))
55632                         tcpm_set_state(port, unattached_state(port), 0);
55633                 break;
55634         }
55635 @@ -4974,8 +5052,16 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
55636         case SRC_TRANSITION_SUPPLY:
55637         case SRC_READY:
55638         case SRC_WAIT_NEW_CAPABILITIES:
55639 -               /* Force to unattached state to re-initiate connection */
55640 -               tcpm_set_state(port, SRC_UNATTACHED, 0);
55641 +               /*
55642 +                * Force to unattached state to re-initiate connection.
55643 +                * DRP port should move to Unattached.SNK instead of Unattached.SRC if
55644 +                * sink removed. Although sink removal here is due to source's vbus collapse,
55645 +                * treat it the same way for consistency.
55646 +                */
55647 +               if (port->port_type == TYPEC_PORT_SRC)
55648 +                       tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
55649 +               else
55650 +                       tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
55651                 break;
55653         case PORT_RESET:
55654 @@ -4994,9 +5080,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
55655                 break;
55657         default:
55658 -               if (port->pwr_role == TYPEC_SINK &&
55659 -                   port->attached)
55660 -                       tcpm_set_state(port, SNK_UNATTACHED, 0);
55661 +               if (port->pwr_role == TYPEC_SINK && port->attached)
55662 +                       tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
55663                 break;
55664         }
55666 @@ -5018,7 +5103,23 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
55667                         tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
55668                                        PD_T_CC_DEBOUNCE);
55669                 break;
55670 +       case SRC_STARTUP:
55671 +       case SRC_SEND_CAPABILITIES:
55672 +       case SRC_SEND_CAPABILITIES_TIMEOUT:
55673 +       case SRC_NEGOTIATE_CAPABILITIES:
55674 +       case SRC_TRANSITION_SUPPLY:
55675 +       case SRC_READY:
55676 +       case SRC_WAIT_NEW_CAPABILITIES:
55677 +               if (port->auto_vbus_discharge_enabled) {
55678 +                       if (port->port_type == TYPEC_PORT_SRC)
55679 +                               tcpm_set_state(port, SRC_UNATTACHED, 0);
55680 +                       else
55681 +                               tcpm_set_state(port, SNK_UNATTACHED, 0);
55682 +               }
55683 +               break;
55684         default:
55685 +               if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
55686 +                       tcpm_set_state(port, SNK_UNATTACHED, 0);
55687                 break;
55688         }
55690 @@ -5374,7 +5475,7 @@ static int tcpm_try_role(struct typec_port *p, int role)
55691         return ret;
55694 -static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
55695 +static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
55697         unsigned int target_mw;
55698         int ret;
55699 @@ -5392,12 +5493,12 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
55700                 goto port_unlock;
55701         }
55703 -       if (op_curr > port->pps_data.max_curr) {
55704 +       if (req_op_curr > port->pps_data.max_curr) {
55705                 ret = -EINVAL;
55706                 goto port_unlock;
55707         }
55709 -       target_mw = (op_curr * port->pps_data.out_volt) / 1000;
55710 +       target_mw = (req_op_curr * port->supply_voltage) / 1000;
55711         if (target_mw < port->operating_snk_mw) {
55712                 ret = -EINVAL;
55713                 goto port_unlock;
55714 @@ -5411,10 +5512,10 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
55715         }
55717         /* Round down operating current to align with PPS valid steps */
55718 -       op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
55719 +       req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
55721         reinit_completion(&port->pps_complete);
55722 -       port->pps_data.op_curr = op_curr;
55723 +       port->pps_data.req_op_curr = req_op_curr;
55724         port->pps_status = 0;
55725         port->pps_pending = true;
55726         mutex_unlock(&port->lock);
55727 @@ -5435,7 +5536,7 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
55728         return ret;
55731 -static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
55732 +static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
55734         unsigned int target_mw;
55735         int ret;
55736 @@ -5453,13 +5554,13 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
55737                 goto port_unlock;
55738         }
55740 -       if (out_volt < port->pps_data.min_volt ||
55741 -           out_volt > port->pps_data.max_volt) {
55742 +       if (req_out_volt < port->pps_data.min_volt ||
55743 +           req_out_volt > port->pps_data.max_volt) {
55744                 ret = -EINVAL;
55745                 goto port_unlock;
55746         }
55748 -       target_mw = (port->pps_data.op_curr * out_volt) / 1000;
55749 +       target_mw = (port->current_limit * req_out_volt) / 1000;
55750         if (target_mw < port->operating_snk_mw) {
55751                 ret = -EINVAL;
55752                 goto port_unlock;
55753 @@ -5473,10 +5574,10 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
55754         }
55756         /* Round down output voltage to align with PPS valid steps */
55757 -       out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
55758 +       req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
55760         reinit_completion(&port->pps_complete);
55761 -       port->pps_data.out_volt = out_volt;
55762 +       port->pps_data.req_out_volt = req_out_volt;
55763         port->pps_status = 0;
55764         port->pps_pending = true;
55765         mutex_unlock(&port->lock);
55766 @@ -5534,8 +5635,8 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
55768         /* Trigger PPS request or move back to standard PDO contract */
55769         if (activate) {
55770 -               port->pps_data.out_volt = port->supply_voltage;
55771 -               port->pps_data.op_curr = port->current_limit;
55772 +               port->pps_data.req_out_volt = port->supply_voltage;
55773 +               port->pps_data.req_op_curr = port->current_limit;
55774         }
55775         mutex_unlock(&port->lock);
55777 diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
55778 index 29bd1c5a283c..4038104568f5 100644
55779 --- a/drivers/usb/typec/tps6598x.c
55780 +++ b/drivers/usb/typec/tps6598x.c
55781 @@ -614,8 +614,8 @@ static int tps6598x_probe(struct i2c_client *client)
55782                 return ret;
55784         fwnode = device_get_named_child_node(&client->dev, "connector");
55785 -       if (IS_ERR(fwnode))
55786 -               return PTR_ERR(fwnode);
55787 +       if (!fwnode)
55788 +               return -ENODEV;
55790         tps->role_sw = fwnode_usb_role_switch_get(fwnode);
55791         if (IS_ERR(tps->role_sw)) {
55792 diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
55793 index 244270755ae6..1e266f083bf8 100644
55794 --- a/drivers/usb/typec/ucsi/ucsi.c
55795 +++ b/drivers/usb/typec/ucsi/ucsi.c
55796 @@ -495,7 +495,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
55797         }
55800 -static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
55801 +static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
55802 +                        u32 *pdos, int offset, int num_pdos)
55804         struct ucsi *ucsi = con->ucsi;
55805         u64 command;
55806 @@ -503,17 +504,39 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
55808         command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
55809         command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
55810 -       command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
55811 +       command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
55812 +       command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
55813         command |= UCSI_GET_PDOS_SRC_PDOS;
55814 -       ret = ucsi_send_command(ucsi, command, con->src_pdos,
55815 -                              sizeof(con->src_pdos));
55816 -       if (ret < 0) {
55817 +       ret = ucsi_send_command(ucsi, command, pdos + offset,
55818 +                               num_pdos * sizeof(u32));
55819 +       if (ret < 0)
55820                 dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
55821 +       if (ret == 0 && offset == 0)
55822 +               dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
55824 +       return ret;
55827 +static void ucsi_get_src_pdos(struct ucsi_connector *con, int is_partner)
55829 +       int ret;
55831 +       /* UCSI max payload means only getting at most 4 PDOs at a time */
55832 +       ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
55833 +       if (ret < 0)
55834                 return;
55835 -       }
55837         con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
55838 -       if (ret == 0)
55839 -               dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
55840 +       if (con->num_pdos < UCSI_MAX_PDOS)
55841 +               return;
55843 +       /* get the remaining PDOs, if any */
55844 +       ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
55845 +                           PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
55846 +       if (ret < 0)
55847 +               return;
55849 +       con->num_pdos += ret / sizeof(u32);
55852  static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
55853 @@ -522,7 +545,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
55854         case UCSI_CONSTAT_PWR_OPMODE_PD:
55855                 con->rdo = con->status.request_data_obj;
55856                 typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
55857 -               ucsi_get_pdos(con, 1);
55858 +               ucsi_get_src_pdos(con, 1);
55859                 break;
55860         case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
55861                 con->rdo = 0;
55862 @@ -999,6 +1022,7 @@ static const struct typec_operations ucsi_ops = {
55863         .pr_set = ucsi_pr_swap
55864  };
55866 +/* Caller must call fwnode_handle_put() after use */
55867  static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
55869         struct fwnode_handle *fwnode;
55870 @@ -1033,7 +1057,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
55871         command |= UCSI_CONNECTOR_NUMBER(con->num);
55872         ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
55873         if (ret < 0)
55874 -               goto out;
55875 +               goto out_unlock;
55877         if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
55878                 cap->data = TYPEC_PORT_DRD;
55879 @@ -1151,6 +1175,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
55880         trace_ucsi_register_port(con->num, &con->status);
55882  out:
55883 +       fwnode_handle_put(cap->fwnode);
55884 +out_unlock:
55885         mutex_unlock(&con->lock);
55886         return ret;
55888 diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
55889 index 3920e20a9e9e..cee666790907 100644
55890 --- a/drivers/usb/typec/ucsi/ucsi.h
55891 +++ b/drivers/usb/typec/ucsi/ucsi.h
55892 @@ -8,6 +8,7 @@
55893  #include <linux/power_supply.h>
55894  #include <linux/types.h>
55895  #include <linux/usb/typec.h>
55896 +#include <linux/usb/pd.h>
55897  #include <linux/usb/role.h>
55899  /* -------------------------------------------------------------------------- */
55900 @@ -134,7 +135,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
55902  /* GET_PDOS command bits */
55903  #define UCSI_GET_PDOS_PARTNER_PDO(_r_)         ((u64)(_r_) << 23)
55904 +#define UCSI_GET_PDOS_PDO_OFFSET(_r_)          ((u64)(_r_) << 24)
55905  #define UCSI_GET_PDOS_NUM_PDOS(_r_)            ((u64)(_r_) << 32)
55906 +#define UCSI_MAX_PDOS                          (4)
55907  #define UCSI_GET_PDOS_SRC_PDOS                 ((u64)1 << 34)
55909  /* -------------------------------------------------------------------------- */
55910 @@ -302,7 +305,6 @@ struct ucsi {
55912  #define UCSI_MAX_SVID          5
55913  #define UCSI_MAX_ALTMODES      (UCSI_MAX_SVID * 6)
55914 -#define UCSI_MAX_PDOS          (4)
55916  #define UCSI_TYPEC_VSAFE5V     5000
55917  #define UCSI_TYPEC_1_5_CURRENT 1500
55918 @@ -330,7 +332,7 @@ struct ucsi_connector {
55919         struct power_supply *psy;
55920         struct power_supply_desc psy_desc;
55921         u32 rdo;
55922 -       u32 src_pdos[UCSI_MAX_PDOS];
55923 +       u32 src_pdos[PDO_MAX_OBJECTS];
55924         int num_pdos;
55926         struct usb_role_switch *usb_role_sw;
55927 diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
55928 index f7633ee655a1..d1cf6b51bf85 100644
55929 --- a/drivers/usb/usbip/vudc_sysfs.c
55930 +++ b/drivers/usb/usbip/vudc_sysfs.c
55931 @@ -156,12 +156,14 @@ static ssize_t usbip_sockfd_store(struct device *dev,
55932                 tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
55933                 if (IS_ERR(tcp_rx)) {
55934                         sockfd_put(socket);
55935 +                       mutex_unlock(&udc->ud.sysfs_lock);
55936                         return -EINVAL;
55937                 }
55938                 tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx");
55939                 if (IS_ERR(tcp_tx)) {
55940                         kthread_stop(tcp_rx);
55941                         sockfd_put(socket);
55942 +                       mutex_unlock(&udc->ud.sysfs_lock);
55943                         return -EINVAL;
55944                 }
55946 diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
55947 index f27e25112c40..8722f5effacd 100644
55948 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
55949 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
55950 @@ -568,23 +568,39 @@ static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
55951                 dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
55952                 goto out_nc_unreg;
55953         }
55954 +       return 0;
55956 +out_nc_unreg:
55957 +       bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
55958 +       return ret;
55961 +static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
55963 +       int ret;
55965 +       /* non dprc devices do not scan for other devices */
55966 +       if (!is_fsl_mc_bus_dprc(mc_dev))
55967 +               return 0;
55968         ret = dprc_scan_container(mc_dev, false);
55969         if (ret) {
55970 -               dev_err(&mc_dev->dev, "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
55971 -               goto out_dprc_cleanup;
55972 +               dev_err(&mc_dev->dev,
55973 +                       "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
55974 +               dprc_remove_devices(mc_dev, NULL, 0);
55975 +               return ret;
55976         }
55978         return 0;
55981 +static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
55983 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
55985 +       if (!is_fsl_mc_bus_dprc(mc_dev))
55986 +               return;
55988 -out_dprc_cleanup:
55989 -       dprc_remove_devices(mc_dev, NULL, 0);
55990         dprc_cleanup(mc_dev);
55991 -out_nc_unreg:
55992         bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
55993 -       vdev->nb.notifier_call = NULL;
55995 -       return ret;
55998  static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
55999 @@ -607,29 +623,39 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
56000         }
56002         vdev->mc_dev = mc_dev;
56004 -       ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
56005 -       if (ret) {
56006 -               dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
56007 -               goto out_group_put;
56008 -       }
56009 +       mutex_init(&vdev->igate);
56011         ret = vfio_fsl_mc_reflck_attach(vdev);
56012         if (ret)
56013 -               goto out_group_dev;
56014 +               goto out_group_put;
56016         ret = vfio_fsl_mc_init_device(vdev);
56017         if (ret)
56018                 goto out_reflck;
56020 -       mutex_init(&vdev->igate);
56021 +       ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
56022 +       if (ret) {
56023 +               dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
56024 +               goto out_device;
56025 +       }
56027 +       /*
56028 +        * This triggers recursion into vfio_fsl_mc_probe() on another device
56029 +        * and the vfio_fsl_mc_reflck_attach() must succeed, which relies on the
56030 +        * vfio_add_group_dev() above. It has no impact on this vdev, so it is
56031 +        * safe to be after the vfio device is made live.
56032 +        */
56033 +       ret = vfio_fsl_mc_scan_container(mc_dev);
56034 +       if (ret)
56035 +               goto out_group_dev;
56036         return 0;
56038 -out_reflck:
56039 -       vfio_fsl_mc_reflck_put(vdev->reflck);
56040  out_group_dev:
56041         vfio_del_group_dev(dev);
56042 +out_device:
56043 +       vfio_fsl_uninit_device(vdev);
56044 +out_reflck:
56045 +       vfio_fsl_mc_reflck_put(vdev->reflck);
56046  out_group_put:
56047         vfio_iommu_group_put(group, dev);
56048         return ret;
56049 @@ -646,16 +672,10 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
56051         mutex_destroy(&vdev->igate);
56053 +       dprc_remove_devices(mc_dev, NULL, 0);
56054 +       vfio_fsl_uninit_device(vdev);
56055         vfio_fsl_mc_reflck_put(vdev->reflck);
56057 -       if (is_fsl_mc_bus_dprc(mc_dev)) {
56058 -               dprc_remove_devices(mc_dev, NULL, 0);
56059 -               dprc_cleanup(mc_dev);
56060 -       }
56062 -       if (vdev->nb.notifier_call)
56063 -               bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
56065         vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
56067         return 0;
56068 diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
56069 index 917fd84c1c6f..367ff5412a38 100644
56070 --- a/drivers/vfio/mdev/mdev_sysfs.c
56071 +++ b/drivers/vfio/mdev/mdev_sysfs.c
56072 @@ -105,6 +105,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
56073                 return ERR_PTR(-ENOMEM);
56075         type->kobj.kset = parent->mdev_types_kset;
56076 +       type->parent = parent;
56078         ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
56079                                    "%s-%s", dev_driver_string(parent->dev),
56080 @@ -132,7 +133,6 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
56081         }
56083         type->group = group;
56084 -       type->parent = parent;
56085         return type;
56087  attrs_failed:
56088 diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
56089 index 5023e23db3bc..cb7f2dc09e9d 100644
56090 --- a/drivers/vfio/pci/vfio_pci.c
56091 +++ b/drivers/vfio/pci/vfio_pci.c
56092 @@ -1924,6 +1924,68 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
56093         return 0;
56096 +static int vfio_pci_vf_init(struct vfio_pci_device *vdev)
56098 +       struct pci_dev *pdev = vdev->pdev;
56099 +       int ret;
56101 +       if (!pdev->is_physfn)
56102 +               return 0;
56104 +       vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
56105 +       if (!vdev->vf_token)
56106 +               return -ENOMEM;
56108 +       mutex_init(&vdev->vf_token->lock);
56109 +       uuid_gen(&vdev->vf_token->uuid);
56111 +       vdev->nb.notifier_call = vfio_pci_bus_notifier;
56112 +       ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
56113 +       if (ret) {
56114 +               kfree(vdev->vf_token);
56115 +               return ret;
56116 +       }
56117 +       return 0;
56120 +static void vfio_pci_vf_uninit(struct vfio_pci_device *vdev)
56122 +       if (!vdev->vf_token)
56123 +               return;
56125 +       bus_unregister_notifier(&pci_bus_type, &vdev->nb);
56126 +       WARN_ON(vdev->vf_token->users);
56127 +       mutex_destroy(&vdev->vf_token->lock);
56128 +       kfree(vdev->vf_token);
56131 +static int vfio_pci_vga_init(struct vfio_pci_device *vdev)
56133 +       struct pci_dev *pdev = vdev->pdev;
56134 +       int ret;
56136 +       if (!vfio_pci_is_vga(pdev))
56137 +               return 0;
56139 +       ret = vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
56140 +       if (ret)
56141 +               return ret;
56142 +       vga_set_legacy_decoding(pdev, vfio_pci_set_vga_decode(vdev, false));
56143 +       return 0;
56146 +static void vfio_pci_vga_uninit(struct vfio_pci_device *vdev)
56148 +       struct pci_dev *pdev = vdev->pdev;
56150 +       if (!vfio_pci_is_vga(pdev))
56151 +               return;
56152 +       vga_client_register(pdev, NULL, NULL, NULL);
56153 +       vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
56154 +                                             VGA_RSRC_LEGACY_IO |
56155 +                                             VGA_RSRC_LEGACY_MEM);
56158  static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
56160         struct vfio_pci_device *vdev;
56161 @@ -1970,35 +2032,15 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
56162         INIT_LIST_HEAD(&vdev->vma_list);
56163         init_rwsem(&vdev->memory_lock);
56165 -       ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
56166 +       ret = vfio_pci_reflck_attach(vdev);
56167         if (ret)
56168                 goto out_free;
56170 -       ret = vfio_pci_reflck_attach(vdev);
56171 +       ret = vfio_pci_vf_init(vdev);
56172         if (ret)
56173 -               goto out_del_group_dev;
56175 -       if (pdev->is_physfn) {
56176 -               vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
56177 -               if (!vdev->vf_token) {
56178 -                       ret = -ENOMEM;
56179 -                       goto out_reflck;
56180 -               }
56182 -               mutex_init(&vdev->vf_token->lock);
56183 -               uuid_gen(&vdev->vf_token->uuid);
56185 -               vdev->nb.notifier_call = vfio_pci_bus_notifier;
56186 -               ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
56187 -               if (ret)
56188 -                       goto out_vf_token;
56189 -       }
56191 -       if (vfio_pci_is_vga(pdev)) {
56192 -               vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
56193 -               vga_set_legacy_decoding(pdev,
56194 -                                       vfio_pci_set_vga_decode(vdev, false));
56195 -       }
56196 +               goto out_reflck;
56197 +       ret = vfio_pci_vga_init(vdev);
56198 +       if (ret)
56199 +               goto out_vf;
56201         vfio_pci_probe_power_state(vdev);
56203 @@ -2016,15 +2058,20 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
56204                 vfio_pci_set_power_state(vdev, PCI_D3hot);
56205         }
56207 -       return ret;
56208 +       ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
56209 +       if (ret)
56210 +               goto out_power;
56211 +       return 0;
56213 -out_vf_token:
56214 -       kfree(vdev->vf_token);
56215 +out_power:
56216 +       if (!disable_idle_d3)
56217 +               vfio_pci_set_power_state(vdev, PCI_D0);
56218 +out_vf:
56219 +       vfio_pci_vf_uninit(vdev);
56220  out_reflck:
56221         vfio_pci_reflck_put(vdev->reflck);
56222 -out_del_group_dev:
56223 -       vfio_del_group_dev(&pdev->dev);
56224  out_free:
56225 +       kfree(vdev->pm_save);
56226         kfree(vdev);
56227  out_group_put:
56228         vfio_iommu_group_put(group, &pdev->dev);
56229 @@ -2041,33 +2088,19 @@ static void vfio_pci_remove(struct pci_dev *pdev)
56230         if (!vdev)
56231                 return;
56233 -       if (vdev->vf_token) {
56234 -               WARN_ON(vdev->vf_token->users);
56235 -               mutex_destroy(&vdev->vf_token->lock);
56236 -               kfree(vdev->vf_token);
56237 -       }
56239 -       if (vdev->nb.notifier_call)
56240 -               bus_unregister_notifier(&pci_bus_type, &vdev->nb);
56242 +       vfio_pci_vf_uninit(vdev);
56243         vfio_pci_reflck_put(vdev->reflck);
56244 +       vfio_pci_vga_uninit(vdev);
56246         vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
56247 -       kfree(vdev->region);
56248 -       mutex_destroy(&vdev->ioeventfds_lock);
56250         if (!disable_idle_d3)
56251                 vfio_pci_set_power_state(vdev, PCI_D0);
56253 +       mutex_destroy(&vdev->ioeventfds_lock);
56254 +       kfree(vdev->region);
56255         kfree(vdev->pm_save);
56256         kfree(vdev);
56258 -       if (vfio_pci_is_vga(pdev)) {
56259 -               vga_client_register(pdev, NULL, NULL, NULL);
56260 -               vga_set_legacy_decoding(pdev,
56261 -                               VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
56262 -                               VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
56263 -       }
56266  static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
56267 diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
56268 index bfa4c6ef554e..c79d2f2387aa 100644
56269 --- a/drivers/vhost/vdpa.c
56270 +++ b/drivers/vhost/vdpa.c
56271 @@ -993,6 +993,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
56272         if (vma->vm_end - vma->vm_start != notify.size)
56273                 return -ENOTSUPP;
56275 +       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
56276         vma->vm_ops = &vhost_vdpa_vm_ops;
56277         return 0;
56279 diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
56280 index 091f07e7c145..e9fbe2483844 100644
56281 --- a/drivers/video/backlight/qcom-wled.c
56282 +++ b/drivers/video/backlight/qcom-wled.c
56283 @@ -336,19 +336,19 @@ static int wled3_sync_toggle(struct wled *wled)
56284         unsigned int mask = GENMASK(wled->max_string_count - 1, 0);
56286         rc = regmap_update_bits(wled->regmap,
56287 -                               wled->ctrl_addr + WLED3_SINK_REG_SYNC,
56288 +                               wled->sink_addr + WLED3_SINK_REG_SYNC,
56289                                 mask, mask);
56290         if (rc < 0)
56291                 return rc;
56293         rc = regmap_update_bits(wled->regmap,
56294 -                               wled->ctrl_addr + WLED3_SINK_REG_SYNC,
56295 +                               wled->sink_addr + WLED3_SINK_REG_SYNC,
56296                                 mask, WLED3_SINK_REG_SYNC_CLEAR);
56298         return rc;
56301 -static int wled5_sync_toggle(struct wled *wled)
56302 +static int wled5_mod_sync_toggle(struct wled *wled)
56304         int rc;
56305         u8 val;
56306 @@ -445,10 +445,23 @@ static int wled_update_status(struct backlight_device *bl)
56307                         goto unlock_mutex;
56308                 }
56310 -               rc = wled->wled_sync_toggle(wled);
56311 -               if (rc < 0) {
56312 -                       dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
56313 -                       goto unlock_mutex;
56314 +               if (wled->version < 5) {
56315 +                       rc = wled->wled_sync_toggle(wled);
56316 +                       if (rc < 0) {
56317 +                               dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
56318 +                               goto unlock_mutex;
56319 +                       }
56320 +               } else {
56321 +                       /*
56322 +                        * For WLED5 toggling the MOD_SYNC_BIT updates the
56323 +                        * brightness
56324 +                        */
56325 +                       rc = wled5_mod_sync_toggle(wled);
56326 +                       if (rc < 0) {
56327 +                               dev_err(wled->dev, "wled mod sync failed rc:%d\n",
56328 +                                       rc);
56329 +                               goto unlock_mutex;
56330 +                       }
56331                 }
56332         }
56334 @@ -1459,7 +1472,7 @@ static int wled_configure(struct wled *wled)
56335                 size = ARRAY_SIZE(wled5_opts);
56336                 *cfg = wled5_config_defaults;
56337                 wled->wled_set_brightness = wled5_set_brightness;
56338 -               wled->wled_sync_toggle = wled5_sync_toggle;
56339 +               wled->wled_sync_toggle = wled3_sync_toggle;
56340                 wled->wled_cabc_config = wled5_cabc_config;
56341                 wled->wled_ovp_delay = wled5_ovp_delay;
56342                 wled->wled_auto_detection_required =
56343 diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
56344 index 757d5c3f620b..ff09e57f3c38 100644
56345 --- a/drivers/video/fbdev/core/fbcmap.c
56346 +++ b/drivers/video/fbdev/core/fbcmap.c
56347 @@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
56348                 if (!len)
56349                         return 0;
56351 -               cmap->red = kmalloc(size, flags);
56352 +               cmap->red = kzalloc(size, flags);
56353                 if (!cmap->red)
56354                         goto fail;
56355 -               cmap->green = kmalloc(size, flags);
56356 +               cmap->green = kzalloc(size, flags);
56357                 if (!cmap->green)
56358                         goto fail;
56359 -               cmap->blue = kmalloc(size, flags);
56360 +               cmap->blue = kzalloc(size, flags);
56361                 if (!cmap->blue)
56362                         goto fail;
56363                 if (transp) {
56364 -                       cmap->transp = kmalloc(size, flags);
56365 +                       cmap->transp = kzalloc(size, flags);
56366                         if (!cmap->transp)
56367                                 goto fail;
56368                 } else {
56369 diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
56370 index cfe63932f825..71c00ef772a3 100644
56371 --- a/drivers/video/fbdev/omap/hwa742.c
56372 +++ b/drivers/video/fbdev/omap/hwa742.c
56373 @@ -913,7 +913,7 @@ static void hwa742_resume(void)
56374                 if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
56375                         break;
56376                 set_current_state(TASK_UNINTERRUPTIBLE);
56377 -               schedule_timeout(msecs_to_jiffies(5));
56378 +               schedule_msec_hrtimeout((5));
56379         }
56380         hwa742_set_update_mode(hwa742.update_mode_before_suspend);
56382 diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
56383 index f1551e00eb12..f0f651e92504 100644
56384 --- a/drivers/video/fbdev/pxafb.c
56385 +++ b/drivers/video/fbdev/pxafb.c
56386 @@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg)
56387                 mutex_unlock(&fbi->ctrlr_lock);
56389                 set_current_state(TASK_INTERRUPTIBLE);
56390 -               schedule_timeout(msecs_to_jiffies(30));
56391 +               schedule_msec_hrtimeout((30));
56392         }
56394         pr_debug("%s(): task ending\n", __func__);
56395 diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
56396 index f1964ea4b826..e21e1e86ad15 100644
56397 --- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
56398 +++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
56399 @@ -1524,7 +1524,8 @@ static const struct file_operations ne_enclave_fops = {
56400   *                       enclave file descriptor to be further used for enclave
56401   *                       resources handling e.g. memory regions and CPUs.
56402   * @ne_pci_dev :       Private data associated with the PCI device.
56403 - * @slot_uid:          Generated unique slot id associated with an enclave.
56404 + * @slot_uid:          User pointer to store the generated unique slot id
56405 + *                     associated with an enclave to.
56406   *
56407   * Context: Process context. This function is called with the ne_pci_dev enclave
56408   *         mutex held.
56409 @@ -1532,7 +1533,7 @@ static const struct file_operations ne_enclave_fops = {
56410   * * Enclave fd on success.
56411   * * Negative return value on failure.
56412   */
56413 -static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
56414 +static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
56416         struct ne_pci_dev_cmd_reply cmd_reply = {};
56417         int enclave_fd = -1;
56418 @@ -1634,7 +1635,18 @@ static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
56420         list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);
56422 -       *slot_uid = ne_enclave->slot_uid;
56423 +       if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) {
56424 +               /*
56425 +                * As we're holding the only reference to 'enclave_file', fput()
56426 +                * will call ne_enclave_release() which will do a proper cleanup
56427 +                * of all so far allocated resources, leaving only the unused fd
56428 +                * for us to free.
56429 +                */
56430 +               fput(enclave_file);
56431 +               put_unused_fd(enclave_fd);
56433 +               return -EFAULT;
56434 +       }
56436         fd_install(enclave_fd, enclave_file);
56438 @@ -1671,34 +1683,13 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
56439         switch (cmd) {
56440         case NE_CREATE_VM: {
56441                 int enclave_fd = -1;
56442 -               struct file *enclave_file = NULL;
56443                 struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
56444 -               int rc = -EINVAL;
56445 -               u64 slot_uid = 0;
56446 +               u64 __user *slot_uid = (void __user *)arg;
56448                 mutex_lock(&ne_pci_dev->enclaves_list_mutex);
56450 -               enclave_fd = ne_create_vm_ioctl(ne_pci_dev, &slot_uid);
56451 -               if (enclave_fd < 0) {
56452 -                       rc = enclave_fd;
56454 -                       mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
56456 -                       return rc;
56457 -               }
56459 +               enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
56460                 mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
56462 -               if (copy_to_user((void __user *)arg, &slot_uid, sizeof(slot_uid))) {
56463 -                       enclave_file = fget(enclave_fd);
56464 -                       /* Decrement file refs to have release() called. */
56465 -                       fput(enclave_file);
56466 -                       fput(enclave_file);
56467 -                       put_unused_fd(enclave_fd);
56469 -                       return -EFAULT;
56470 -               }
56472                 return enclave_fd;
56473         }
56475 diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
56476 index f01d58c7a042..a3e7be96527d 100644
56477 --- a/drivers/xen/gntdev.c
56478 +++ b/drivers/xen/gntdev.c
56479 @@ -1017,8 +1017,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
56480                 err = mmu_interval_notifier_insert_locked(
56481                         &map->notifier, vma->vm_mm, vma->vm_start,
56482                         vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
56483 -               if (err)
56484 +               if (err) {
56485 +                       map->vma = NULL;
56486                         goto out_unlock_put;
56487 +               }
56488         }
56489         mutex_unlock(&priv->lock);
56491 diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
56492 index e64e6befc63b..87e6b7db892f 100644
56493 --- a/drivers/xen/unpopulated-alloc.c
56494 +++ b/drivers/xen/unpopulated-alloc.c
56495 @@ -39,8 +39,10 @@ static int fill_list(unsigned int nr_pages)
56496         }
56498         pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
56499 -       if (!pgmap)
56500 +       if (!pgmap) {
56501 +               ret = -ENOMEM;
56502                 goto err_pgmap;
56503 +       }
56505         pgmap->type = MEMORY_DEVICE_GENERIC;
56506         pgmap->range = (struct range) {
56507 diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
56508 index 649f04f112dc..59c32c9b799f 100644
56509 --- a/fs/9p/vfs_file.c
56510 +++ b/fs/9p/vfs_file.c
56511 @@ -86,8 +86,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
56512                  * to work.
56513                  */
56514                 writeback_fid = v9fs_writeback_fid(file_dentry(file));
56515 -               if (IS_ERR(fid)) {
56516 -                       err = PTR_ERR(fid);
56517 +               if (IS_ERR(writeback_fid)) {
56518 +                       err = PTR_ERR(writeback_fid);
56519                         mutex_unlock(&v9inode->v_mutex);
56520                         goto out_error;
56521                 }
56522 diff --git a/fs/Kconfig b/fs/Kconfig
56523 index a55bda4233bb..f61330e4efc0 100644
56524 --- a/fs/Kconfig
56525 +++ b/fs/Kconfig
56526 @@ -145,6 +145,7 @@ menu "DOS/FAT/EXFAT/NT Filesystems"
56527  source "fs/fat/Kconfig"
56528  source "fs/exfat/Kconfig"
56529  source "fs/ntfs/Kconfig"
56530 +source "fs/ntfs3/Kconfig"
56532  endmenu
56533  endif # BLOCK
56534 diff --git a/fs/Makefile b/fs/Makefile
56535 index 3215fe205256..6bdfcf712cb1 100644
56536 --- a/fs/Makefile
56537 +++ b/fs/Makefile
56538 @@ -99,6 +99,7 @@ obj-$(CONFIG_SYSV_FS)         += sysv/
56539  obj-$(CONFIG_CIFS)             += cifs/
56540  obj-$(CONFIG_HPFS_FS)          += hpfs/
56541  obj-$(CONFIG_NTFS_FS)          += ntfs/
56542 +obj-$(CONFIG_NTFS3_FS)         += ntfs3/
56543  obj-$(CONFIG_UFS_FS)           += ufs/
56544  obj-$(CONFIG_EFS_FS)           += efs/
56545  obj-$(CONFIG_JFFS2_FS)         += jffs2/
56546 diff --git a/fs/afs/dir.c b/fs/afs/dir.c
56547 index 17548c1faf02..31251d11d576 100644
56548 --- a/fs/afs/dir.c
56549 +++ b/fs/afs/dir.c
56550 @@ -1342,6 +1342,7 @@ static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
56552         afs_op_set_vnode(op, 0, dvnode);
56553         op->file[0].dv_delta = 1;
56554 +       op->file[0].modification = true;
56555         op->file[0].update_ctime = true;
56556         op->dentry      = dentry;
56557         op->create.mode = S_IFDIR | mode;
56558 @@ -1423,6 +1424,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
56560         afs_op_set_vnode(op, 0, dvnode);
56561         op->file[0].dv_delta = 1;
56562 +       op->file[0].modification = true;
56563         op->file[0].update_ctime = true;
56565         op->dentry      = dentry;
56566 @@ -1559,6 +1561,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
56568         afs_op_set_vnode(op, 0, dvnode);
56569         op->file[0].dv_delta = 1;
56570 +       op->file[0].modification = true;
56571         op->file[0].update_ctime = true;
56573         /* Try to make sure we have a callback promise on the victim. */
56574 @@ -1641,6 +1644,7 @@ static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
56576         afs_op_set_vnode(op, 0, dvnode);
56577         op->file[0].dv_delta = 1;
56578 +       op->file[0].modification = true;
56579         op->file[0].update_ctime = true;
56581         op->dentry      = dentry;
56582 @@ -1715,6 +1719,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
56583         afs_op_set_vnode(op, 0, dvnode);
56584         afs_op_set_vnode(op, 1, vnode);
56585         op->file[0].dv_delta = 1;
56586 +       op->file[0].modification = true;
56587         op->file[0].update_ctime = true;
56588         op->file[1].update_ctime = true;
56590 @@ -1910,6 +1915,8 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
56591         afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
56592         op->file[0].dv_delta = 1;
56593         op->file[1].dv_delta = 1;
56594 +       op->file[0].modification = true;
56595 +       op->file[1].modification = true;
56596         op->file[0].update_ctime = true;
56597         op->file[1].update_ctime = true;
56599 diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
56600 index 04f75a44f243..dae9a57d7ec0 100644
56601 --- a/fs/afs/dir_silly.c
56602 +++ b/fs/afs/dir_silly.c
56603 @@ -73,6 +73,8 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
56604         afs_op_set_vnode(op, 1, dvnode);
56605         op->file[0].dv_delta = 1;
56606         op->file[1].dv_delta = 1;
56607 +       op->file[0].modification = true;
56608 +       op->file[1].modification = true;
56609         op->file[0].update_ctime = true;
56610         op->file[1].update_ctime = true;
56612 @@ -201,6 +203,7 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
56613         afs_op_set_vnode(op, 0, dvnode);
56614         afs_op_set_vnode(op, 1, vnode);
56615         op->file[0].dv_delta = 1;
56616 +       op->file[0].modification = true;
56617         op->file[0].update_ctime = true;
56618         op->file[1].op_unlinked = true;
56619         op->file[1].update_ctime = true;
56620 diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
56621 index 71c58723763d..a82515b47350 100644
56622 --- a/fs/afs/fs_operation.c
56623 +++ b/fs/afs/fs_operation.c
56624 @@ -118,6 +118,8 @@ static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *
56625                 vp->cb_break_before     = afs_calc_vnode_cb_break(vnode);
56626                 if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
56627                         op->flags       |= AFS_OPERATION_CUR_ONLY;
56628 +               if (vp->modification)
56629 +                       set_bit(AFS_VNODE_MODIFYING, &vnode->flags);
56630         }
56632         if (vp->fid.vnode)
56633 @@ -223,6 +225,10 @@ int afs_put_operation(struct afs_operation *op)
56635         if (op->ops && op->ops->put)
56636                 op->ops->put(op);
56637 +       if (op->file[0].modification)
56638 +               clear_bit(AFS_VNODE_MODIFYING, &op->file[0].vnode->flags);
56639 +       if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
56640 +               clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
56641         if (op->file[0].put_vnode)
56642                 iput(&op->file[0].vnode->vfs_inode);
56643         if (op->file[1].put_vnode)
56644 diff --git a/fs/afs/inode.c b/fs/afs/inode.c
56645 index 12be88716e4c..fddf7d54e0b7 100644
56646 --- a/fs/afs/inode.c
56647 +++ b/fs/afs/inode.c
56648 @@ -102,13 +102,13 @@ static int afs_inode_init_from_status(struct afs_operation *op,
56650         switch (status->type) {
56651         case AFS_FTYPE_FILE:
56652 -               inode->i_mode   = S_IFREG | status->mode;
56653 +               inode->i_mode   = S_IFREG | (status->mode & S_IALLUGO);
56654                 inode->i_op     = &afs_file_inode_operations;
56655                 inode->i_fop    = &afs_file_operations;
56656                 inode->i_mapping->a_ops = &afs_fs_aops;
56657                 break;
56658         case AFS_FTYPE_DIR:
56659 -               inode->i_mode   = S_IFDIR | status->mode;
56660 +               inode->i_mode   = S_IFDIR |  (status->mode & S_IALLUGO);
56661                 inode->i_op     = &afs_dir_inode_operations;
56662                 inode->i_fop    = &afs_dir_file_operations;
56663                 inode->i_mapping->a_ops = &afs_dir_aops;
56664 @@ -198,7 +198,7 @@ static void afs_apply_status(struct afs_operation *op,
56665         if (status->mode != vnode->status.mode) {
56666                 mode = inode->i_mode;
56667                 mode &= ~S_IALLUGO;
56668 -               mode |= status->mode;
56669 +               mode |= status->mode & S_IALLUGO;
56670                 WRITE_ONCE(inode->i_mode, mode);
56671         }
56673 @@ -293,8 +293,9 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
56674                         op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
56675                 }
56676         } else if (vp->scb.have_status) {
56677 -               if (vp->dv_before + vp->dv_delta != vp->scb.status.data_version &&
56678 -                   vp->speculative)
56679 +               if (vp->speculative &&
56680 +                   (test_bit(AFS_VNODE_MODIFYING, &vnode->flags) ||
56681 +                    vp->dv_before != vnode->status.data_version))
56682                         /* Ignore the result of a speculative bulk status fetch
56683                          * if it splits around a modification op, thereby
56684                          * appearing to regress the data version.
56685 @@ -910,6 +911,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
56686         }
56687         op->ctime = attr->ia_ctime;
56688         op->file[0].update_ctime = 1;
56689 +       op->file[0].modification = true;
56691         op->ops = &afs_setattr_operation;
56692         ret = afs_do_sync_operation(op);
56693 diff --git a/fs/afs/internal.h b/fs/afs/internal.h
56694 index 1627b1872812..be981a9a1add 100644
56695 --- a/fs/afs/internal.h
56696 +++ b/fs/afs/internal.h
56697 @@ -640,6 +640,7 @@ struct afs_vnode {
56698  #define AFS_VNODE_PSEUDODIR    7               /* set if Vnode is a pseudo directory */
56699  #define AFS_VNODE_NEW_CONTENT  8               /* Set if file has new content (create/trunc-0) */
56700  #define AFS_VNODE_SILLY_DELETED        9               /* Set if file has been silly-deleted */
56701 +#define AFS_VNODE_MODIFYING    10              /* Set if we're performing a modification op */
56703         struct list_head        wb_keys;        /* List of keys available for writeback */
56704         struct list_head        pending_locks;  /* locks waiting to be granted */
56705 @@ -756,6 +757,7 @@ struct afs_vnode_param {
56706         bool                    set_size:1;     /* Must update i_size */
56707         bool                    op_unlinked:1;  /* True if file was unlinked by op */
56708         bool                    speculative:1;  /* T if speculative status fetch (no vnode lock) */
56709 +       bool                    modification:1; /* Set if the content gets modified */
56710  };
56712  /*
56713 diff --git a/fs/afs/write.c b/fs/afs/write.c
56714 index eb737ed63afb..ebe3b6493fce 100644
56715 --- a/fs/afs/write.c
56716 +++ b/fs/afs/write.c
56717 @@ -450,6 +450,7 @@ static int afs_store_data(struct address_space *mapping,
56718         afs_op_set_vnode(op, 0, vnode);
56719         op->file[0].dv_delta = 1;
56720         op->store.mapping = mapping;
56721 +       op->file[0].modification = true;
56722         op->store.first = first;
56723         op->store.last = last;
56724         op->store.first_offset = offset;
56725 diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
56726 index 744b99ddc28c..a7d9e147dee6 100644
56727 --- a/fs/btrfs/block-group.c
56728 +++ b/fs/btrfs/block-group.c
56729 @@ -3269,6 +3269,7 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
56730   */
56731  void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
56733 +       struct btrfs_transaction *cur_trans = trans->transaction;
56734         struct btrfs_fs_info *fs_info = trans->fs_info;
56735         struct btrfs_space_info *info;
56736         u64 left;
56737 @@ -3283,6 +3284,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
56738         lockdep_assert_held(&fs_info->chunk_mutex);
56740         info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
56741 +again:
56742         spin_lock(&info->lock);
56743         left = info->total_bytes - btrfs_space_info_used(info, true);
56744         spin_unlock(&info->lock);
56745 @@ -3301,6 +3303,58 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
56747         if (left < thresh) {
56748                 u64 flags = btrfs_system_alloc_profile(fs_info);
56749 +               u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved);
56751 +               /*
56752 +                * If there's not available space for the chunk tree (system
56753 +                * space) and there are other tasks that reserved space for
56754 +                * creating a new system block group, wait for them to complete
56755 +                * the creation of their system block group and release excess
56756 +                * reserved space. We do this because:
56757 +                *
56758 +                * *) We can end up allocating more system chunks than necessary
56759 +                *    when there are multiple tasks that are concurrently
56760 +                *    allocating block groups, which can lead to exhaustion of
56761 +                *    the system array in the superblock;
56762 +                *
56763 +                * *) If we allocate extra and unnecessary system block groups,
56764 +                *    despite being empty for a long time, and possibly forever,
56765 +                *    they end not being added to the list of unused block groups
56766 +                *    because that typically happens only when deallocating the
56767 +                *    last extent from a block group - which never happens since
56768 +                *    we never allocate from them in the first place. The few
56769 +                *    exceptions are when mounting a filesystem or running scrub,
56770 +                *    which add unused block groups to the list of unused block
56771 +                *    groups, to be deleted by the cleaner kthread.
56772 +                *    And even when they are added to the list of unused block
56773 +                *    groups, it can take a long time until they get deleted,
56774 +                *    since the cleaner kthread might be sleeping or busy with
56775 +                *    other work (deleting subvolumes, running delayed iputs,
56776 +                *    defrag scheduling, etc);
56777 +                *
56778 +                * This is rare in practice, but can happen when too many tasks
56779 +                * are allocating blocks groups in parallel (via fallocate())
56780 +                * and before the one that reserved space for a new system block
56781 +                * group finishes the block group creation and releases the space
56782 +                * reserved in excess (at btrfs_create_pending_block_groups()),
56783 +                * other tasks end up here and see free system space temporarily
56784 +                * not enough for updating the chunk tree.
56785 +                *
56786 +                * We unlock the chunk mutex before waiting for such tasks and
56787 +                * lock it again after the wait, otherwise we would deadlock.
56788 +                * It is safe to do so because allocating a system chunk is the
56789 +                * first thing done while allocating a new block group.
56790 +                */
56791 +               if (reserved > trans->chunk_bytes_reserved) {
56792 +                       const u64 min_needed = reserved - thresh;
56794 +                       mutex_unlock(&fs_info->chunk_mutex);
56795 +                       wait_event(cur_trans->chunk_reserve_wait,
56796 +                          atomic64_read(&cur_trans->chunk_bytes_reserved) <=
56797 +                          min_needed);
56798 +                       mutex_lock(&fs_info->chunk_mutex);
56799 +                       goto again;
56800 +               }
56802                 /*
56803                  * Ignore failure to create system chunk. We might end up not
56804 @@ -3315,8 +3369,10 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
56805                 ret = btrfs_block_rsv_add(fs_info->chunk_root,
56806                                           &fs_info->chunk_block_rsv,
56807                                           thresh, BTRFS_RESERVE_NO_FLUSH);
56808 -               if (!ret)
56809 +               if (!ret) {
56810 +                       atomic64_add(thresh, &cur_trans->chunk_bytes_reserved);
56811                         trans->chunk_bytes_reserved += thresh;
56812 +               }
56813         }
56816 diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
56817 index 28e202e89660..418903604936 100644
56818 --- a/fs/btrfs/btrfs_inode.h
56819 +++ b/fs/btrfs/btrfs_inode.h
56820 @@ -299,6 +299,21 @@ static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
56821                                                   mod);
56825 + * Called every time after doing a buffered, direct IO or memory mapped write.
56826 + *
56827 + * This is to ensure that if we write to a file that was previously fsynced in
56828 + * the current transaction, then try to fsync it again in the same transaction,
56829 + * we will know that there were changes in the file and that it needs to be
56830 + * logged.
56831 + */
56832 +static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
56834 +       spin_lock(&inode->lock);
56835 +       inode->last_sub_trans = inode->root->log_transid;
56836 +       spin_unlock(&inode->lock);
56839  static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
56841         int ret = 0;
56842 diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
56843 index 3f4c832abfed..81387cdf334d 100644
56844 --- a/fs/btrfs/compression.c
56845 +++ b/fs/btrfs/compression.c
56846 @@ -80,10 +80,15 @@ static int compression_compress_pages(int type, struct list_head *ws,
56847         case BTRFS_COMPRESS_NONE:
56848         default:
56849                 /*
56850 -                * This can't happen, the type is validated several times
56851 -                * before we get here. As a sane fallback, return what the
56852 -                * callers will understand as 'no compression happened'.
56853 +                * This can happen when compression races with remount setting
56854 +                * it to 'no compress', while caller doesn't call
56855 +                * inode_need_compress() to check if we really need to
56856 +                * compress.
56857 +                *
56858 +                * Not a big deal, just need to inform caller that we
56859 +                * haven't allocated any pages yet.
56860                  */
56861 +               *out_pages = 0;
56862                 return -E2BIG;
56863         }
56865 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
56866 index 34b929bd5c1a..f43ce82a6aed 100644
56867 --- a/fs/btrfs/ctree.c
56868 +++ b/fs/btrfs/ctree.c
56869 @@ -1365,10 +1365,30 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
56870                                    "failed to read tree block %llu from get_old_root",
56871                                    logical);
56872                 } else {
56873 +                       struct tree_mod_elem *tm2;
56875                         btrfs_tree_read_lock(old);
56876                         eb = btrfs_clone_extent_buffer(old);
56877 +                       /*
56878 +                        * After the lookup for the most recent tree mod operation
56879 +                        * above and before we locked and cloned the extent buffer
56880 +                        * 'old', a new tree mod log operation may have been added.
56881 +                        * So lookup for a more recent one to make sure the number
56882 +                        * of mod log operations we replay is consistent with the
56883 +                        * number of items we have in the cloned extent buffer,
56884 +                        * otherwise we can hit a BUG_ON when rewinding the extent
56885 +                        * buffer.
56886 +                        */
56887 +                       tm2 = tree_mod_log_search(fs_info, logical, time_seq);
56888                         btrfs_tree_read_unlock(old);
56889                         free_extent_buffer(old);
56890 +                       ASSERT(tm2);
56891 +                       ASSERT(tm2 == tm || tm2->seq > tm->seq);
56892 +                       if (!tm2 || tm2->seq < tm->seq) {
56893 +                               free_extent_buffer(eb);
56894 +                               return NULL;
56895 +                       }
56896 +                       tm = tm2;
56897                 }
56898         } else if (old_root) {
56899                 eb_root_owner = btrfs_header_owner(eb_root);
56900 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
56901 index 9ae776ab3967..29ef969035df 100644
56902 --- a/fs/btrfs/ctree.h
56903 +++ b/fs/btrfs/ctree.h
56904 @@ -3110,7 +3110,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
56905                                struct btrfs_inode *inode, u64 new_size,
56906                                u32 min_type);
56908 -int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
56909 +int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
56910  int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
56911                                bool in_reclaim_context);
56912  int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
56913 diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
56914 index 56642ca7af10..fa1c3bc93ccf 100644
56915 --- a/fs/btrfs/delalloc-space.c
56916 +++ b/fs/btrfs/delalloc-space.c
56917 @@ -311,7 +311,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
56918                         flush = BTRFS_RESERVE_FLUSH_LIMIT;
56920                 if (btrfs_transaction_in_commit(fs_info))
56921 -                       schedule_timeout(1);
56922 +                       schedule_min_hrtimeout();
56923         }
56925         num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
56926 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
56927 index bf25401c9768..c1d2b6786129 100644
56928 --- a/fs/btrfs/delayed-inode.c
56929 +++ b/fs/btrfs/delayed-inode.c
56930 @@ -1589,8 +1589,8 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
56931          * We can only do one readdir with delayed items at a time because of
56932          * item->readdir_list.
56933          */
56934 -       inode_unlock_shared(inode);
56935 -       inode_lock(inode);
56936 +       btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
56937 +       btrfs_inode_lock(inode, 0);
56939         mutex_lock(&delayed_node->mutex);
56940         item = __btrfs_first_delayed_insertion_item(delayed_node);
56941 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
56942 index 36a3c973fda1..5b82050b871a 100644
56943 --- a/fs/btrfs/extent-tree.c
56944 +++ b/fs/btrfs/extent-tree.c
56945 @@ -1340,12 +1340,16 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
56946                 stripe = bbio->stripes;
56947                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
56948                         u64 bytes;
56949 +                       struct btrfs_device *device = stripe->dev;
56951 -                       if (!stripe->dev->bdev) {
56952 +                       if (!device->bdev) {
56953                                 ASSERT(btrfs_test_opt(fs_info, DEGRADED));
56954                                 continue;
56955                         }
56957 +                       if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
56958 +                               continue;
56960                         ret = do_discard_extent(stripe, &bytes);
56961                         if (!ret) {
56962                                 discarded_bytes += bytes;
56963 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
56964 index 0e155f013839..abee4b62741d 100644
56965 --- a/fs/btrfs/file.c
56966 +++ b/fs/btrfs/file.c
56967 @@ -2014,14 +2014,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
56968         else
56969                 num_written = btrfs_buffered_write(iocb, from);
56971 -       /*
56972 -        * We also have to set last_sub_trans to the current log transid,
56973 -        * otherwise subsequent syncs to a file that's been synced in this
56974 -        * transaction will appear to have already occurred.
56975 -        */
56976 -       spin_lock(&inode->lock);
56977 -       inode->last_sub_trans = inode->root->log_transid;
56978 -       spin_unlock(&inode->lock);
56979 +       btrfs_set_inode_last_sub_trans(inode);
56981         if (num_written > 0)
56982                 num_written = generic_write_sync(iocb, num_written);
56984 @@ -2073,6 +2067,30 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
56985         return ret;
56988 +static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
56990 +       struct btrfs_inode *inode = BTRFS_I(ctx->inode);
56991 +       struct btrfs_fs_info *fs_info = inode->root->fs_info;
56993 +       if (btrfs_inode_in_log(inode, fs_info->generation) &&
56994 +           list_empty(&ctx->ordered_extents))
56995 +               return true;
56997 +       /*
56998 +        * If we are doing a fast fsync we can not bail out if the inode's
56999 +        * last_trans is <= then the last committed transaction, because we only
57000 +        * update the last_trans of the inode during ordered extent completion,
57001 +        * and for a fast fsync we don't wait for that, we only wait for the
57002 +        * writeback to complete.
57003 +        */
57004 +       if (inode->last_trans <= fs_info->last_trans_committed &&
57005 +           (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
57006 +            list_empty(&ctx->ordered_extents)))
57007 +               return true;
57009 +       return false;
57012  /*
57013   * fsync call for both files and directories.  This logs the inode into
57014   * the tree log instead of forcing full commits whenever possible.
57015 @@ -2122,7 +2140,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
57016         if (ret)
57017                 goto out;
57019 -       inode_lock(inode);
57020 +       btrfs_inode_lock(inode, 0);
57022         atomic_inc(&root->log_batch);
57024 @@ -2154,7 +2172,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
57025          */
57026         ret = start_ordered_ops(inode, start, end);
57027         if (ret) {
57028 -               inode_unlock(inode);
57029 +               btrfs_inode_unlock(inode, 0);
57030                 goto out;
57031         }
57033 @@ -2191,17 +2209,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
57035         atomic_inc(&root->log_batch);
57037 -       /*
57038 -        * If we are doing a fast fsync we can not bail out if the inode's
57039 -        * last_trans is <= then the last committed transaction, because we only
57040 -        * update the last_trans of the inode during ordered extent completion,
57041 -        * and for a fast fsync we don't wait for that, we only wait for the
57042 -        * writeback to complete.
57043 -        */
57044         smp_mb();
57045 -       if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
57046 -           (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed &&
57047 -            (full_sync || list_empty(&ctx.ordered_extents)))) {
57048 +       if (skip_inode_logging(&ctx)) {
57049                 /*
57050                  * We've had everything committed since the last time we were
57051                  * modified so clear this flag in case it was set for whatever
57052 @@ -2255,7 +2264,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
57053          * file again, but that will end up using the synchronization
57054          * inside btrfs_sync_log to keep things safe.
57055          */
57056 -       inode_unlock(inode);
57057 +       btrfs_inode_unlock(inode, 0);
57059         if (ret != BTRFS_NO_LOG_SYNC) {
57060                 if (!ret) {
57061 @@ -2285,7 +2294,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
57063  out_release_extents:
57064         btrfs_release_log_ctx_extents(&ctx);
57065 -       inode_unlock(inode);
57066 +       btrfs_inode_unlock(inode, 0);
57067         goto out;
57070 @@ -2735,8 +2744,6 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
57071                         extent_info->file_offset += replace_len;
57072                 }
57074 -               cur_offset = drop_args.drop_end;
57076                 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
57077                 if (ret)
57078                         break;
57079 @@ -2756,7 +2763,9 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
57080                 BUG_ON(ret);    /* shouldn't happen */
57081                 trans->block_rsv = rsv;
57083 -               if (!extent_info) {
57084 +               cur_offset = drop_args.drop_end;
57085 +               len = end - cur_offset;
57086 +               if (!extent_info && len) {
57087                         ret = find_first_non_hole(BTRFS_I(inode), &cur_offset,
57088                                                   &len);
57089                         if (unlikely(ret < 0))
57090 @@ -2868,7 +2877,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
57091         if (ret)
57092                 return ret;
57094 -       inode_lock(inode);
57095 +       btrfs_inode_lock(inode, 0);
57096         ino_size = round_up(inode->i_size, fs_info->sectorsize);
57097         ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
57098         if (ret < 0)
57099 @@ -2908,7 +2917,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
57100                 truncated_block = true;
57101                 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
57102                 if (ret) {
57103 -                       inode_unlock(inode);
57104 +                       btrfs_inode_unlock(inode, 0);
57105                         return ret;
57106                 }
57107         }
57108 @@ -3009,7 +3018,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
57109                                 ret = ret2;
57110                 }
57111         }
57112 -       inode_unlock(inode);
57113 +       btrfs_inode_unlock(inode, 0);
57114         return ret;
57117 @@ -3377,7 +3386,7 @@ static long btrfs_fallocate(struct file *file, int mode,
57119         if (mode & FALLOC_FL_ZERO_RANGE) {
57120                 ret = btrfs_zero_range(inode, offset, len, mode);
57121 -               inode_unlock(inode);
57122 +               btrfs_inode_unlock(inode, 0);
57123                 return ret;
57124         }
57126 @@ -3487,7 +3496,7 @@ static long btrfs_fallocate(struct file *file, int mode,
57127         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
57128                              &cached_state);
57129  out:
57130 -       inode_unlock(inode);
57131 +       btrfs_inode_unlock(inode, 0);
57132         /* Let go of our reservation. */
57133         if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
57134                 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
57135 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
57136 index 9988decd5717..ac9c2691376d 100644
57137 --- a/fs/btrfs/free-space-cache.c
57138 +++ b/fs/btrfs/free-space-cache.c
57139 @@ -3942,7 +3942,7 @@ static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
57141         struct btrfs_block_group *block_group;
57142         struct rb_node *node;
57143 -       int ret;
57144 +       int ret = 0;
57146         btrfs_info(fs_info, "cleaning free space cache v1");
57148 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
57149 index a520775949a0..8c4d2eaa5d58 100644
57150 --- a/fs/btrfs/inode.c
57151 +++ b/fs/btrfs/inode.c
57152 @@ -8619,9 +8619,7 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
57153         set_page_dirty(page);
57154         SetPageUptodate(page);
57156 -       BTRFS_I(inode)->last_trans = fs_info->generation;
57157 -       BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
57158 -       BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
57159 +       btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
57161         unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
57163 @@ -9674,7 +9672,7 @@ static int start_delalloc_inodes(struct btrfs_root *root,
57164         return ret;
57167 -int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
57168 +int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
57170         struct writeback_control wbc = {
57171                 .nr_to_write = LONG_MAX,
57172 @@ -9687,7 +9685,7 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
57173         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
57174                 return -EROFS;
57176 -       return start_delalloc_inodes(root, &wbc, true, false);
57177 +       return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
57180  int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
57181 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
57182 index e8d53fea4c61..f9ecb6c0bf15 100644
57183 --- a/fs/btrfs/ioctl.c
57184 +++ b/fs/btrfs/ioctl.c
57185 @@ -226,7 +226,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
57186         if (ret)
57187                 return ret;
57189 -       inode_lock(inode);
57190 +       btrfs_inode_lock(inode, 0);
57191         fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
57192         old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
57194 @@ -353,7 +353,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
57195   out_end_trans:
57196         btrfs_end_transaction(trans);
57197   out_unlock:
57198 -       inode_unlock(inode);
57199 +       btrfs_inode_unlock(inode, 0);
57200         mnt_drop_write_file(file);
57201         return ret;
57203 @@ -449,7 +449,7 @@ static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
57204         if (ret)
57205                 return ret;
57207 -       inode_lock(inode);
57208 +       btrfs_inode_lock(inode, 0);
57210         old_flags = binode->flags;
57211         old_i_flags = inode->i_flags;
57212 @@ -501,7 +501,7 @@ static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
57213                 inode->i_flags = old_i_flags;
57214         }
57216 -       inode_unlock(inode);
57217 +       btrfs_inode_unlock(inode, 0);
57218         mnt_drop_write_file(file);
57220         return ret;
57221 @@ -697,8 +697,6 @@ static noinline int create_subvol(struct inode *dir,
57222         btrfs_set_root_otransid(root_item, trans->transid);
57224         btrfs_tree_unlock(leaf);
57225 -       free_extent_buffer(leaf);
57226 -       leaf = NULL;
57228         btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);
57230 @@ -707,8 +705,22 @@ static noinline int create_subvol(struct inode *dir,
57231         key.type = BTRFS_ROOT_ITEM_KEY;
57232         ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
57233                                 root_item);
57234 -       if (ret)
57235 +       if (ret) {
57236 +               /*
57237 +                * Since we don't abort the transaction in this case, free the
57238 +                * tree block so that we don't leak space and leave the
57239 +                * filesystem in an inconsistent state (an extent item in the
57240 +                * extent tree without backreferences). Also no need to have
57241 +                * the tree block locked since it is not in any tree at this
57242 +                * point, so no other task can find it and use it.
57243 +                */
57244 +               btrfs_free_tree_block(trans, root, leaf, 0, 1);
57245 +               free_extent_buffer(leaf);
57246                 goto fail;
57247 +       }
57249 +       free_extent_buffer(leaf);
57250 +       leaf = NULL;
57252         key.offset = (u64)-1;
57253         new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
57254 @@ -1014,7 +1026,7 @@ static noinline int btrfs_mksubvol(const struct path *parent,
57255  out_dput:
57256         dput(dentry);
57257  out_unlock:
57258 -       inode_unlock(dir);
57259 +       btrfs_inode_unlock(dir, 0);
57260         return error;
57263 @@ -1034,7 +1046,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
57264          */
57265         btrfs_drew_read_lock(&root->snapshot_lock);
57267 -       ret = btrfs_start_delalloc_snapshot(root);
57268 +       ret = btrfs_start_delalloc_snapshot(root, false);
57269         if (ret)
57270                 goto out;
57272 @@ -1612,7 +1624,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
57273                         ra_index += cluster;
57274                 }
57276 -               inode_lock(inode);
57277 +               btrfs_inode_lock(inode, 0);
57278                 if (IS_SWAPFILE(inode)) {
57279                         ret = -ETXTBSY;
57280                 } else {
57281 @@ -1621,13 +1633,13 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
57282                         ret = cluster_pages_for_defrag(inode, pages, i, cluster);
57283                 }
57284                 if (ret < 0) {
57285 -                       inode_unlock(inode);
57286 +                       btrfs_inode_unlock(inode, 0);
57287                         goto out_ra;
57288                 }
57290                 defrag_count += ret;
57291                 balance_dirty_pages_ratelimited(inode->i_mapping);
57292 -               inode_unlock(inode);
57293 +               btrfs_inode_unlock(inode, 0);
57295                 if (newer_than) {
57296                         if (newer_off == (u64)-1)
57297 @@ -1675,9 +1687,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
57299  out_ra:
57300         if (do_compress) {
57301 -               inode_lock(inode);
57302 +               btrfs_inode_lock(inode, 0);
57303                 BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
57304 -               inode_unlock(inode);
57305 +               btrfs_inode_unlock(inode, 0);
57306         }
57307         if (!file)
57308                 kfree(ra);
57309 @@ -3112,9 +3124,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
57310                 goto out_dput;
57311         }
57313 -       inode_lock(inode);
57314 +       btrfs_inode_lock(inode, 0);
57315         err = btrfs_delete_subvolume(dir, dentry);
57316 -       inode_unlock(inode);
57317 +       btrfs_inode_unlock(inode, 0);
57318         if (!err) {
57319                 fsnotify_rmdir(dir, dentry);
57320                 d_delete(dentry);
57321 @@ -3123,7 +3135,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
57322  out_dput:
57323         dput(dentry);
57324  out_unlock_dir:
57325 -       inode_unlock(dir);
57326 +       btrfs_inode_unlock(dir, 0);
57327  free_subvol_name:
57328         kfree(subvol_name_ptr);
57329  free_parent:
57330 diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
57331 index 985a21558437..043e3fa961e0 100644
57332 --- a/fs/btrfs/ordered-data.c
57333 +++ b/fs/btrfs/ordered-data.c
57334 @@ -995,7 +995,7 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
57336         if (pre)
57337                 ret = clone_ordered_extent(ordered, 0, pre);
57338 -       if (post)
57339 +       if (ret == 0 && post)
57340                 ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
57341                                            post);
57343 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
57344 index f0b9ef13153a..2991287a71a8 100644
57345 --- a/fs/btrfs/qgroup.c
57346 +++ b/fs/btrfs/qgroup.c
57347 @@ -3579,7 +3579,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
57348                 return 0;
57349         }
57351 -       ret = btrfs_start_delalloc_snapshot(root);
57352 +       ret = btrfs_start_delalloc_snapshot(root, true);
57353         if (ret < 0)
57354                 goto out;
57355         btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
57356 diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
57357 index 762881b777b3..0abbf050580d 100644
57358 --- a/fs/btrfs/reflink.c
57359 +++ b/fs/btrfs/reflink.c
57360 @@ -833,7 +833,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
57361                 return -EINVAL;
57363         if (same_inode)
57364 -               inode_lock(src_inode);
57365 +               btrfs_inode_lock(src_inode, 0);
57366         else
57367                 lock_two_nondirectories(src_inode, dst_inode);
57369 @@ -849,7 +849,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
57371  out_unlock:
57372         if (same_inode)
57373 -               inode_unlock(src_inode);
57374 +               btrfs_inode_unlock(src_inode, 0);
57375         else
57376                 unlock_two_nondirectories(src_inode, dst_inode);
57378 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
57379 index 232d5da7b7be..829dc8dcc151 100644
57380 --- a/fs/btrfs/relocation.c
57381 +++ b/fs/btrfs/relocation.c
57382 @@ -733,10 +733,12 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
57383         struct extent_buffer *eb;
57384         struct btrfs_root_item *root_item;
57385         struct btrfs_key root_key;
57386 -       int ret;
57387 +       int ret = 0;
57388 +       bool must_abort = false;
57390         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
57391 -       BUG_ON(!root_item);
57392 +       if (!root_item)
57393 +               return ERR_PTR(-ENOMEM);
57395         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
57396         root_key.type = BTRFS_ROOT_ITEM_KEY;
57397 @@ -748,7 +750,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
57398                 /* called by btrfs_init_reloc_root */
57399                 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
57400                                       BTRFS_TREE_RELOC_OBJECTID);
57401 -               BUG_ON(ret);
57402 +               if (ret)
57403 +                       goto fail;
57405                 /*
57406                  * Set the last_snapshot field to the generation of the commit
57407                  * root - like this ctree.c:btrfs_block_can_be_shared() behaves
57408 @@ -769,9 +773,16 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
57409                  */
57410                 ret = btrfs_copy_root(trans, root, root->node, &eb,
57411                                       BTRFS_TREE_RELOC_OBJECTID);
57412 -               BUG_ON(ret);
57413 +               if (ret)
57414 +                       goto fail;
57415         }
57417 +       /*
57418 +        * We have changed references at this point, we must abort the
57419 +        * transaction if anything fails.
57420 +        */
57421 +       must_abort = true;
57423         memcpy(root_item, &root->root_item, sizeof(*root_item));
57424         btrfs_set_root_bytenr(root_item, eb->start);
57425         btrfs_set_root_level(root_item, btrfs_header_level(eb));
57426 @@ -789,14 +800,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
57428         ret = btrfs_insert_root(trans, fs_info->tree_root,
57429                                 &root_key, root_item);
57430 -       BUG_ON(ret);
57431 +       if (ret)
57432 +               goto fail;
57434         kfree(root_item);
57436         reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
57437 -       BUG_ON(IS_ERR(reloc_root));
57438 +       if (IS_ERR(reloc_root)) {
57439 +               ret = PTR_ERR(reloc_root);
57440 +               goto abort;
57441 +       }
57442         set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
57443         reloc_root->last_trans = trans->transid;
57444         return reloc_root;
57445 +fail:
57446 +       kfree(root_item);
57447 +abort:
57448 +       if (must_abort)
57449 +               btrfs_abort_transaction(trans, ret);
57450 +       return ERR_PTR(ret);
57453  /*
57454 @@ -875,7 +897,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
57455         int ret;
57457         if (!have_reloc_root(root))
57458 -               goto out;
57459 +               return 0;
57461         reloc_root = root->reloc_root;
57462         root_item = &reloc_root->root_item;
57463 @@ -908,10 +930,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
57465         ret = btrfs_update_root(trans, fs_info->tree_root,
57466                                 &reloc_root->root_key, root_item);
57467 -       BUG_ON(ret);
57468         btrfs_put_root(reloc_root);
57469 -out:
57470 -       return 0;
57471 +       return ret;
57474  /*
57475 @@ -1185,8 +1205,8 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
57476         int ret;
57477         int slot;
57479 -       BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
57480 -       BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
57481 +       ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
57482 +       ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
57484         last_snapshot = btrfs_root_last_snapshot(&src->root_item);
57485  again:
57486 @@ -1217,7 +1237,7 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
57487         parent = eb;
57488         while (1) {
57489                 level = btrfs_header_level(parent);
57490 -               BUG_ON(level < lowest_level);
57491 +               ASSERT(level >= lowest_level);
57493                 ret = btrfs_bin_search(parent, &key, &slot);
57494                 if (ret < 0)
57495 @@ -2578,7 +2598,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
57496                 return btrfs_end_transaction(trans);
57497         }
57499 -       inode_lock(&inode->vfs_inode);
57500 +       btrfs_inode_lock(&inode->vfs_inode, 0);
57501         for (nr = 0; nr < cluster->nr; nr++) {
57502                 start = cluster->boundary[nr] - offset;
57503                 if (nr + 1 < cluster->nr)
57504 @@ -2596,7 +2616,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
57505                 if (ret)
57506                         break;
57507         }
57508 -       inode_unlock(&inode->vfs_inode);
57509 +       btrfs_inode_unlock(&inode->vfs_inode, 0);
57511         if (cur_offset < prealloc_end)
57512                 btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
57513 diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
57514 index 3d9088eab2fc..b9202a1f1af1 100644
57515 --- a/fs/btrfs/scrub.c
57516 +++ b/fs/btrfs/scrub.c
57517 @@ -3682,8 +3682,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
57518                         spin_lock(&cache->lock);
57519                         if (!cache->to_copy) {
57520                                 spin_unlock(&cache->lock);
57521 -                               ro_set = 0;
57522 -                               goto done;
57523 +                               btrfs_put_block_group(cache);
57524 +                               goto skip;
57525                         }
57526                         spin_unlock(&cache->lock);
57527                 }
57528 @@ -3841,7 +3841,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
57529                                                       cache, found_key.offset))
57530                         ro_set = 0;
57532 -done:
57533                 down_write(&dev_replace->rwsem);
57534                 dev_replace->cursor_left = dev_replace->cursor_right;
57535                 dev_replace->item_needs_writeback = 1;
57536 diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
57537 index 8f323859156b..8ae8f1732fd2 100644
57538 --- a/fs/btrfs/send.c
57539 +++ b/fs/btrfs/send.c
57540 @@ -7139,7 +7139,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
57541         int i;
57543         if (root) {
57544 -               ret = btrfs_start_delalloc_snapshot(root);
57545 +               ret = btrfs_start_delalloc_snapshot(root, false);
57546                 if (ret)
57547                         return ret;
57548                 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
57549 @@ -7147,7 +7147,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
57551         for (i = 0; i < sctx->clone_roots_cnt; i++) {
57552                 root = sctx->clone_roots[i].root;
57553 -               ret = btrfs_start_delalloc_snapshot(root);
57554 +               ret = btrfs_start_delalloc_snapshot(root, false);
57555                 if (ret)
57556                         return ret;
57557                 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
57558 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
57559 index acff6bb49a97..d56d3e7ca324 100644
57560 --- a/fs/btrfs/transaction.c
57561 +++ b/fs/btrfs/transaction.c
57562 @@ -260,6 +260,7 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
57563  void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
57565         struct btrfs_fs_info *fs_info = trans->fs_info;
57566 +       struct btrfs_transaction *cur_trans = trans->transaction;
57568         if (!trans->chunk_bytes_reserved)
57569                 return;
57570 @@ -268,6 +269,8 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
57572         btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
57573                                 trans->chunk_bytes_reserved, NULL);
57574 +       atomic64_sub(trans->chunk_bytes_reserved, &cur_trans->chunk_bytes_reserved);
57575 +       cond_wake_up(&cur_trans->chunk_reserve_wait);
57576         trans->chunk_bytes_reserved = 0;
57579 @@ -383,6 +386,8 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info,
57580         spin_lock_init(&cur_trans->dropped_roots_lock);
57581         INIT_LIST_HEAD(&cur_trans->releasing_ebs);
57582         spin_lock_init(&cur_trans->releasing_ebs_lock);
57583 +       atomic64_set(&cur_trans->chunk_bytes_reserved, 0);
57584 +       init_waitqueue_head(&cur_trans->chunk_reserve_wait);
57585         list_add_tail(&cur_trans->list, &fs_info->trans_list);
57586         extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
57587                         IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
57588 @@ -1961,7 +1966,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
57589          */
57590         BUG_ON(list_empty(&cur_trans->list));
57592 -       list_del_init(&cur_trans->list);
57593         if (cur_trans == fs_info->running_transaction) {
57594                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
57595                 spin_unlock(&fs_info->trans_lock);
57596 @@ -1970,6 +1974,17 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
57598                 spin_lock(&fs_info->trans_lock);
57599         }
57601 +       /*
57602 +        * Now that we know no one else is still using the transaction we can
57603 +        * remove the transaction from the list of transactions. This avoids
57604 +        * the transaction kthread from cleaning up the transaction while some
57605 +        * other task is still using it, which could result in a use-after-free
57606 +        * on things like log trees, as it forces the transaction kthread to
57607 +        * wait for this transaction to be cleaned up by us.
57608 +        */
57609 +       list_del_init(&cur_trans->list);
57611         spin_unlock(&fs_info->trans_lock);
57613         btrfs_cleanup_one_transaction(trans->transaction, fs_info);
57614 diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
57615 index 6335716e513f..364cfbb4c5c5 100644
57616 --- a/fs/btrfs/transaction.h
57617 +++ b/fs/btrfs/transaction.h
57618 @@ -96,6 +96,13 @@ struct btrfs_transaction {
57620         spinlock_t releasing_ebs_lock;
57621         struct list_head releasing_ebs;
57623 +       /*
57624 +        * The number of bytes currently reserved, by all transaction handles
57625 +        * attached to this transaction, for metadata extents of the chunk tree.
57626 +        */
57627 +       atomic64_t chunk_bytes_reserved;
57628 +       wait_queue_head_t chunk_reserve_wait;
57629  };
57631  #define __TRANS_FREEZABLE      (1U << 0)
57632 @@ -175,7 +182,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
57633         spin_lock(&inode->lock);
57634         inode->last_trans = trans->transaction->transid;
57635         inode->last_sub_trans = inode->root->log_transid;
57636 -       inode->last_log_commit = inode->root->last_log_commit;
57637 +       inode->last_log_commit = inode->last_sub_trans - 1;
57638         spin_unlock(&inode->lock);
57641 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
57642 index 92a368627791..47e76e79b3d6 100644
57643 --- a/fs/btrfs/tree-log.c
57644 +++ b/fs/btrfs/tree-log.c
57645 @@ -3165,20 +3165,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
57646          */
57647         mutex_unlock(&root->log_mutex);
57649 -       btrfs_init_log_ctx(&root_log_ctx, NULL);
57651 -       mutex_lock(&log_root_tree->log_mutex);
57653         if (btrfs_is_zoned(fs_info)) {
57654 +               mutex_lock(&fs_info->tree_root->log_mutex);
57655                 if (!log_root_tree->node) {
57656                         ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
57657                         if (ret) {
57658 -                               mutex_unlock(&log_root_tree->log_mutex);
57659 +                               mutex_unlock(&fs_info->tree_log_mutex);
57660                                 goto out;
57661                         }
57662                 }
57663 +               mutex_unlock(&fs_info->tree_root->log_mutex);
57664         }
57666 +       btrfs_init_log_ctx(&root_log_ctx, NULL);
57668 +       mutex_lock(&log_root_tree->log_mutex);
57670         index2 = log_root_tree->log_transid % 2;
57671         list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
57672         root_log_ctx.log_transid = log_root_tree->log_transid;
57673 @@ -6058,7 +6060,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
57674          * (since logging them is pointless, a link count of 0 means they
57675          * will never be accessible).
57676          */
57677 -       if (btrfs_inode_in_log(inode, trans->transid) ||
57678 +       if ((btrfs_inode_in_log(inode, trans->transid) &&
57679 +            list_empty(&ctx->ordered_extents)) ||
57680             inode->vfs_inode.i_nlink == 0) {
57681                 ret = BTRFS_NO_LOG_SYNC;
57682                 goto end_no_trans;
57683 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
57684 index 1c6810bbaf8b..3912eda7905f 100644
57685 --- a/fs/btrfs/volumes.c
57686 +++ b/fs/btrfs/volumes.c
57687 @@ -4989,6 +4989,8 @@ static void init_alloc_chunk_ctl_policy_zoned(
57688                 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
57689                 ctl->devs_max = min_t(int, ctl->devs_max,
57690                                       BTRFS_MAX_DEVS_SYS_CHUNK);
57691 +       } else {
57692 +               BUG();
57693         }
57695         /* We don't want a chunk larger than 10% of writable space */
57696 diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
57697 index eeb3ebe11d7a..304ce64c70a4 100644
57698 --- a/fs/btrfs/zoned.c
57699 +++ b/fs/btrfs/zoned.c
57700 @@ -342,6 +342,13 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
57701         if (!IS_ALIGNED(nr_sectors, zone_sectors))
57702                 zone_info->nr_zones++;
57704 +       if (bdev_is_zoned(bdev) && zone_info->max_zone_append_size == 0) {
57705 +               btrfs_err(fs_info, "zoned: device %pg does not support zone append",
57706 +                         bdev);
57707 +               ret = -EINVAL;
57708 +               goto out;
57709 +       }
57711         zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
57712         if (!zone_info->seq_zones) {
57713                 ret = -ENOMEM;
57714 @@ -1119,6 +1126,11 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
57715                         goto out;
57716                 }
57718 +               if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
57719 +                       ret = -EIO;
57720 +                       goto out;
57721 +               }
57723                 switch (zone.cond) {
57724                 case BLK_ZONE_COND_OFFLINE:
57725                 case BLK_ZONE_COND_READONLY:
57726 diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
57727 index 8e9626d63976..14418b02c189 100644
57728 --- a/fs/btrfs/zstd.c
57729 +++ b/fs/btrfs/zstd.c
57730 @@ -28,10 +28,10 @@
57731  /* 307s to avoid pathologically clashing with transaction commit */
57732  #define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
57734 -static ZSTD_parameters zstd_get_btrfs_parameters(unsigned int level,
57735 +static zstd_parameters zstd_get_btrfs_parameters(unsigned int level,
57736                                                  size_t src_len)
57738 -       ZSTD_parameters params = ZSTD_getParams(level, src_len, 0);
57739 +       zstd_parameters params = zstd_get_params(level, src_len);
57741         if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
57742                 params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
57743 @@ -48,8 +48,8 @@ struct workspace {
57744         unsigned long last_used; /* jiffies */
57745         struct list_head list;
57746         struct list_head lru_list;
57747 -       ZSTD_inBuffer in_buf;
57748 -       ZSTD_outBuffer out_buf;
57749 +       zstd_in_buffer in_buf;
57750 +       zstd_out_buffer out_buf;
57751  };
57753  /*
57754 @@ -155,12 +155,12 @@ static void zstd_calc_ws_mem_sizes(void)
57755         unsigned int level;
57757         for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
57758 -               ZSTD_parameters params =
57759 +               zstd_parameters params =
57760                         zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
57761                 size_t level_size =
57762                         max_t(size_t,
57763 -                             ZSTD_CStreamWorkspaceBound(params.cParams),
57764 -                             ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
57765 +                             zstd_cstream_workspace_bound(&params.cParams),
57766 +                             zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));
57768                 max_size = max_t(size_t, max_size, level_size);
57769                 zstd_ws_mem_sizes[level - 1] = max_size;
57770 @@ -371,7 +371,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
57771                 unsigned long *total_in, unsigned long *total_out)
57773         struct workspace *workspace = list_entry(ws, struct workspace, list);
57774 -       ZSTD_CStream *stream;
57775 +       zstd_cstream *stream;
57776         int ret = 0;
57777         int nr_pages = 0;
57778         struct page *in_page = NULL;  /* The current page to read */
57779 @@ -381,7 +381,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
57780         unsigned long len = *total_out;
57781         const unsigned long nr_dest_pages = *out_pages;
57782         unsigned long max_out = nr_dest_pages * PAGE_SIZE;
57783 -       ZSTD_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
57784 +       zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
57785                                                            len);
57787         *out_pages = 0;
57788 @@ -389,10 +389,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
57789         *total_in = 0;
57791         /* Initialize the stream */
57792 -       stream = ZSTD_initCStream(params, len, workspace->mem,
57793 +       stream = zstd_init_cstream(&params, len, workspace->mem,
57794                         workspace->size);
57795         if (!stream) {
57796 -               pr_warn("BTRFS: ZSTD_initCStream failed\n");
57797 +               pr_warn("BTRFS: zstd_init_cstream failed\n");
57798                 ret = -EIO;
57799                 goto out;
57800         }
57801 @@ -418,11 +418,11 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
57802         while (1) {
57803                 size_t ret2;
57805 -               ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
57806 +               ret2 = zstd_compress_stream(stream, &workspace->out_buf,
57807                                 &workspace->in_buf);
57808 -               if (ZSTD_isError(ret2)) {
57809 -                       pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
57810 -                                       ZSTD_getErrorCode(ret2));
57811 +               if (zstd_is_error(ret2)) {
57812 +                       pr_debug("BTRFS: zstd_compress_stream returned %d\n",
57813 +                                       zstd_get_error_code(ret2));
57814                         ret = -EIO;
57815                         goto out;
57816                 }
57817 @@ -487,10 +487,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
57818         while (1) {
57819                 size_t ret2;
57821 -               ret2 = ZSTD_endStream(stream, &workspace->out_buf);
57822 -               if (ZSTD_isError(ret2)) {
57823 -                       pr_debug("BTRFS: ZSTD_endStream returned %d\n",
57824 -                                       ZSTD_getErrorCode(ret2));
57825 +               ret2 = zstd_end_stream(stream, &workspace->out_buf);
57826 +               if (zstd_is_error(ret2)) {
57827 +                       pr_debug("BTRFS: zstd_end_stream returned %d\n",
57828 +                                       zstd_get_error_code(ret2));
57829                         ret = -EIO;
57830                         goto out;
57831                 }
57832 @@ -550,17 +550,17 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
57833         u64 disk_start = cb->start;
57834         struct bio *orig_bio = cb->orig_bio;
57835         size_t srclen = cb->compressed_len;
57836 -       ZSTD_DStream *stream;
57837 +       zstd_dstream *stream;
57838         int ret = 0;
57839         unsigned long page_in_index = 0;
57840         unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
57841         unsigned long buf_start;
57842         unsigned long total_out = 0;
57844 -       stream = ZSTD_initDStream(
57845 +       stream = zstd_init_dstream(
57846                         ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
57847         if (!stream) {
57848 -               pr_debug("BTRFS: ZSTD_initDStream failed\n");
57849 +               pr_debug("BTRFS: zstd_init_dstream failed\n");
57850                 ret = -EIO;
57851                 goto done;
57852         }
57853 @@ -576,11 +576,11 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
57854         while (1) {
57855                 size_t ret2;
57857 -               ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
57858 +               ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
57859                                 &workspace->in_buf);
57860 -               if (ZSTD_isError(ret2)) {
57861 -                       pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
57862 -                                       ZSTD_getErrorCode(ret2));
57863 +               if (zstd_is_error(ret2)) {
57864 +                       pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
57865 +                                       zstd_get_error_code(ret2));
57866                         ret = -EIO;
57867                         goto done;
57868                 }
57869 @@ -626,17 +626,17 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
57870                 size_t destlen)
57872         struct workspace *workspace = list_entry(ws, struct workspace, list);
57873 -       ZSTD_DStream *stream;
57874 +       zstd_dstream *stream;
57875         int ret = 0;
57876         size_t ret2;
57877         unsigned long total_out = 0;
57878         unsigned long pg_offset = 0;
57879         char *kaddr;
57881 -       stream = ZSTD_initDStream(
57882 +       stream = zstd_init_dstream(
57883                         ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
57884         if (!stream) {
57885 -               pr_warn("BTRFS: ZSTD_initDStream failed\n");
57886 +               pr_warn("BTRFS: zstd_init_dstream failed\n");
57887                 ret = -EIO;
57888                 goto finish;
57889         }
57890 @@ -660,15 +660,15 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
57892                 /* Check if the frame is over and we still need more input */
57893                 if (ret2 == 0) {
57894 -                       pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
57895 +                       pr_debug("BTRFS: zstd_decompress_stream ended early\n");
57896                         ret = -EIO;
57897                         goto finish;
57898                 }
57899 -               ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
57900 +               ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
57901                                 &workspace->in_buf);
57902 -               if (ZSTD_isError(ret2)) {
57903 -                       pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
57904 -                                       ZSTD_getErrorCode(ret2));
57905 +               if (zstd_is_error(ret2)) {
57906 +                       pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
57907 +                                       zstd_get_error_code(ret2));
57908                         ret = -EIO;
57909                         goto finish;
57910                 }
57911 diff --git a/fs/ceph/export.c b/fs/ceph/export.c
57912 index e088843a7734..baa6368bece5 100644
57913 --- a/fs/ceph/export.c
57914 +++ b/fs/ceph/export.c
57915 @@ -178,8 +178,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
57916                 return ERR_CAST(inode);
57917         /* We need LINK caps to reliably check i_nlink */
57918         err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false);
57919 -       if (err)
57920 +       if (err) {
57921 +               iput(inode);
57922                 return ERR_PTR(err);
57923 +       }
57924         /* -ESTALE if inode as been unlinked and no file is open */
57925         if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) {
57926                 iput(inode);
57927 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
57928 index 5ddd20b62484..fa896a1c8b07 100644
57929 --- a/fs/cifs/cifsfs.c
57930 +++ b/fs/cifs/cifsfs.c
57931 @@ -834,7 +834,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
57932                 goto out;
57933         }
57935 -       rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, old_ctx->UNC);
57936 +       rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
57937         if (rc) {
57938                 root = ERR_PTR(rc);
57939                 goto out;
57940 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
57941 index 24668eb006c6..3d62d52d730b 100644
57942 --- a/fs/cifs/connect.c
57943 +++ b/fs/cifs/connect.c
57944 @@ -488,6 +488,7 @@ server_unresponsive(struct TCP_Server_Info *server)
57945          */
57946         if ((server->tcpStatus == CifsGood ||
57947             server->tcpStatus == CifsNeedNegotiate) &&
57948 +           (!server->ops->can_echo || server->ops->can_echo(server)) &&
57949             time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
57950                 cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
57951                          (3 * server->echo_interval) / HZ);
57952 @@ -3175,17 +3176,29 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
57953  int
57954  cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
57956 -       int rc = 0;
57957 +       int rc;
57959 -       smb3_parse_devname(devname, ctx);
57960 +       if (devname) {
57961 +               cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
57962 +               rc = smb3_parse_devname(devname, ctx);
57963 +               if (rc) {
57964 +                       cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
57965 +                       return rc;
57966 +               }
57967 +       }
57969         if (mntopts) {
57970                 char *ip;
57972 -               cifs_dbg(FYI, "%s: mntopts=%s\n", __func__, mntopts);
57973                 rc = smb3_parse_opt(mntopts, "ip", &ip);
57974 -               if (!rc && !cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip,
57975 -                                                strlen(ip))) {
57976 +               if (rc) {
57977 +                       cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
57978 +                       return rc;
57979 +               }
57981 +               rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
57982 +               kfree(ip);
57983 +               if (!rc) {
57984                         cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
57985                         return -EINVAL;
57986                 }
57987 @@ -3205,7 +3218,7 @@ cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const c
57988                 return -EINVAL;
57989         }
57991 -       return rc;
57992 +       return 0;
57995  static int
57996 diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
57997 index 78889024a7ed..a7253eb2e955 100644
57998 --- a/fs/cifs/fs_context.c
57999 +++ b/fs/cifs/fs_context.c
58000 @@ -475,6 +475,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
58002         /* move "pos" up to delimiter or NULL */
58003         pos += len;
58004 +       kfree(ctx->UNC);
58005         ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL);
58006         if (!ctx->UNC)
58007                 return -ENOMEM;
58008 @@ -485,6 +486,9 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
58009         if (*pos == '/' || *pos == '\\')
58010                 pos++;
58012 +       kfree(ctx->prepath);
58013 +       ctx->prepath = NULL;
58015         /* If pos is NULL then no prepath */
58016         if (!*pos)
58017                 return 0;
58018 @@ -995,6 +999,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
58019                         goto cifs_parse_mount_err;
58020                 }
58021                 ctx->max_channels = result.uint_32;
58022 +               /* If more than one channel requested ... they want multichan */
58023 +               if (result.uint_32 > 1)
58024 +                       ctx->multichannel = true;
58025                 break;
58026         case Opt_handletimeout:
58027                 ctx->handle_timeout = result.uint_32;
58028 diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
58029 index 63d517b9f2ff..a92a1fb7cb52 100644
58030 --- a/fs/cifs/sess.c
58031 +++ b/fs/cifs/sess.c
58032 @@ -97,6 +97,12 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
58033                 return 0;
58034         }
58036 +       if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
58037 +               cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
58038 +               ses->chan_max = 1;
58039 +               return 0;
58040 +       }
58042         /*
58043          * Make a copy of the iface list at the time and use that
58044          * instead so as to not hold the iface spinlock for opening
58045 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
58046 index f703204fb185..5df6daacc230 100644
58047 --- a/fs/cifs/smb2ops.c
58048 +++ b/fs/cifs/smb2ops.c
58049 @@ -1763,18 +1763,14 @@ smb2_ioctl_query_info(const unsigned int xid,
58050         }
58052   iqinf_exit:
58053 -       kfree(vars);
58054 -       kfree(buffer);
58055 -       SMB2_open_free(&rqst[0]);
58056 -       if (qi.flags & PASSTHRU_FSCTL)
58057 -               SMB2_ioctl_free(&rqst[1]);
58058 -       else
58059 -               SMB2_query_info_free(&rqst[1]);
58061 -       SMB2_close_free(&rqst[2]);
58062 +       cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
58063 +       cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
58064 +       cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
58065         free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
58066         free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
58067         free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
58068 +       kfree(vars);
58069 +       kfree(buffer);
58070         return rc;
58072  e_fault:
58073 @@ -2232,7 +2228,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
58075         cifs_sb = CIFS_SB(inode->i_sb);
58077 -       utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
58078 +       utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
58079         if (utf16_path == NULL) {
58080                 rc = -ENOMEM;
58081                 goto notify_exit;
58082 @@ -4178,7 +4174,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
58083         }
58084         spin_unlock(&cifs_tcp_ses_lock);
58086 -       return 1;
58087 +       return -EAGAIN;
58089  /*
58090   * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
58091 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
58092 index 2199a9bfae8f..29272d99102c 100644
58093 --- a/fs/cifs/smb2pdu.c
58094 +++ b/fs/cifs/smb2pdu.c
58095 @@ -841,6 +841,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
58096                 req->SecurityMode = 0;
58098         req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
58099 +       if (ses->chan_max > 1)
58100 +               req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
58102         /* ClientGUID must be zero for SMB2.02 dialect */
58103         if (server->vals->protocol_id == SMB20_PROT_ID)
58104 @@ -1032,6 +1034,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
58106         pneg_inbuf->Capabilities =
58107                         cpu_to_le32(server->vals->req_capabilities);
58108 +       if (tcon->ses->chan_max > 1)
58109 +               pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
58111         memcpy(pneg_inbuf->Guid, server->client_guid,
58112                                         SMB2_CLIENT_GUID_SIZE);
58114 diff --git a/fs/dax.c b/fs/dax.c
58115 index b3d27fdc6775..df5485b4bddf 100644
58116 --- a/fs/dax.c
58117 +++ b/fs/dax.c
58118 @@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue {
58119         struct exceptional_entry_key key;
58120  };
58122 +/**
58123 + * enum dax_wake_mode: waitqueue wakeup behaviour
58124 + * @WAKE_ALL: wake all waiters in the waitqueue
58125 + * @WAKE_NEXT: wake only the first waiter in the waitqueue
58126 + */
58127 +enum dax_wake_mode {
58128 +       WAKE_ALL,
58129 +       WAKE_NEXT,
58132  static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
58133                 void *entry, struct exceptional_entry_key *key)
58135 @@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
58136   * The important information it's conveying is whether the entry at
58137   * this index used to be a PMD entry.
58138   */
58139 -static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
58140 +static void dax_wake_entry(struct xa_state *xas, void *entry,
58141 +                          enum dax_wake_mode mode)
58143         struct exceptional_entry_key key;
58144         wait_queue_head_t *wq;
58145 @@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
58146          * must be in the waitqueue and the following check will see them.
58147          */
58148         if (waitqueue_active(wq))
58149 -               __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
58150 +               __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
58153  /*
58154 @@ -264,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
58155         finish_wait(wq, &ewait.wait);
58158 -static void put_unlocked_entry(struct xa_state *xas, void *entry)
58159 +static void put_unlocked_entry(struct xa_state *xas, void *entry,
58160 +                              enum dax_wake_mode mode)
58162 -       /* If we were the only waiter woken, wake the next one */
58163         if (entry && !dax_is_conflict(entry))
58164 -               dax_wake_entry(xas, entry, false);
58165 +               dax_wake_entry(xas, entry, mode);
58168  /*
58169 @@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
58170         old = xas_store(xas, entry);
58171         xas_unlock_irq(xas);
58172         BUG_ON(!dax_is_locked(old));
58173 -       dax_wake_entry(xas, entry, false);
58174 +       dax_wake_entry(xas, entry, WAKE_NEXT);
58177  /*
58178 @@ -524,7 +535,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
58180                 dax_disassociate_entry(entry, mapping, false);
58181                 xas_store(xas, NULL);   /* undo the PMD join */
58182 -               dax_wake_entry(xas, entry, true);
58183 +               dax_wake_entry(xas, entry, WAKE_ALL);
58184                 mapping->nrexceptional--;
58185                 entry = NULL;
58186                 xas_set(xas, index);
58187 @@ -622,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
58188                         entry = get_unlocked_entry(&xas, 0);
58189                 if (entry)
58190                         page = dax_busy_page(entry);
58191 -               put_unlocked_entry(&xas, entry);
58192 +               put_unlocked_entry(&xas, entry, WAKE_NEXT);
58193                 if (page)
58194                         break;
58195                 if (++scanned % XA_CHECK_SCHED)
58196 @@ -664,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
58197         mapping->nrexceptional--;
58198         ret = 1;
58199  out:
58200 -       put_unlocked_entry(&xas, entry);
58201 +       put_unlocked_entry(&xas, entry, WAKE_ALL);
58202         xas_unlock_irq(&xas);
58203         return ret;
58205 @@ -937,13 +948,13 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
58206         xas_lock_irq(xas);
58207         xas_store(xas, entry);
58208         xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
58209 -       dax_wake_entry(xas, entry, false);
58210 +       dax_wake_entry(xas, entry, WAKE_NEXT);
58212         trace_dax_writeback_one(mapping->host, index, count);
58213         return ret;
58215   put_unlocked:
58216 -       put_unlocked_entry(xas, entry);
58217 +       put_unlocked_entry(xas, entry, WAKE_NEXT);
58218         return ret;
58221 @@ -1684,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
58222         /* Did we race with someone splitting entry or so? */
58223         if (!entry || dax_is_conflict(entry) ||
58224             (order == 0 && !dax_is_pte_entry(entry))) {
58225 -               put_unlocked_entry(&xas, entry);
58226 +               put_unlocked_entry(&xas, entry, WAKE_NEXT);
58227                 xas_unlock_irq(&xas);
58228                 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
58229                                                       VM_FAULT_NOPAGE);
58230 diff --git a/fs/dcache.c b/fs/dcache.c
58231 index 7d24ff7eb206..9deb97404201 100644
58232 --- a/fs/dcache.c
58233 +++ b/fs/dcache.c
58234 @@ -71,7 +71,7 @@
58235   * If no ancestor relationship:
58236   * arbitrary, since it's serialized on rename_lock
58237   */
58238 -int sysctl_vfs_cache_pressure __read_mostly = 100;
58239 +int sysctl_vfs_cache_pressure __read_mostly = 50;
58240  EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
58242  __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
58243 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
58244 index 22e86ae4dd5a..1d252164d97b 100644
58245 --- a/fs/debugfs/inode.c
58246 +++ b/fs/debugfs/inode.c
58247 @@ -35,7 +35,7 @@
58248  static struct vfsmount *debugfs_mount;
58249  static int debugfs_mount_count;
58250  static bool debugfs_registered;
58251 -static unsigned int debugfs_allow = DEFAULT_DEBUGFS_ALLOW_BITS;
58252 +static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
58254  /*
58255   * Don't allow access attributes to be changed whilst the kernel is locked down
58256 diff --git a/fs/dlm/config.c b/fs/dlm/config.c
58257 index 49c5f9407098..88d95d96e36c 100644
58258 --- a/fs/dlm/config.c
58259 +++ b/fs/dlm/config.c
58260 @@ -125,7 +125,7 @@ static ssize_t cluster_cluster_name_store(struct config_item *item,
58261  CONFIGFS_ATTR(cluster_, cluster_name);
58263  static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
58264 -                          int *info_field, bool (*check_cb)(unsigned int x),
58265 +                          int *info_field, int (*check_cb)(unsigned int x),
58266                            const char *buf, size_t len)
58268         unsigned int x;
58269 @@ -137,8 +137,11 @@ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
58270         if (rc)
58271                 return rc;
58273 -       if (check_cb && check_cb(x))
58274 -               return -EINVAL;
58275 +       if (check_cb) {
58276 +               rc = check_cb(x);
58277 +               if (rc)
58278 +                       return rc;
58279 +       }
58281         *cl_field = x;
58282         *info_field = x;
58283 @@ -161,17 +164,53 @@ static ssize_t cluster_##name##_show(struct config_item *item, char *buf)     \
58284  }                                                                             \
58285  CONFIGFS_ATTR(cluster_, name);
58287 -static bool dlm_check_zero(unsigned int x)
58288 +static int dlm_check_protocol_and_dlm_running(unsigned int x)
58290 +       switch (x) {
58291 +       case 0:
58292 +               /* TCP */
58293 +               break;
58294 +       case 1:
58295 +               /* SCTP */
58296 +               break;
58297 +       default:
58298 +               return -EINVAL;
58299 +       }
58301 +       if (dlm_allow_conn)
58302 +               return -EBUSY;
58304 +       return 0;
58307 +static int dlm_check_zero_and_dlm_running(unsigned int x)
58309 +       if (!x)
58310 +               return -EINVAL;
58312 +       if (dlm_allow_conn)
58313 +               return -EBUSY;
58315 +       return 0;
58318 +static int dlm_check_zero(unsigned int x)
58320 -       return !x;
58321 +       if (!x)
58322 +               return -EINVAL;
58324 +       return 0;
58327 -static bool dlm_check_buffer_size(unsigned int x)
58328 +static int dlm_check_buffer_size(unsigned int x)
58330 -       return (x < DEFAULT_BUFFER_SIZE);
58331 +       if (x < DEFAULT_BUFFER_SIZE)
58332 +               return -EINVAL;
58334 +       return 0;
58337 -CLUSTER_ATTR(tcp_port, dlm_check_zero);
58338 +CLUSTER_ATTR(tcp_port, dlm_check_zero_and_dlm_running);
58339  CLUSTER_ATTR(buffer_size, dlm_check_buffer_size);
58340  CLUSTER_ATTR(rsbtbl_size, dlm_check_zero);
58341  CLUSTER_ATTR(recover_timer, dlm_check_zero);
58342 @@ -179,7 +218,7 @@ CLUSTER_ATTR(toss_secs, dlm_check_zero);
58343  CLUSTER_ATTR(scan_secs, dlm_check_zero);
58344  CLUSTER_ATTR(log_debug, NULL);
58345  CLUSTER_ATTR(log_info, NULL);
58346 -CLUSTER_ATTR(protocol, NULL);
58347 +CLUSTER_ATTR(protocol, dlm_check_protocol_and_dlm_running);
58348  CLUSTER_ATTR(mark, NULL);
58349  CLUSTER_ATTR(timewarn_cs, dlm_check_zero);
58350  CLUSTER_ATTR(waitwarn_us, NULL);
58351 @@ -688,6 +727,7 @@ static ssize_t comm_mark_show(struct config_item *item, char *buf)
58352  static ssize_t comm_mark_store(struct config_item *item, const char *buf,
58353                                size_t len)
58355 +       struct dlm_comm *comm;
58356         unsigned int mark;
58357         int rc;
58359 @@ -695,7 +735,15 @@ static ssize_t comm_mark_store(struct config_item *item, const char *buf,
58360         if (rc)
58361                 return rc;
58363 -       config_item_to_comm(item)->mark = mark;
58364 +       if (mark == 0)
58365 +               mark = dlm_config.ci_mark;
58367 +       comm = config_item_to_comm(item);
58368 +       rc = dlm_lowcomms_nodes_set_mark(comm->nodeid, mark);
58369 +       if (rc)
58370 +               return rc;
58372 +       comm->mark = mark;
58373         return len;
58376 @@ -870,24 +918,6 @@ int dlm_comm_seq(int nodeid, uint32_t *seq)
58377         return 0;
58380 -void dlm_comm_mark(int nodeid, unsigned int *mark)
58382 -       struct dlm_comm *cm;
58384 -       cm = get_comm(nodeid);
58385 -       if (!cm) {
58386 -               *mark = dlm_config.ci_mark;
58387 -               return;
58388 -       }
58390 -       if (cm->mark)
58391 -               *mark = cm->mark;
58392 -       else
58393 -               *mark = dlm_config.ci_mark;
58395 -       put_comm(cm);
58398  int dlm_our_nodeid(void)
58400         return local_comm ? local_comm->nodeid : 0;
58401 diff --git a/fs/dlm/config.h b/fs/dlm/config.h
58402 index c210250a2581..d2cd4bd20313 100644
58403 --- a/fs/dlm/config.h
58404 +++ b/fs/dlm/config.h
58405 @@ -48,7 +48,6 @@ void dlm_config_exit(void);
58406  int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
58407                      int *count_out);
58408  int dlm_comm_seq(int nodeid, uint32_t *seq);
58409 -void dlm_comm_mark(int nodeid, unsigned int *mark);
58410  int dlm_our_nodeid(void);
58411  int dlm_our_addr(struct sockaddr_storage *addr, int num);
58413 diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
58414 index d6bbccb0ed15..d5bd990bcab8 100644
58415 --- a/fs/dlm/debug_fs.c
58416 +++ b/fs/dlm/debug_fs.c
58417 @@ -542,6 +542,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
58419                 if (bucket >= ls->ls_rsbtbl_size) {
58420                         kfree(ri);
58421 +                       ++*pos;
58422                         return NULL;
58423                 }
58424                 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
58425 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
58426 index 561dcad08ad6..c14cf2b7faab 100644
58427 --- a/fs/dlm/lockspace.c
58428 +++ b/fs/dlm/lockspace.c
58429 @@ -404,12 +404,6 @@ static int threads_start(void)
58430         return error;
58433 -static void threads_stop(void)
58435 -       dlm_scand_stop();
58436 -       dlm_lowcomms_stop();
58439  static int new_lockspace(const char *name, const char *cluster,
58440                          uint32_t flags, int lvblen,
58441                          const struct dlm_lockspace_ops *ops, void *ops_arg,
58442 @@ -702,8 +696,11 @@ int dlm_new_lockspace(const char *name, const char *cluster,
58443                 ls_count++;
58444         if (error > 0)
58445                 error = 0;
58446 -       if (!ls_count)
58447 -               threads_stop();
58448 +       if (!ls_count) {
58449 +               dlm_scand_stop();
58450 +               dlm_lowcomms_shutdown();
58451 +               dlm_lowcomms_stop();
58452 +       }
58453   out:
58454         mutex_unlock(&ls_lock);
58455         return error;
58456 @@ -788,6 +785,11 @@ static int release_lockspace(struct dlm_ls *ls, int force)
58458         dlm_recoverd_stop(ls);
58460 +       if (ls_count == 1) {
58461 +               dlm_scand_stop();
58462 +               dlm_lowcomms_shutdown();
58463 +       }
58465         dlm_callback_stop(ls);
58467         remove_lockspace(ls);
58468 @@ -880,7 +882,7 @@ int dlm_release_lockspace(void *lockspace, int force)
58469         if (!error)
58470                 ls_count--;
58471         if (!ls_count)
58472 -               threads_stop();
58473 +               dlm_lowcomms_stop();
58474         mutex_unlock(&ls_lock);
58476         return error;
58477 diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
58478 index 372c34ff8594..45c2fdaf34c4 100644
58479 --- a/fs/dlm/lowcomms.c
58480 +++ b/fs/dlm/lowcomms.c
58481 @@ -116,6 +116,7 @@ struct writequeue_entry {
58482  struct dlm_node_addr {
58483         struct list_head list;
58484         int nodeid;
58485 +       int mark;
58486         int addr_count;
58487         int curr_addr_index;
58488         struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
58489 @@ -134,7 +135,7 @@ static DEFINE_SPINLOCK(dlm_node_addrs_spin);
58490  static struct listen_connection listen_con;
58491  static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
58492  static int dlm_local_count;
58493 -static int dlm_allow_conn;
58494 +int dlm_allow_conn;
58496  /* Work queues */
58497  static struct workqueue_struct *recv_workqueue;
58498 @@ -303,7 +304,8 @@ static int addr_compare(const struct sockaddr_storage *x,
58501  static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
58502 -                         struct sockaddr *sa_out, bool try_new_addr)
58503 +                         struct sockaddr *sa_out, bool try_new_addr,
58504 +                         unsigned int *mark)
58506         struct sockaddr_storage sas;
58507         struct dlm_node_addr *na;
58508 @@ -331,6 +333,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
58509         if (!na->addr_count)
58510                 return -ENOENT;
58512 +       *mark = na->mark;
58514         if (sas_out)
58515                 memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
58517 @@ -350,7 +354,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
58518         return 0;
58521 -static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
58522 +static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
58523 +                         unsigned int *mark)
58525         struct dlm_node_addr *na;
58526         int rv = -EEXIST;
58527 @@ -364,6 +369,7 @@ static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
58528                 for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
58529                         if (addr_compare(na->addr[addr_i], addr)) {
58530                                 *nodeid = na->nodeid;
58531 +                               *mark = na->mark;
58532                                 rv = 0;
58533                                 goto unlock;
58534                         }
58535 @@ -412,6 +418,7 @@ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
58536                 new_node->nodeid = nodeid;
58537                 new_node->addr[0] = new_addr;
58538                 new_node->addr_count = 1;
58539 +               new_node->mark = dlm_config.ci_mark;
58540                 list_add(&new_node->list, &dlm_node_addrs);
58541                 spin_unlock(&dlm_node_addrs_spin);
58542                 return 0;
58543 @@ -519,6 +526,23 @@ int dlm_lowcomms_connect_node(int nodeid)
58544         return 0;
58547 +int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
58549 +       struct dlm_node_addr *na;
58551 +       spin_lock(&dlm_node_addrs_spin);
58552 +       na = find_node_addr(nodeid);
58553 +       if (!na) {
58554 +               spin_unlock(&dlm_node_addrs_spin);
58555 +               return -ENOENT;
58556 +       }
58558 +       na->mark = mark;
58559 +       spin_unlock(&dlm_node_addrs_spin);
58561 +       return 0;
58564  static void lowcomms_error_report(struct sock *sk)
58566         struct connection *con;
58567 @@ -685,10 +709,7 @@ static void shutdown_connection(struct connection *con)
58569         int ret;
58571 -       if (cancel_work_sync(&con->swork)) {
58572 -               log_print("canceled swork for node %d", con->nodeid);
58573 -               clear_bit(CF_WRITE_PENDING, &con->flags);
58574 -       }
58575 +       flush_work(&con->swork);
58577         mutex_lock(&con->sock_mutex);
58578         /* nothing to shutdown */
58579 @@ -867,7 +888,7 @@ static int accept_from_sock(struct listen_connection *con)
58581         /* Get the new node's NODEID */
58582         make_sockaddr(&peeraddr, 0, &len);
58583 -       if (addr_to_nodeid(&peeraddr, &nodeid)) {
58584 +       if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
58585                 unsigned char *b=(unsigned char *)&peeraddr;
58586                 log_print("connect from non cluster node");
58587                 print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, 
58588 @@ -876,9 +897,6 @@ static int accept_from_sock(struct listen_connection *con)
58589                 return -1;
58590         }
58592 -       dlm_comm_mark(nodeid, &mark);
58593 -       sock_set_mark(newsock->sk, mark);
58595         log_print("got connection from %d", nodeid);
58597         /*  Check to see if we already have a connection to this node. This
58598 @@ -892,6 +910,8 @@ static int accept_from_sock(struct listen_connection *con)
58599                 goto accept_err;
58600         }
58602 +       sock_set_mark(newsock->sk, mark);
58604         mutex_lock(&newcon->sock_mutex);
58605         if (newcon->sock) {
58606                 struct connection *othercon = newcon->othercon;
58607 @@ -908,6 +928,7 @@ static int accept_from_sock(struct listen_connection *con)
58608                         result = dlm_con_init(othercon, nodeid);
58609                         if (result < 0) {
58610                                 kfree(othercon);
58611 +                               mutex_unlock(&newcon->sock_mutex);
58612                                 goto accept_err;
58613                         }
58615 @@ -1015,8 +1036,6 @@ static void sctp_connect_to_sock(struct connection *con)
58616         struct socket *sock;
58617         unsigned int mark;
58619 -       dlm_comm_mark(con->nodeid, &mark);
58621         mutex_lock(&con->sock_mutex);
58623         /* Some odd races can cause double-connects, ignore them */
58624 @@ -1029,7 +1048,7 @@ static void sctp_connect_to_sock(struct connection *con)
58625         }
58627         memset(&daddr, 0, sizeof(daddr));
58628 -       result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
58629 +       result = nodeid_to_addr(con->nodeid, &daddr, NULL, true, &mark);
58630         if (result < 0) {
58631                 log_print("no address for nodeid %d", con->nodeid);
58632                 goto out;
58633 @@ -1104,13 +1123,11 @@ static void sctp_connect_to_sock(struct connection *con)
58634  static void tcp_connect_to_sock(struct connection *con)
58636         struct sockaddr_storage saddr, src_addr;
58637 +       unsigned int mark;
58638         int addr_len;
58639         struct socket *sock = NULL;
58640 -       unsigned int mark;
58641         int result;
58643 -       dlm_comm_mark(con->nodeid, &mark);
58645         mutex_lock(&con->sock_mutex);
58646         if (con->retries++ > MAX_CONNECT_RETRIES)
58647                 goto out;
58648 @@ -1125,15 +1142,15 @@ static void tcp_connect_to_sock(struct connection *con)
58649         if (result < 0)
58650                 goto out_err;
58652 -       sock_set_mark(sock->sk, mark);
58654         memset(&saddr, 0, sizeof(saddr));
58655 -       result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
58656 +       result = nodeid_to_addr(con->nodeid, &saddr, NULL, false, &mark);
58657         if (result < 0) {
58658                 log_print("no address for nodeid %d", con->nodeid);
58659                 goto out_err;
58660         }
58662 +       sock_set_mark(sock->sk, mark);
58664         add_sock(sock, con);
58666         /* Bind to our cluster-known address connecting to avoid
58667 @@ -1355,9 +1372,11 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
58668         struct writequeue_entry *e;
58669         int offset = 0;
58671 -       if (len > LOWCOMMS_MAX_TX_BUFFER_LEN) {
58672 -               BUILD_BUG_ON(PAGE_SIZE < LOWCOMMS_MAX_TX_BUFFER_LEN);
58673 +       if (len > DEFAULT_BUFFER_SIZE ||
58674 +           len < sizeof(struct dlm_header)) {
58675 +               BUILD_BUG_ON(PAGE_SIZE < DEFAULT_BUFFER_SIZE);
58676                 log_print("failed to allocate a buffer of size %d", len);
58677 +               WARN_ON(1);
58678                 return NULL;
58679         }
58681 @@ -1589,6 +1608,29 @@ static int work_start(void)
58682         return 0;
58685 +static void shutdown_conn(struct connection *con)
58687 +       if (con->shutdown_action)
58688 +               con->shutdown_action(con);
58691 +void dlm_lowcomms_shutdown(void)
58693 +       /* Set all the flags to prevent any
58694 +        * socket activity.
58695 +        */
58696 +       dlm_allow_conn = 0;
58698 +       if (recv_workqueue)
58699 +               flush_workqueue(recv_workqueue);
58700 +       if (send_workqueue)
58701 +               flush_workqueue(send_workqueue);
58703 +       dlm_close_sock(&listen_con.sock);
58705 +       foreach_conn(shutdown_conn);
58708  static void _stop_conn(struct connection *con, bool and_other)
58710         mutex_lock(&con->sock_mutex);
58711 @@ -1610,12 +1652,6 @@ static void stop_conn(struct connection *con)
58712         _stop_conn(con, true);
58715 -static void shutdown_conn(struct connection *con)
58717 -       if (con->shutdown_action)
58718 -               con->shutdown_action(con);
58721  static void connection_release(struct rcu_head *rcu)
58723         struct connection *con = container_of(rcu, struct connection, rcu);
58724 @@ -1672,19 +1708,6 @@ static void work_flush(void)
58726  void dlm_lowcomms_stop(void)
58728 -       /* Set all the flags to prevent any
58729 -          socket activity.
58730 -       */
58731 -       dlm_allow_conn = 0;
58733 -       if (recv_workqueue)
58734 -               flush_workqueue(recv_workqueue);
58735 -       if (send_workqueue)
58736 -               flush_workqueue(send_workqueue);
58738 -       dlm_close_sock(&listen_con.sock);
58740 -       foreach_conn(shutdown_conn);
58741         work_flush();
58742         foreach_conn(free_conn);
58743         work_stop();
58744 diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
58745 index 0918f9376489..48bbc4e18761 100644
58746 --- a/fs/dlm/lowcomms.h
58747 +++ b/fs/dlm/lowcomms.h
58748 @@ -14,13 +14,18 @@
58750  #define LOWCOMMS_MAX_TX_BUFFER_LEN     4096
58752 +/* switch to check if dlm is running */
58753 +extern int dlm_allow_conn;
58755  int dlm_lowcomms_start(void);
58756 +void dlm_lowcomms_shutdown(void);
58757  void dlm_lowcomms_stop(void);
58758  void dlm_lowcomms_exit(void);
58759  int dlm_lowcomms_close(int nodeid);
58760  void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc);
58761  void dlm_lowcomms_commit_buffer(void *mh);
58762  int dlm_lowcomms_connect_node(int nodeid);
58763 +int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark);
58764  int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
58766  #endif                         /* __LOWCOMMS_DOT_H__ */
58767 diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
58768 index fde3a6afe4be..0bedfa8606a2 100644
58769 --- a/fs/dlm/midcomms.c
58770 +++ b/fs/dlm/midcomms.c
58771 @@ -49,9 +49,10 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
58772                  * cannot deliver this message to upper layers
58773                  */
58774                 msglen = get_unaligned_le16(&hd->h_length);
58775 -               if (msglen > DEFAULT_BUFFER_SIZE) {
58776 -                       log_print("received invalid length header: %u, will abort message parsing",
58777 -                                 msglen);
58778 +               if (msglen > DEFAULT_BUFFER_SIZE ||
58779 +                   msglen < sizeof(struct dlm_header)) {
58780 +                       log_print("received invalid length header: %u from node %d, will abort message parsing",
58781 +                                 msglen, nodeid);
58782                         return -EBADMSG;
58783                 }
58785 diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
58786 index cdf40a54a35d..cf772c72ab2b 100644
58787 --- a/fs/ecryptfs/main.c
58788 +++ b/fs/ecryptfs/main.c
58789 @@ -492,6 +492,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
58790                 goto out;
58791         }
58793 +       if (!dev_name) {
58794 +               rc = -EINVAL;
58795 +               err = "Device name cannot be null";
58796 +               goto out;
58797 +       }
58799         rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid);
58800         if (rc) {
58801                 err = "Error parsing options";
58802 diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
58803 index 9ad1615f4474..e8d04d808fa6 100644
58804 --- a/fs/erofs/erofs_fs.h
58805 +++ b/fs/erofs/erofs_fs.h
58806 @@ -75,6 +75,9 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
58807  #define EROFS_I_VERSION_BIT             0
58808  #define EROFS_I_DATALAYOUT_BIT          1
58810 +#define EROFS_I_ALL    \
58811 +       ((1 << (EROFS_I_DATALAYOUT_BIT + EROFS_I_DATALAYOUT_BITS)) - 1)
58813  /* 32-byte reduced form of an ondisk inode */
58814  struct erofs_inode_compact {
58815         __le16 i_format;        /* inode format hints */
58816 diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
58817 index 119fdce1b520..7ed2d7391692 100644
58818 --- a/fs/erofs/inode.c
58819 +++ b/fs/erofs/inode.c
58820 @@ -44,6 +44,13 @@ static struct page *erofs_read_inode(struct inode *inode,
58821         dic = page_address(page) + *ofs;
58822         ifmt = le16_to_cpu(dic->i_format);
58824 +       if (ifmt & ~EROFS_I_ALL) {
58825 +               erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
58826 +                         ifmt, vi->nid);
58827 +               err = -EOPNOTSUPP;
58828 +               goto err_out;
58829 +       }
58831         vi->datalayout = erofs_inode_datalayout(ifmt);
58832         if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
58833                 erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
58834 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
58835 index 3196474cbe24..e42477fcbfa0 100644
58836 --- a/fs/eventpoll.c
58837 +++ b/fs/eventpoll.c
58838 @@ -657,6 +657,12 @@ static void ep_done_scan(struct eventpoll *ep,
58839          */
58840         list_splice(txlist, &ep->rdllist);
58841         __pm_relax(ep->ws);
58843 +       if (!list_empty(&ep->rdllist)) {
58844 +               if (waitqueue_active(&ep->wq))
58845 +                       wake_up(&ep->wq);
58846 +       }
58848         write_unlock_irq(&ep->lock);
58851 diff --git a/fs/exec.c b/fs/exec.c
58852 index 18594f11c31f..c691d4d7720c 100644
58853 --- a/fs/exec.c
58854 +++ b/fs/exec.c
58855 @@ -1008,6 +1008,7 @@ static int exec_mmap(struct mm_struct *mm)
58856         active_mm = tsk->active_mm;
58857         tsk->active_mm = mm;
58858         tsk->mm = mm;
58859 +       lru_gen_add_mm(mm);
58860         /*
58861          * This prevents preemption while active_mm is being loaded and
58862          * it and mm are being updated, which could cause problems for
58863 @@ -1018,6 +1019,7 @@ static int exec_mmap(struct mm_struct *mm)
58864         if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
58865                 local_irq_enable();
58866         activate_mm(active_mm, mm);
58867 +       lru_gen_switch_mm(active_mm, mm);
58868         if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
58869                 local_irq_enable();
58870         tsk->mm->vmacache_seqnum = 0;
58871 diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
58872 index 761c79c3a4ba..411fb0a8da10 100644
58873 --- a/fs/exfat/balloc.c
58874 +++ b/fs/exfat/balloc.c
58875 @@ -141,10 +141,6 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
58876         kfree(sbi->vol_amap);
58880 - * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
58881 - * the cluster heap.
58882 - */
58883  int exfat_set_bitmap(struct inode *inode, unsigned int clu)
58885         int i, b;
58886 @@ -162,10 +158,6 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
58887         return 0;
58891 - * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
58892 - * the cluster heap.
58893 - */
58894  void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
58896         int i, b;
58897 @@ -186,8 +178,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
58898                 int ret_discard;
58900                 ret_discard = sb_issue_discard(sb,
58901 -                       exfat_cluster_to_sector(sbi, clu +
58902 -                                               EXFAT_RESERVED_CLUSTERS),
58903 +                       exfat_cluster_to_sector(sbi, clu),
58904                         (1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
58906                 if (ret_discard == -EOPNOTSUPP) {
58907 diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
58908 index 7541d0b5d706..eda14f630def 100644
58909 --- a/fs/ext4/fast_commit.c
58910 +++ b/fs/ext4/fast_commit.c
58911 @@ -1088,8 +1088,10 @@ static int ext4_fc_perform_commit(journal_t *journal)
58912                 head.fc_tid = cpu_to_le32(
58913                         sbi->s_journal->j_running_transaction->t_tid);
58914                 if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head),
58915 -                       (u8 *)&head, &crc))
58916 +                       (u8 *)&head, &crc)) {
58917 +                       ret = -ENOSPC;
58918                         goto out;
58919 +               }
58920         }
58922         spin_lock(&sbi->s_fc_lock);
58923 @@ -1734,7 +1736,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
58924                 }
58926                 /* Range is mapped and needs a state change */
58927 -               jbd_debug(1, "Converting from %d to %d %lld",
58928 +               jbd_debug(1, "Converting from %ld to %d %lld",
58929                                 map.m_flags & EXT4_MAP_UNWRITTEN,
58930                         ext4_ext_is_unwritten(ex), map.m_pblk);
58931                 ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
58932 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
58933 index 194f5d00fa32..7924634ab0bf 100644
58934 --- a/fs/ext4/file.c
58935 +++ b/fs/ext4/file.c
58936 @@ -371,15 +371,32 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
58937  static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
58938                                  int error, unsigned int flags)
58940 -       loff_t offset = iocb->ki_pos;
58941 +       loff_t pos = iocb->ki_pos;
58942         struct inode *inode = file_inode(iocb->ki_filp);
58944         if (error)
58945                 return error;
58947 -       if (size && flags & IOMAP_DIO_UNWRITTEN)
58948 -               return ext4_convert_unwritten_extents(NULL, inode,
58949 -                                                     offset, size);
58950 +       if (size && flags & IOMAP_DIO_UNWRITTEN) {
58951 +               error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
58952 +               if (error < 0)
58953 +                       return error;
58954 +       }
58955 +       /*
58956 +        * If we are extending the file, we have to update i_size here before
58957 +        * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
58958 +        * buffered reads could zero out too much from page cache pages. Update
58959 +        * of on-disk size will happen later in ext4_dio_write_iter() where
58960 +        * we have enough information to also perform orphan list handling etc.
58961 +        * Note that we perform all extending writes synchronously under
58962 +        * i_rwsem held exclusively so i_size update is safe here in that case.
58963 +        * If the write was not extending, we cannot see pos > i_size here
58964 +        * because operations reducing i_size like truncate wait for all
58965 +        * outstanding DIO before updating i_size.
58966 +        */
58967 +       pos += size;
58968 +       if (pos > i_size_read(inode))
58969 +               i_size_write(inode, pos);
58971         return 0;
58973 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
58974 index 633ae7becd61..71d321b3b984 100644
58975 --- a/fs/ext4/ialloc.c
58976 +++ b/fs/ext4/ialloc.c
58977 @@ -1292,7 +1292,8 @@ struct inode *__ext4_new_inode(struct user_namespace *mnt_userns,
58979         ei->i_extra_isize = sbi->s_want_extra_isize;
58980         ei->i_inline_off = 0;
58981 -       if (ext4_has_feature_inline_data(sb))
58982 +       if (ext4_has_feature_inline_data(sb) &&
58983 +           (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
58984                 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
58985         ret = inode;
58986         err = dquot_alloc_inode(inode);
58987 @@ -1513,6 +1514,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
58988         handle_t *handle;
58989         ext4_fsblk_t blk;
58990         int num, ret = 0, used_blks = 0;
58991 +       unsigned long used_inos = 0;
58993         /* This should not happen, but just to be sure check this */
58994         if (sb_rdonly(sb)) {
58995 @@ -1543,22 +1545,37 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
58996          * used inodes so we need to skip blocks with used inodes in
58997          * inode table.
58998          */
58999 -       if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
59000 -               used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
59001 -                           ext4_itable_unused_count(sb, gdp)),
59002 -                           sbi->s_inodes_per_block);
59004 -       if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
59005 -           ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
59006 -                              ext4_itable_unused_count(sb, gdp)) <
59007 -                             EXT4_FIRST_INO(sb)))) {
59008 -               ext4_error(sb, "Something is wrong with group %u: "
59009 -                          "used itable blocks: %d; "
59010 -                          "itable unused count: %u",
59011 -                          group, used_blks,
59012 -                          ext4_itable_unused_count(sb, gdp));
59013 -               ret = 1;
59014 -               goto err_out;
59015 +       if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
59016 +               used_inos = EXT4_INODES_PER_GROUP(sb) -
59017 +                           ext4_itable_unused_count(sb, gdp);
59018 +               used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
59020 +               /* Bogus inode unused count? */
59021 +               if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
59022 +                       ext4_error(sb, "Something is wrong with group %u: "
59023 +                                  "used itable blocks: %d; "
59024 +                                  "itable unused count: %u",
59025 +                                  group, used_blks,
59026 +                                  ext4_itable_unused_count(sb, gdp));
59027 +                       ret = 1;
59028 +                       goto err_out;
59029 +               }
59031 +               used_inos += group * EXT4_INODES_PER_GROUP(sb);
59032 +               /*
59033 +                * Are there some uninitialized inodes in the inode table
59034 +                * before the first normal inode?
59035 +                */
59036 +               if ((used_blks != sbi->s_itb_per_group) &&
59037 +                    (used_inos < EXT4_FIRST_INO(sb))) {
59038 +                       ext4_error(sb, "Something is wrong with group %u: "
59039 +                                  "itable unused count: %u; "
59040 +                                  "itables initialized count: %ld",
59041 +                                  group, ext4_itable_unused_count(sb, gdp),
59042 +                                  used_inos);
59043 +                       ret = 1;
59044 +                       goto err_out;
59045 +               }
59046         }
59048         blk = ext4_inode_table(sb, gdp) + used_blks;
59049 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
59050 index a2cf35066f46..0796bfa72829 100644
59051 --- a/fs/ext4/ioctl.c
59052 +++ b/fs/ext4/ioctl.c
59053 @@ -315,6 +315,12 @@ static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
59054  static bool dax_compatible(struct inode *inode, unsigned int oldflags,
59055                            unsigned int flags)
59057 +       /* Allow the DAX flag to be changed on inline directories */
59058 +       if (S_ISDIR(inode->i_mode)) {
59059 +               flags &= ~EXT4_INLINE_DATA_FL;
59060 +               oldflags &= ~EXT4_INLINE_DATA_FL;
59061 +       }
59063         if (flags & EXT4_DAX_FL) {
59064                 if ((oldflags & EXT4_DAX_MUT_EXCL) ||
59065                      ext4_test_inode_state(inode,
59066 diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
59067 index 795c3ff2907c..68fbeedd627b 100644
59068 --- a/fs/ext4/mmp.c
59069 +++ b/fs/ext4/mmp.c
59070 @@ -56,7 +56,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
59071         wait_on_buffer(bh);
59072         sb_end_write(sb);
59073         if (unlikely(!buffer_uptodate(bh)))
59074 -               return 1;
59075 +               return -EIO;
59077         return 0;
59079 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
59080 index b9693680463a..77c1cb258262 100644
59081 --- a/fs/ext4/super.c
59082 +++ b/fs/ext4/super.c
59083 @@ -667,9 +667,6 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
59084                         ext4_commit_super(sb);
59085         }
59087 -       if (sb_rdonly(sb) || continue_fs)
59088 -               return;
59090         /*
59091          * We force ERRORS_RO behavior when system is rebooting. Otherwise we
59092          * could panic during 'reboot -f' as the underlying device got already
59093 @@ -679,6 +676,10 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
59094                 panic("EXT4-fs (device %s): panic forced after error\n",
59095                         sb->s_id);
59096         }
59098 +       if (sb_rdonly(sb) || continue_fs)
59099 +               return;
59101         ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
59102         /*
59103          * Make sure updated value of ->s_mount_flags will be visible before
59104 @@ -3023,9 +3024,6 @@ static void ext4_orphan_cleanup(struct super_block *sb,
59105                 sb->s_flags &= ~SB_RDONLY;
59106         }
59107  #ifdef CONFIG_QUOTA
59108 -       /* Needed for iput() to work correctly and not trash data */
59109 -       sb->s_flags |= SB_ACTIVE;
59111         /*
59112          * Turn on quotas which were not enabled for read-only mounts if
59113          * filesystem has quota feature, so that they are updated correctly.
59114 @@ -5561,8 +5559,10 @@ static int ext4_commit_super(struct super_block *sb)
59115         struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
59116         int error = 0;
59118 -       if (!sbh || block_device_ejected(sb))
59119 -               return error;
59120 +       if (!sbh)
59121 +               return -EINVAL;
59122 +       if (block_device_ejected(sb))
59123 +               return -ENODEV;
59125         ext4_update_super(sb);
59127 diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
59128 index 77fa342de38f..582b11afb0d5 100644
59129 --- a/fs/f2fs/compress.c
59130 +++ b/fs/f2fs/compress.c
59131 @@ -123,19 +123,6 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
59132         f2fs_drop_rpages(cc, len, true);
59135 -static void f2fs_put_rpages_mapping(struct address_space *mapping,
59136 -                               pgoff_t start, int len)
59138 -       int i;
59140 -       for (i = 0; i < len; i++) {
59141 -               struct page *page = find_get_page(mapping, start + i);
59143 -               put_page(page);
59144 -               put_page(page);
59145 -       }
59148  static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
59149                 struct writeback_control *wbc, bool redirty, int unlock)
59151 @@ -164,13 +151,14 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
59152         return cc->rpages ? 0 : -ENOMEM;
59155 -void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
59156 +void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
59158         page_array_free(cc->inode, cc->rpages, cc->cluster_size);
59159         cc->rpages = NULL;
59160         cc->nr_rpages = 0;
59161         cc->nr_cpages = 0;
59162 -       cc->cluster_idx = NULL_CLUSTER;
59163 +       if (!reuse)
59164 +               cc->cluster_idx = NULL_CLUSTER;
59167  void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
59168 @@ -351,8 +339,8 @@ static const struct f2fs_compress_ops f2fs_lz4_ops = {
59170  static int zstd_init_compress_ctx(struct compress_ctx *cc)
59172 -       ZSTD_parameters params;
59173 -       ZSTD_CStream *stream;
59174 +       zstd_parameters params;
59175 +       zstd_cstream *stream;
59176         void *workspace;
59177         unsigned int workspace_size;
59178         unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
59179 @@ -361,17 +349,17 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
59180         if (!level)
59181                 level = F2FS_ZSTD_DEFAULT_CLEVEL;
59183 -       params = ZSTD_getParams(level, cc->rlen, 0);
59184 -       workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
59185 +       params = zstd_get_params(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen);
59186 +       workspace_size = zstd_cstream_workspace_bound(&params.cParams);
59188         workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
59189                                         workspace_size, GFP_NOFS);
59190         if (!workspace)
59191                 return -ENOMEM;
59193 -       stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
59194 +       stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
59195         if (!stream) {
59196 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
59197 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
59198                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
59199                                 __func__);
59200                 kvfree(workspace);
59201 @@ -394,9 +382,9 @@ static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
59203  static int zstd_compress_pages(struct compress_ctx *cc)
59205 -       ZSTD_CStream *stream = cc->private2;
59206 -       ZSTD_inBuffer inbuf;
59207 -       ZSTD_outBuffer outbuf;
59208 +       zstd_cstream *stream = cc->private2;
59209 +       zstd_in_buffer inbuf;
59210 +       zstd_out_buffer outbuf;
59211         int src_size = cc->rlen;
59212         int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
59213         int ret;
59214 @@ -409,19 +397,19 @@ static int zstd_compress_pages(struct compress_ctx *cc)
59215         outbuf.dst = cc->cbuf->cdata;
59216         outbuf.size = dst_size;
59218 -       ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
59219 -       if (ZSTD_isError(ret)) {
59220 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
59221 +       ret = zstd_compress_stream(stream, &outbuf, &inbuf);
59222 +       if (zstd_is_error(ret)) {
59223 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
59224                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
59225 -                               __func__, ZSTD_getErrorCode(ret));
59226 +                               __func__, zstd_get_error_code(ret));
59227                 return -EIO;
59228         }
59230 -       ret = ZSTD_endStream(stream, &outbuf);
59231 -       if (ZSTD_isError(ret)) {
59232 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
59233 +       ret = zstd_end_stream(stream, &outbuf);
59234 +       if (zstd_is_error(ret)) {
59235 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
59236                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
59237 -                               __func__, ZSTD_getErrorCode(ret));
59238 +                               __func__, zstd_get_error_code(ret));
59239                 return -EIO;
59240         }
59242 @@ -438,22 +426,22 @@ static int zstd_compress_pages(struct compress_ctx *cc)
59244  static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
59246 -       ZSTD_DStream *stream;
59247 +       zstd_dstream *stream;
59248         void *workspace;
59249         unsigned int workspace_size;
59250         unsigned int max_window_size =
59251                         MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
59253 -       workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
59254 +       workspace_size = zstd_dstream_workspace_bound(max_window_size);
59256         workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
59257                                         workspace_size, GFP_NOFS);
59258         if (!workspace)
59259                 return -ENOMEM;
59261 -       stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
59262 +       stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
59263         if (!stream) {
59264 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
59265 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
59266                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
59267                                 __func__);
59268                 kvfree(workspace);
59269 @@ -475,9 +463,9 @@ static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
59271  static int zstd_decompress_pages(struct decompress_io_ctx *dic)
59273 -       ZSTD_DStream *stream = dic->private2;
59274 -       ZSTD_inBuffer inbuf;
59275 -       ZSTD_outBuffer outbuf;
59276 +       zstd_dstream *stream = dic->private2;
59277 +       zstd_in_buffer inbuf;
59278 +       zstd_out_buffer outbuf;
59279         int ret;
59281         inbuf.pos = 0;
59282 @@ -488,11 +476,11 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
59283         outbuf.dst = dic->rbuf;
59284         outbuf.size = dic->rlen;
59286 -       ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
59287 -       if (ZSTD_isError(ret)) {
59288 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
59289 +       ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
59290 +       if (zstd_is_error(ret)) {
59291 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
59292                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
59293 -                               __func__, ZSTD_getErrorCode(ret));
59294 +                               __func__, zstd_get_error_code(ret));
59295                 return -EIO;
59296         }
59298 @@ -1048,7 +1036,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
59299                 }
59301                 if (PageUptodate(page))
59302 -                       unlock_page(page);
59303 +                       f2fs_put_page(page, 1);
59304                 else
59305                         f2fs_compress_ctx_add_page(cc, page);
59306         }
59307 @@ -1058,33 +1046,35 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
59309                 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
59310                                         &last_block_in_bio, false, true);
59311 -               f2fs_destroy_compress_ctx(cc);
59312 +               f2fs_put_rpages(cc);
59313 +               f2fs_destroy_compress_ctx(cc, true);
59314                 if (ret)
59315 -                       goto release_pages;
59316 +                       goto out;
59317                 if (bio)
59318                         f2fs_submit_bio(sbi, bio, DATA);
59320                 ret = f2fs_init_compress_ctx(cc);
59321                 if (ret)
59322 -                       goto release_pages;
59323 +                       goto out;
59324         }
59326         for (i = 0; i < cc->cluster_size; i++) {
59327                 f2fs_bug_on(sbi, cc->rpages[i]);
59329                 page = find_lock_page(mapping, start_idx + i);
59330 -               f2fs_bug_on(sbi, !page);
59331 +               if (!page) {
59332 +                       /* page can be truncated */
59333 +                       goto release_and_retry;
59334 +               }
59336                 f2fs_wait_on_page_writeback(page, DATA, true, true);
59338                 f2fs_compress_ctx_add_page(cc, page);
59339 -               f2fs_put_page(page, 0);
59341                 if (!PageUptodate(page)) {
59342 +release_and_retry:
59343 +                       f2fs_put_rpages(cc);
59344                         f2fs_unlock_rpages(cc, i + 1);
59345 -                       f2fs_put_rpages_mapping(mapping, start_idx,
59346 -                                       cc->cluster_size);
59347 -                       f2fs_destroy_compress_ctx(cc);
59348 +                       f2fs_destroy_compress_ctx(cc, true);
59349                         goto retry;
59350                 }
59351         }
59352 @@ -1115,10 +1105,10 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
59353         }
59355  unlock_pages:
59356 +       f2fs_put_rpages(cc);
59357         f2fs_unlock_rpages(cc, i);
59358 -release_pages:
59359 -       f2fs_put_rpages_mapping(mapping, start_idx, i);
59360 -       f2fs_destroy_compress_ctx(cc);
59361 +       f2fs_destroy_compress_ctx(cc, true);
59362 +out:
59363         return ret;
59366 @@ -1153,7 +1143,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
59367                 set_cluster_dirty(&cc);
59369         f2fs_put_rpages_wbc(&cc, NULL, false, 1);
59370 -       f2fs_destroy_compress_ctx(&cc);
59371 +       f2fs_destroy_compress_ctx(&cc, false);
59373         return first_index;
59375 @@ -1372,7 +1362,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
59376         f2fs_put_rpages(cc);
59377         page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
59378         cc->cpages = NULL;
59379 -       f2fs_destroy_compress_ctx(cc);
59380 +       f2fs_destroy_compress_ctx(cc, false);
59381         return 0;
59383  out_destroy_crypt:
59384 @@ -1383,7 +1373,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
59385         for (i = 0; i < cc->nr_cpages; i++) {
59386                 if (!cc->cpages[i])
59387                         continue;
59388 -               f2fs_put_page(cc->cpages[i], 1);
59389 +               f2fs_compress_free_page(cc->cpages[i]);
59390 +               cc->cpages[i] = NULL;
59391         }
59392  out_put_cic:
59393         kmem_cache_free(cic_entry_slab, cic);
59394 @@ -1533,7 +1524,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
59395         err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
59396         f2fs_put_rpages_wbc(cc, wbc, false, 0);
59397  destroy_out:
59398 -       f2fs_destroy_compress_ctx(cc);
59399 +       f2fs_destroy_compress_ctx(cc, false);
59400         return err;
59403 diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
59404 index 4e5257c763d0..8804a5d51380 100644
59405 --- a/fs/f2fs/data.c
59406 +++ b/fs/f2fs/data.c
59407 @@ -2276,7 +2276,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
59408                                                         max_nr_pages,
59409                                                         &last_block_in_bio,
59410                                                         rac != NULL, false);
59411 -                               f2fs_destroy_compress_ctx(&cc);
59412 +                               f2fs_destroy_compress_ctx(&cc, false);
59413                                 if (ret)
59414                                         goto set_error_page;
59415                         }
59416 @@ -2321,7 +2321,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
59417                                                         max_nr_pages,
59418                                                         &last_block_in_bio,
59419                                                         rac != NULL, false);
59420 -                               f2fs_destroy_compress_ctx(&cc);
59421 +                               f2fs_destroy_compress_ctx(&cc, false);
59422                         }
59423                 }
59424  #endif
59425 @@ -3022,7 +3022,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
59426                 }
59427         }
59428         if (f2fs_compressed_file(inode))
59429 -               f2fs_destroy_compress_ctx(&cc);
59430 +               f2fs_destroy_compress_ctx(&cc, false);
59431  #endif
59432         if (retry) {
59433                 index = 0;
59434 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
59435 index e2d302ae3a46..f3fabb1edfe9 100644
59436 --- a/fs/f2fs/f2fs.h
59437 +++ b/fs/f2fs/f2fs.h
59438 @@ -3376,6 +3376,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
59439  int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
59440  void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
59441  int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
59442 +bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
59443  void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
59444  void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
59445  void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
59446 @@ -3383,7 +3384,7 @@ void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
59447                         unsigned int *newseg, bool new_sec, int dir);
59448  void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
59449                                         unsigned int start, unsigned int end);
59450 -void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type);
59451 +void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type);
59452  void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
59453  int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
59454  bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
59455 @@ -3547,7 +3548,7 @@ void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
59456  int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
59457  void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
59458  block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
59459 -int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
59460 +int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
59461                         unsigned int segno);
59462  void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
59463  int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
59464 @@ -3949,7 +3950,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
59465  void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
59466  void f2fs_put_page_dic(struct page *page);
59467  int f2fs_init_compress_ctx(struct compress_ctx *cc);
59468 -void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
59469 +void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
59470  void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
59471  int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
59472  void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
59473 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
59474 index d26ff2ae3f5e..dc79694e512c 100644
59475 --- a/fs/f2fs/file.c
59476 +++ b/fs/f2fs/file.c
59477 @@ -1619,9 +1619,10 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
59478         struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
59479                         .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
59480                         .m_may_create = true };
59481 -       pgoff_t pg_end;
59482 +       pgoff_t pg_start, pg_end;
59483         loff_t new_size = i_size_read(inode);
59484         loff_t off_end;
59485 +       block_t expanded = 0;
59486         int err;
59488         err = inode_newsize_ok(inode, (len + offset));
59489 @@ -1634,11 +1635,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
59491         f2fs_balance_fs(sbi, true);
59493 +       pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
59494         pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
59495         off_end = (offset + len) & (PAGE_SIZE - 1);
59497 -       map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
59498 -       map.m_len = pg_end - map.m_lblk;
59499 +       map.m_lblk = pg_start;
59500 +       map.m_len = pg_end - pg_start;
59501         if (off_end)
59502                 map.m_len++;
59504 @@ -1646,19 +1648,15 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
59505                 return 0;
59507         if (f2fs_is_pinned_file(inode)) {
59508 -               block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
59509 -                                       sbi->log_blocks_per_seg;
59510 -               block_t done = 0;
59512 -               if (map.m_len % sbi->blocks_per_seg)
59513 -                       len += sbi->blocks_per_seg;
59514 +               block_t sec_blks = BLKS_PER_SEC(sbi);
59515 +               block_t sec_len = roundup(map.m_len, sec_blks);
59517 -               map.m_len = sbi->blocks_per_seg;
59518 +               map.m_len = sec_blks;
59519  next_alloc:
59520                 if (has_not_enough_free_secs(sbi, 0,
59521                         GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
59522                         down_write(&sbi->gc_lock);
59523 -                       err = f2fs_gc(sbi, true, false, NULL_SEGNO);
59524 +                       err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
59525                         if (err && err != -ENODATA && err != -EAGAIN)
59526                                 goto out_err;
59527                 }
59528 @@ -1666,7 +1664,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
59529                 down_write(&sbi->pin_sem);
59531                 f2fs_lock_op(sbi);
59532 -               f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
59533 +               f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
59534                 f2fs_unlock_op(sbi);
59536                 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
59537 @@ -1674,24 +1672,25 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
59539                 up_write(&sbi->pin_sem);
59541 -               done += map.m_len;
59542 -               len -= map.m_len;
59543 +               expanded += map.m_len;
59544 +               sec_len -= map.m_len;
59545                 map.m_lblk += map.m_len;
59546 -               if (!err && len)
59547 +               if (!err && sec_len)
59548                         goto next_alloc;
59550 -               map.m_len = done;
59551 +               map.m_len = expanded;
59552         } else {
59553                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
59554 +               expanded = map.m_len;
59555         }
59556  out_err:
59557         if (err) {
59558                 pgoff_t last_off;
59560 -               if (!map.m_len)
59561 +               if (!expanded)
59562                         return err;
59564 -               last_off = map.m_lblk + map.m_len - 1;
59565 +               last_off = pg_start + expanded - 1;
59567                 /* update new size to the failed position */
59568                 new_size = (last_off == pg_end) ? offset + len :
59569 @@ -2489,7 +2488,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
59570                 down_write(&sbi->gc_lock);
59571         }
59573 -       ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
59574 +       ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
59575  out:
59576         mnt_drop_write_file(filp);
59577         return ret;
59578 @@ -2525,7 +2524,8 @@ static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
59579                 down_write(&sbi->gc_lock);
59580         }
59582 -       ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start));
59583 +       ret = f2fs_gc(sbi, range->sync, true, false,
59584 +                               GET_SEGNO(sbi, range->start));
59585         if (ret) {
59586                 if (ret == -EBUSY)
59587                         ret = -EAGAIN;
59588 @@ -2978,7 +2978,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
59589                 sm->last_victim[GC_CB] = end_segno + 1;
59590                 sm->last_victim[GC_GREEDY] = end_segno + 1;
59591                 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
59592 -               ret = f2fs_gc(sbi, true, true, start_segno);
59593 +               ret = f2fs_gc(sbi, true, true, true, start_segno);
59594                 if (ret == -EAGAIN)
59595                         ret = 0;
59596                 else if (ret < 0)
59597 diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
59598 index 39330ad3c44e..a8567cb47621 100644
59599 --- a/fs/f2fs/gc.c
59600 +++ b/fs/f2fs/gc.c
59601 @@ -112,7 +112,7 @@ static int gc_thread_func(void *data)
59602                 sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
59604                 /* if return value is not zero, no victim was selected */
59605 -               if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO))
59606 +               if (f2fs_gc(sbi, sync_mode, true, false, NULL_SEGNO))
59607                         wait_ms = gc_th->no_gc_sleep_time;
59609                 trace_f2fs_background_gc(sbi->sb, wait_ms,
59610 @@ -392,10 +392,6 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
59611                 if (p->gc_mode == GC_AT &&
59612                         get_valid_blocks(sbi, segno, true) == 0)
59613                         return;
59615 -               if (p->alloc_mode == AT_SSR &&
59616 -                       get_seg_entry(sbi, segno)->ckpt_valid_blocks == 0)
59617 -                       return;
59618         }
59620         for (i = 0; i < sbi->segs_per_sec; i++)
59621 @@ -728,11 +724,27 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
59623                 if (sec_usage_check(sbi, secno))
59624                         goto next;
59626                 /* Don't touch checkpointed data */
59627 -               if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
59628 -                                       get_ckpt_valid_blocks(sbi, segno) &&
59629 -                                       p.alloc_mode == LFS))
59630 -                       goto next;
59631 +               if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
59632 +                       if (p.alloc_mode == LFS) {
59633 +                               /*
59634 +                                * LFS is set to find source section during GC.
59635 +                                * The victim should have no checkpointed data.
59636 +                                */
59637 +                               if (get_ckpt_valid_blocks(sbi, segno, true))
59638 +                                       goto next;
59639 +                       } else {
59640 +                               /*
59641 +                                * SSR | AT_SSR are set to find target segment
59642 +                                * for writes which can be full by checkpointed
59643 +                                * and newly written blocks.
59644 +                                */
59645 +                               if (!f2fs_segment_has_free_slot(sbi, segno))
59646 +                                       goto next;
59647 +                       }
59648 +               }
59650                 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
59651                         goto next;
59653 @@ -1354,7 +1366,8 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
59654   * the victim data block is ignored.
59655   */
59656  static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
59657 -               struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
59658 +               struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
59659 +               bool force_migrate)
59661         struct super_block *sb = sbi->sb;
59662         struct f2fs_summary *entry;
59663 @@ -1383,8 +1396,8 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
59664                  * race condition along with SSR block allocation.
59665                  */
59666                 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
59667 -                               get_valid_blocks(sbi, segno, true) ==
59668 -                                                       BLKS_PER_SEC(sbi))
59669 +                       (!force_migrate && get_valid_blocks(sbi, segno, true) ==
59670 +                                                       BLKS_PER_SEC(sbi)))
59671                         return submitted;
59673                 if (check_valid_map(sbi, segno, off) == 0)
59674 @@ -1519,7 +1532,8 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
59676  static int do_garbage_collect(struct f2fs_sb_info *sbi,
59677                                 unsigned int start_segno,
59678 -                               struct gc_inode_list *gc_list, int gc_type)
59679 +                               struct gc_inode_list *gc_list, int gc_type,
59680 +                               bool force_migrate)
59682         struct page *sum_page;
59683         struct f2fs_summary_block *sum;
59684 @@ -1606,7 +1620,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
59685                                                                 gc_type);
59686                 else
59687                         submitted += gc_data_segment(sbi, sum->entries, gc_list,
59688 -                                                       segno, gc_type);
59689 +                                                       segno, gc_type,
59690 +                                                       force_migrate);
59692                 stat_inc_seg_count(sbi, type, gc_type);
59693                 migrated++;
59694 @@ -1634,7 +1649,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
59697  int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
59698 -                       bool background, unsigned int segno)
59699 +                       bool background, bool force, unsigned int segno)
59701         int gc_type = sync ? FG_GC : BG_GC;
59702         int sec_freed = 0, seg_freed = 0, total_freed = 0;
59703 @@ -1696,7 +1711,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
59704         if (ret)
59705                 goto stop;
59707 -       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
59708 +       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
59709         if (gc_type == FG_GC &&
59710                 seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
59711                 sec_freed++;
59712 @@ -1835,7 +1850,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
59713                         .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
59714                 };
59716 -               do_garbage_collect(sbi, segno, &gc_list, FG_GC);
59717 +               do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
59718                 put_gc_inode(&gc_list);
59720                 if (!gc_only && get_valid_blocks(sbi, segno, true)) {
59721 @@ -1974,7 +1989,20 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
59723         /* stop CP to protect MAIN_SEC in free_segment_range */
59724         f2fs_lock_op(sbi);
59726 +       spin_lock(&sbi->stat_lock);
59727 +       if (shrunk_blocks + valid_user_blocks(sbi) +
59728 +               sbi->current_reserved_blocks + sbi->unusable_block_count +
59729 +               F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
59730 +               err = -ENOSPC;
59731 +       spin_unlock(&sbi->stat_lock);
59733 +       if (err)
59734 +               goto out_unlock;
59736         err = free_segment_range(sbi, secs, true);
59738 +out_unlock:
59739         f2fs_unlock_op(sbi);
59740         up_write(&sbi->gc_lock);
59741         if (err)
59742 diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
59743 index 993caefcd2bb..92652ca7a7c8 100644
59744 --- a/fs/f2fs/inline.c
59745 +++ b/fs/f2fs/inline.c
59746 @@ -219,7 +219,8 @@ int f2fs_convert_inline_inode(struct inode *inode)
59748         f2fs_put_page(page, 1);
59750 -       f2fs_balance_fs(sbi, dn.node_changed);
59751 +       if (!err)
59752 +               f2fs_balance_fs(sbi, dn.node_changed);
59754         return err;
59756 diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
59757 index 4b0e2e3c2c88..45c8cf1afe66 100644
59758 --- a/fs/f2fs/node.c
59759 +++ b/fs/f2fs/node.c
59760 @@ -2785,6 +2785,9 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
59761                 struct f2fs_nat_entry raw_ne;
59762                 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
59764 +               if (f2fs_check_nid_range(sbi, nid))
59765 +                       continue;
59767                 raw_ne = nat_in_journal(journal, i);
59769                 ne = __lookup_nat_cache(nm_i, nid);
59770 diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
59771 index c2866561263e..77456d228f2a 100644
59772 --- a/fs/f2fs/segment.c
59773 +++ b/fs/f2fs/segment.c
59774 @@ -324,23 +324,27 @@ void f2fs_drop_inmem_pages(struct inode *inode)
59775         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
59776         struct f2fs_inode_info *fi = F2FS_I(inode);
59778 -       while (!list_empty(&fi->inmem_pages)) {
59779 +       do {
59780                 mutex_lock(&fi->inmem_lock);
59781 +               if (list_empty(&fi->inmem_pages)) {
59782 +                       fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
59784 +                       spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
59785 +                       if (!list_empty(&fi->inmem_ilist))
59786 +                               list_del_init(&fi->inmem_ilist);
59787 +                       if (f2fs_is_atomic_file(inode)) {
59788 +                               clear_inode_flag(inode, FI_ATOMIC_FILE);
59789 +                               sbi->atomic_files--;
59790 +                       }
59791 +                       spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
59793 +                       mutex_unlock(&fi->inmem_lock);
59794 +                       break;
59795 +               }
59796                 __revoke_inmem_pages(inode, &fi->inmem_pages,
59797                                                 true, false, true);
59798                 mutex_unlock(&fi->inmem_lock);
59799 -       }
59801 -       fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
59803 -       spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
59804 -       if (!list_empty(&fi->inmem_ilist))
59805 -               list_del_init(&fi->inmem_ilist);
59806 -       if (f2fs_is_atomic_file(inode)) {
59807 -               clear_inode_flag(inode, FI_ATOMIC_FILE);
59808 -               sbi->atomic_files--;
59809 -       }
59810 -       spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
59811 +       } while (1);
59814  void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
59815 @@ -504,7 +508,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
59816          */
59817         if (has_not_enough_free_secs(sbi, 0, 0)) {
59818                 down_write(&sbi->gc_lock);
59819 -               f2fs_gc(sbi, false, false, NULL_SEGNO);
59820 +               f2fs_gc(sbi, false, false, false, NULL_SEGNO);
59821         }
59824 @@ -861,7 +865,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
59825         mutex_lock(&dirty_i->seglist_lock);
59827         valid_blocks = get_valid_blocks(sbi, segno, false);
59828 -       ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);
59829 +       ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
59831         if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
59832                 ckpt_valid_blocks == usable_blocks)) {
59833 @@ -946,7 +950,7 @@ static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
59834         for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
59835                 if (get_valid_blocks(sbi, segno, false))
59836                         continue;
59837 -               if (get_ckpt_valid_blocks(sbi, segno))
59838 +               if (get_ckpt_valid_blocks(sbi, segno, false))
59839                         continue;
59840                 mutex_unlock(&dirty_i->seglist_lock);
59841                 return segno;
59842 @@ -2636,6 +2640,23 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
59843                 seg->next_blkoff++;
59846 +bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
59848 +       struct seg_entry *se = get_seg_entry(sbi, segno);
59849 +       int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
59850 +       unsigned long *target_map = SIT_I(sbi)->tmp_map;
59851 +       unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
59852 +       unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
59853 +       int i, pos;
59855 +       for (i = 0; i < entries; i++)
59856 +               target_map[i] = ckpt_map[i] | cur_map[i];
59858 +       pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, 0);
59860 +       return pos < sbi->blocks_per_seg;
59863  /*
59864   * This function always allocates a used segment(from dirty seglist) by SSR
59865   * manner, so it should recover the existing segment information of valid blocks
59866 @@ -2893,7 +2914,8 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
59867         up_read(&SM_I(sbi)->curseg_lock);
59870 -static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
59871 +static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
59872 +                                                               bool new_sec)
59874         struct curseg_info *curseg = CURSEG_I(sbi, type);
59875         unsigned int old_segno;
59876 @@ -2901,32 +2923,42 @@ static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
59877         if (!curseg->inited)
59878                 goto alloc;
59880 -       if (!curseg->next_blkoff &&
59881 -               !get_valid_blocks(sbi, curseg->segno, false) &&
59882 -               !get_ckpt_valid_blocks(sbi, curseg->segno))
59883 -               return;
59884 +       if (curseg->next_blkoff ||
59885 +               get_valid_blocks(sbi, curseg->segno, new_sec))
59886 +               goto alloc;
59888 +       if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
59889 +               return;
59890  alloc:
59891         old_segno = curseg->segno;
59892         SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
59893         locate_dirty_segment(sbi, old_segno);
59896 -void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type)
59897 +static void __allocate_new_section(struct f2fs_sb_info *sbi, int type)
59899 +       __allocate_new_segment(sbi, type, true);
59902 +void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type)
59904 +       down_read(&SM_I(sbi)->curseg_lock);
59905         down_write(&SIT_I(sbi)->sentry_lock);
59906 -       __allocate_new_segment(sbi, type);
59907 +       __allocate_new_section(sbi, type);
59908         up_write(&SIT_I(sbi)->sentry_lock);
59909 +       up_read(&SM_I(sbi)->curseg_lock);
59912  void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
59914         int i;
59916 +       down_read(&SM_I(sbi)->curseg_lock);
59917         down_write(&SIT_I(sbi)->sentry_lock);
59918         for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
59919 -               __allocate_new_segment(sbi, i);
59920 +               __allocate_new_segment(sbi, i, false);
59921         up_write(&SIT_I(sbi)->sentry_lock);
59922 +       up_read(&SM_I(sbi)->curseg_lock);
59925  static const struct segment_allocation default_salloc_ops = {
59926 @@ -3365,12 +3397,12 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
59927                 f2fs_inode_chksum_set(sbi, page);
59928         }
59930 -       if (F2FS_IO_ALIGNED(sbi))
59931 -               fio->retry = false;
59933         if (fio) {
59934                 struct f2fs_bio_info *io;
59936 +               if (F2FS_IO_ALIGNED(sbi))
59937 +                       fio->retry = false;
59939                 INIT_LIST_HEAD(&fio->list);
59940                 fio->in_list = true;
59941                 io = sbi->write_io[fio->type] + fio->temp;
59942 diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
59943 index e9a7a637d688..afb175739de5 100644
59944 --- a/fs/f2fs/segment.h
59945 +++ b/fs/f2fs/segment.h
59946 @@ -361,8 +361,20 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
59949  static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
59950 -                               unsigned int segno)
59951 +                               unsigned int segno, bool use_section)
59953 +       if (use_section && __is_large_section(sbi)) {
59954 +               unsigned int start_segno = START_SEGNO(segno);
59955 +               unsigned int blocks = 0;
59956 +               int i;
59958 +               for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
59959 +                       struct seg_entry *se = get_seg_entry(sbi, start_segno);
59961 +                       blocks += se->ckpt_valid_blocks;
59962 +               }
59963 +               return blocks;
59964 +       }
59965         return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
59968 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
59969 index 82592b19b4e0..3c8426709f34 100644
59970 --- a/fs/f2fs/super.c
59971 +++ b/fs/f2fs/super.c
59972 @@ -525,7 +525,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
59973         if (kstrtouint(str + 1, 10, &level))
59974                 return -EINVAL;
59976 -       if (!level || level > ZSTD_maxCLevel()) {
59977 +       if (!level || level > zstd_max_clevel()) {
59978                 f2fs_info(sbi, "invalid zstd compress level: %d", level);
59979                 return -EINVAL;
59980         }
59981 @@ -1865,7 +1865,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
59983         while (!f2fs_time_over(sbi, DISABLE_TIME)) {
59984                 down_write(&sbi->gc_lock);
59985 -               err = f2fs_gc(sbi, true, false, NULL_SEGNO);
59986 +               err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
59987                 if (err == -ENODATA) {
59988                         err = 0;
59989                         break;
59990 @@ -3929,10 +3929,18 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
59991                  * previous checkpoint was not done by clean system shutdown.
59992                  */
59993                 if (f2fs_hw_is_readonly(sbi)) {
59994 -                       if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))
59995 -                               f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
59996 -                       else
59997 -                               f2fs_info(sbi, "write access unavailable, skipping recovery");
59998 +                       if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
59999 +                               err = f2fs_recover_fsync_data(sbi, true);
60000 +                               if (err > 0) {
60001 +                                       err = -EROFS;
60002 +                                       f2fs_err(sbi, "Need to recover fsync data, but "
60003 +                                               "write access unavailable, please try "
60004 +                                               "mount w/ disable_roll_forward or norecovery");
60005 +                               }
60006 +                               if (err < 0)
60007 +                                       goto free_meta;
60008 +                       }
60009 +                       f2fs_info(sbi, "write access unavailable, skipping recovery");
60010                         goto reset_checkpoint;
60011                 }
60013 diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
60014 index 054ec852b5ea..15ba36926fad 100644
60015 --- a/fs/f2fs/verity.c
60016 +++ b/fs/f2fs/verity.c
60017 @@ -152,40 +152,73 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
60018                                   size_t desc_size, u64 merkle_tree_size)
60020         struct inode *inode = file_inode(filp);
60021 +       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
60022         u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size;
60023         struct fsverity_descriptor_location dloc = {
60024                 .version = cpu_to_le32(F2FS_VERIFY_VER),
60025                 .size = cpu_to_le32(desc_size),
60026                 .pos = cpu_to_le64(desc_pos),
60027         };
60028 -       int err = 0;
60029 +       int err = 0, err2 = 0;
60031 -       if (desc != NULL) {
60032 -               /* Succeeded; write the verity descriptor. */
60033 -               err = pagecache_write(inode, desc, desc_size, desc_pos);
60034 +       /*
60035 +        * If an error already occurred (which fs/verity/ signals by passing
60036 +        * desc == NULL), then only clean-up is needed.
60037 +        */
60038 +       if (desc == NULL)
60039 +               goto cleanup;
60041 -               /* Write all pages before clearing FI_VERITY_IN_PROGRESS. */
60042 -               if (!err)
60043 -                       err = filemap_write_and_wait(inode->i_mapping);
60044 -       }
60045 +       /* Append the verity descriptor. */
60046 +       err = pagecache_write(inode, desc, desc_size, desc_pos);
60047 +       if (err)
60048 +               goto cleanup;
60050 +       /*
60051 +        * Write all pages (both data and verity metadata).  Note that this must
60052 +        * happen before clearing FI_VERITY_IN_PROGRESS; otherwise pages beyond
60053 +        * i_size won't be written properly.  For crash consistency, this also
60054 +        * must happen before the verity inode flag gets persisted.
60055 +        */
60056 +       err = filemap_write_and_wait(inode->i_mapping);
60057 +       if (err)
60058 +               goto cleanup;
60060 +       /* Set the verity xattr. */
60061 +       err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
60062 +                           F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
60063 +                           NULL, XATTR_CREATE);
60064 +       if (err)
60065 +               goto cleanup;
60067 -       /* If we failed, truncate anything we wrote past i_size. */
60068 -       if (desc == NULL || err)
60069 -               f2fs_truncate(inode);
60070 +       /* Finally, set the verity inode flag. */
60071 +       file_set_verity(inode);
60072 +       f2fs_set_inode_flags(inode);
60073 +       f2fs_mark_inode_dirty_sync(inode, true);
60075         clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
60076 +       return 0;
60078 -       if (desc != NULL && !err) {
60079 -               err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
60080 -                                   F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
60081 -                                   NULL, XATTR_CREATE);
60082 -               if (!err) {
60083 -                       file_set_verity(inode);
60084 -                       f2fs_set_inode_flags(inode);
60085 -                       f2fs_mark_inode_dirty_sync(inode, true);
60086 -               }
60087 +cleanup:
60088 +       /*
60089 +        * Verity failed to be enabled, so clean up by truncating any verity
60090 +        * metadata that was written beyond i_size (both from cache and from
60091 +        * disk) and clearing FI_VERITY_IN_PROGRESS.
60092 +        *
60093 +        * Taking i_gc_rwsem[WRITE] is needed to stop f2fs garbage collection
60094 +        * from re-instantiating cached pages we are truncating (since unlike
60095 +        * normal file accesses, garbage collection isn't limited by i_size).
60096 +        */
60097 +       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
60098 +       truncate_inode_pages(inode->i_mapping, inode->i_size);
60099 +       err2 = f2fs_truncate(inode);
60100 +       if (err2) {
60101 +               f2fs_err(sbi, "Truncating verity metadata failed (errno=%d)",
60102 +                        err2);
60103 +               set_sbi_flag(sbi, SBI_NEED_FSCK);
60104         }
60105 -       return err;
60106 +       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
60107 +       clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
60108 +       return err ?: err2;
60111  static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
60112 diff --git a/fs/file.c b/fs/file.c
60113 index f633348029a5..b56c4dd78a19 100644
60114 --- a/fs/file.c
60115 +++ b/fs/file.c
60116 @@ -771,6 +771,7 @@ int __close_fd_get_file(unsigned int fd, struct file **res)
60117         *res = NULL;
60118         return -ENOENT;
60120 +EXPORT_SYMBOL(close_fd_get_file);
60122  /*
60123   * variant of close_fd that gets a ref on the file for later fput.
60124 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
60125 index 45082269e698..a37528b51798 100644
60126 --- a/fs/fuse/cuse.c
60127 +++ b/fs/fuse/cuse.c
60128 @@ -627,6 +627,8 @@ static int __init cuse_init(void)
60129         cuse_channel_fops.owner         = THIS_MODULE;
60130         cuse_channel_fops.open          = cuse_channel_open;
60131         cuse_channel_fops.release       = cuse_channel_release;
60132 +       /* CUSE is not prepared for FUSE_DEV_IOC_CLONE */
60133 +       cuse_channel_fops.unlocked_ioctl        = NULL;
60135         cuse_class = class_create(THIS_MODULE, "cuse");
60136         if (IS_ERR(cuse_class))
60137 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
60138 index c0fee830a34e..f784c118f00f 100644
60139 --- a/fs/fuse/dev.c
60140 +++ b/fs/fuse/dev.c
60141 @@ -784,7 +784,8 @@ static int fuse_check_page(struct page *page)
60142                1 << PG_lru |
60143                1 << PG_active |
60144                1 << PG_reclaim |
60145 -              1 << PG_waiters))) {
60146 +              1 << PG_waiters |
60147 +              LRU_GEN_MASK | LRU_USAGE_MASK))) {
60148                 dump_page(page, "fuse: trying to steal weird page");
60149                 return 1;
60150         }
60151 @@ -2233,11 +2234,8 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
60152         int oldfd;
60153         struct fuse_dev *fud = NULL;
60155 -       if (_IOC_TYPE(cmd) != FUSE_DEV_IOC_MAGIC)
60156 -               return -ENOTTY;
60158 -       switch (_IOC_NR(cmd)) {
60159 -       case _IOC_NR(FUSE_DEV_IOC_CLONE):
60160 +       switch (cmd) {
60161 +       case FUSE_DEV_IOC_CLONE:
60162                 res = -EFAULT;
60163                 if (!get_user(oldfd, (__u32 __user *)arg)) {
60164                         struct file *old = fget(oldfd);
60165 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
60166 index 8cccecb55fb8..6e6d1e599869 100644
60167 --- a/fs/fuse/file.c
60168 +++ b/fs/fuse/file.c
60169 @@ -1099,6 +1099,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
60170         struct fuse_file *ff = file->private_data;
60171         struct fuse_mount *fm = ff->fm;
60172         unsigned int offset, i;
60173 +       bool short_write;
60174         int err;
60176         for (i = 0; i < ap->num_pages; i++)
60177 @@ -1113,32 +1114,38 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
60178         if (!err && ia->write.out.size > count)
60179                 err = -EIO;
60181 +       short_write = ia->write.out.size < count;
60182         offset = ap->descs[0].offset;
60183         count = ia->write.out.size;
60184         for (i = 0; i < ap->num_pages; i++) {
60185                 struct page *page = ap->pages[i];
60187 -               if (!err && !offset && count >= PAGE_SIZE)
60188 -                       SetPageUptodate(page);
60190 -               if (count > PAGE_SIZE - offset)
60191 -                       count -= PAGE_SIZE - offset;
60192 -               else
60193 -                       count = 0;
60194 -               offset = 0;
60196 -               unlock_page(page);
60197 +               if (err) {
60198 +                       ClearPageUptodate(page);
60199 +               } else {
60200 +                       if (count >= PAGE_SIZE - offset)
60201 +                               count -= PAGE_SIZE - offset;
60202 +                       else {
60203 +                               if (short_write)
60204 +                                       ClearPageUptodate(page);
60205 +                               count = 0;
60206 +                       }
60207 +                       offset = 0;
60208 +               }
60209 +               if (ia->write.page_locked && (i == ap->num_pages - 1))
60210 +                       unlock_page(page);
60211                 put_page(page);
60212         }
60214         return err;
60217 -static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
60218 +static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
60219                                      struct address_space *mapping,
60220                                      struct iov_iter *ii, loff_t pos,
60221                                      unsigned int max_pages)
60223 +       struct fuse_args_pages *ap = &ia->ap;
60224         struct fuse_conn *fc = get_fuse_conn(mapping->host);
60225         unsigned offset = pos & (PAGE_SIZE - 1);
60226         size_t count = 0;
60227 @@ -1191,6 +1198,16 @@ static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
60228                 if (offset == PAGE_SIZE)
60229                         offset = 0;
60231 +               /* If we copied full page, mark it uptodate */
60232 +               if (tmp == PAGE_SIZE)
60233 +                       SetPageUptodate(page);
60235 +               if (PageUptodate(page)) {
60236 +                       unlock_page(page);
60237 +               } else {
60238 +                       ia->write.page_locked = true;
60239 +                       break;
60240 +               }
60241                 if (!fc->big_writes)
60242                         break;
60243         } while (iov_iter_count(ii) && count < fc->max_write &&
60244 @@ -1234,7 +1251,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
60245                         break;
60246                 }
60248 -               count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages);
60249 +               count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
60250                 if (count <= 0) {
60251                         err = count;
60252                 } else {
60253 @@ -1759,8 +1776,17 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
60254                 container_of(args, typeof(*wpa), ia.ap.args);
60255         struct inode *inode = wpa->inode;
60256         struct fuse_inode *fi = get_fuse_inode(inode);
60257 +       struct fuse_conn *fc = get_fuse_conn(inode);
60259         mapping_set_error(inode->i_mapping, error);
60260 +       /*
60261 +        * A writeback finished and this might have updated mtime/ctime on
60262 +        * server making local mtime/ctime stale.  Hence invalidate attrs.
60263 +        * Do this only if writeback_cache is not enabled.  If writeback_cache
60264 +        * is enabled, we trust local ctime/mtime.
60265 +        */
60266 +       if (!fc->writeback_cache)
60267 +               fuse_invalidate_attr(inode);
60268         spin_lock(&fi->lock);
60269         rb_erase(&wpa->writepages_entry, &fi->writepages);
60270         while (wpa->next) {
60271 diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
60272 index 63d97a15ffde..74d888c78fa4 100644
60273 --- a/fs/fuse/fuse_i.h
60274 +++ b/fs/fuse/fuse_i.h
60275 @@ -912,6 +912,7 @@ struct fuse_io_args {
60276                 struct {
60277                         struct fuse_write_in in;
60278                         struct fuse_write_out out;
60279 +                       bool page_locked;
60280                 } write;
60281         };
60282         struct fuse_args_pages ap;
60283 diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
60284 index 4ee6f734ba83..005209b1cd50 100644
60285 --- a/fs/fuse/virtio_fs.c
60286 +++ b/fs/fuse/virtio_fs.c
60287 @@ -896,6 +896,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
60288  out_vqs:
60289         vdev->config->reset(vdev);
60290         virtio_fs_cleanup_vqs(vdev, fs);
60291 +       kfree(fs->vqs);
60293  out:
60294         vdev->priv = NULL;
60295 @@ -1436,8 +1437,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
60296         if (!fm)
60297                 goto out_err;
60299 -       fuse_conn_init(fc, fm, get_user_ns(current_user_ns()),
60300 -                      &virtio_fs_fiq_ops, fs);
60301 +       fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
60302         fc->release = fuse_free_conn;
60303         fc->delete_stale = true;
60304         fc->auto_submounts = true;
60305 diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
60306 index a930ddd15681..7054a542689f 100644
60307 --- a/fs/hfsplus/extents.c
60308 +++ b/fs/hfsplus/extents.c
60309 @@ -598,13 +598,15 @@ void hfsplus_file_truncate(struct inode *inode)
60310                 res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
60311                 if (res)
60312                         break;
60313 -               hfs_brec_remove(&fd);
60315 -               mutex_unlock(&fd.tree->tree_lock);
60316                 start = hip->cached_start;
60317 +               if (blk_cnt <= start)
60318 +                       hfs_brec_remove(&fd);
60319 +               mutex_unlock(&fd.tree->tree_lock);
60320                 hfsplus_free_extents(sb, hip->cached_extents,
60321                                      alloc_cnt - start, alloc_cnt - blk_cnt);
60322                 hfsplus_dump_extent(hip->cached_extents);
60323 +               mutex_lock(&fd.tree->tree_lock);
60324                 if (blk_cnt > start) {
60325                         hip->extent_state |= HFSPLUS_EXT_DIRTY;
60326                         break;
60327 @@ -612,7 +614,6 @@ void hfsplus_file_truncate(struct inode *inode)
60328                 alloc_cnt = start;
60329                 hip->cached_start = hip->cached_blocks = 0;
60330                 hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
60331 -               mutex_lock(&fd.tree->tree_lock);
60332         }
60333         hfs_find_exit(&fd);
60335 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
60336 index 701c82c36138..99df69b84822 100644
60337 --- a/fs/hugetlbfs/inode.c
60338 +++ b/fs/hugetlbfs/inode.c
60339 @@ -131,6 +131,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
60340  static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
60342         struct inode *inode = file_inode(file);
60343 +       struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
60344         loff_t len, vma_len;
60345         int ret;
60346         struct hstate *h = hstate_file(file);
60347 @@ -146,6 +147,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
60348         vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
60349         vma->vm_ops = &hugetlb_vm_ops;
60351 +       ret = seal_check_future_write(info->seals, vma);
60352 +       if (ret)
60353 +               return ret;
60355         /*
60356          * page based offset in vm_pgoff could be sufficiently large to
60357          * overflow a loff_t when converted to byte offset.  This can
60358 diff --git a/fs/inode.c b/fs/inode.c
60359 index a047ab306f9a..c5e1dd13fd40 100644
60360 --- a/fs/inode.c
60361 +++ b/fs/inode.c
60362 @@ -139,6 +139,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
60363         inode->i_blkbits = sb->s_blocksize_bits;
60364         inode->i_flags = 0;
60365         atomic64_set(&inode->i_sequence, 0);
60366 +       atomic64_set(&inode->i_sequence2, 0);
60367         atomic_set(&inode->i_count, 1);
60368         inode->i_op = &empty_iops;
60369         inode->i_fop = &no_open_fops;
60370 diff --git a/fs/io_uring.c b/fs/io_uring.c
60371 index dff34975d86b..144056b0cac9 100644
60372 --- a/fs/io_uring.c
60373 +++ b/fs/io_uring.c
60374 @@ -238,7 +238,7 @@ struct fixed_rsrc_data {
60375  struct io_buffer {
60376         struct list_head list;
60377         __u64 addr;
60378 -       __s32 len;
60379 +       __u32 len;
60380         __u16 bid;
60381  };
60383 @@ -614,7 +614,7 @@ struct io_splice {
60384  struct io_provide_buf {
60385         struct file                     *file;
60386         __u64                           addr;
60387 -       __s32                           len;
60388 +       __u32                           len;
60389         __u32                           bgid;
60390         __u16                           nbufs;
60391         __u16                           bid;
60392 @@ -1008,7 +1008,7 @@ static void io_uring_del_task_file(unsigned long index);
60393  static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
60394                                          struct task_struct *task,
60395                                          struct files_struct *files);
60396 -static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
60397 +static void io_uring_cancel_sqpoll(struct io_sq_data *sqd);
60398  static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
60399  static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
60400                         struct io_ring_ctx *ctx);
60401 @@ -3979,7 +3979,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
60402  static int io_provide_buffers_prep(struct io_kiocb *req,
60403                                    const struct io_uring_sqe *sqe)
60405 -       unsigned long size;
60406 +       unsigned long size, tmp_check;
60407         struct io_provide_buf *p = &req->pbuf;
60408         u64 tmp;
60410 @@ -3993,6 +3993,12 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
60411         p->addr = READ_ONCE(sqe->addr);
60412         p->len = READ_ONCE(sqe->len);
60414 +       if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
60415 +                               &size))
60416 +               return -EOVERFLOW;
60417 +       if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
60418 +               return -EOVERFLOW;
60420         size = (unsigned long)p->len * p->nbufs;
60421         if (!access_ok(u64_to_user_ptr(p->addr), size))
60422                 return -EFAULT;
60423 @@ -4017,7 +4023,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
60424                         break;
60426                 buf->addr = addr;
60427 -               buf->len = pbuf->len;
60428 +               buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
60429                 buf->bid = bid;
60430                 addr += pbuf->len;
60431                 bid++;
60432 @@ -6710,6 +6716,10 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
60433                 if (!list_empty(&ctx->iopoll_list))
60434                         io_do_iopoll(ctx, &nr_events, 0);
60436 +               /*
60437 +                * Don't submit if refs are dying, good for io_uring_register(),
60438 +                * but also it is relied upon by io_ring_exit_work()
60439 +                */
60440                 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
60441                     !(ctx->flags & IORING_SETUP_R_DISABLED))
60442                         ret = io_submit_sqes(ctx, to_submit);
60443 @@ -6832,15 +6842,14 @@ static int io_sq_thread(void *data)
60444                 timeout = jiffies + sqd->sq_thread_idle;
60445         }
60447 -       list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
60448 -               io_uring_cancel_sqpoll(ctx);
60449 +       io_uring_cancel_sqpoll(sqd);
60450         sqd->thread = NULL;
60451         list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
60452                 io_ring_set_wakeup_flag(ctx);
60453 -       mutex_unlock(&sqd->lock);
60455         io_run_task_work();
60456         io_run_task_work_head(&sqd->park_task_work);
60457 +       mutex_unlock(&sqd->lock);
60459         complete(&sqd->exited);
60460         do_exit(0);
60462 @@ -7200,8 +7209,6 @@ static void io_sq_thread_finish(struct io_ring_ctx *ctx)
60464                 io_put_sq_data(sqd);
60465                 ctx->sq_data = NULL;
60466 -               if (ctx->sq_creds)
60467 -                       put_cred(ctx->sq_creds);
60468         }
60471 @@ -8469,6 +8476,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
60472         mutex_unlock(&ctx->uring_lock);
60473         io_eventfd_unregister(ctx);
60474         io_destroy_buffers(ctx);
60475 +       if (ctx->sq_creds)
60476 +               put_cred(ctx->sq_creds);
60478  #if defined(CONFIG_UNIX)
60479         if (ctx->ring_sock) {
60480 @@ -8568,6 +8577,13 @@ static void io_tctx_exit_cb(struct callback_head *cb)
60481         complete(&work->completion);
60484 +static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
60486 +       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
60488 +       return req->ctx == data;
60491  static void io_ring_exit_work(struct work_struct *work)
60493         struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
60494 @@ -8576,14 +8592,6 @@ static void io_ring_exit_work(struct work_struct *work)
60495         struct io_tctx_node *node;
60496         int ret;
60498 -       /* prevent SQPOLL from submitting new requests */
60499 -       if (ctx->sq_data) {
60500 -               io_sq_thread_park(ctx->sq_data);
60501 -               list_del_init(&ctx->sqd_list);
60502 -               io_sqd_update_thread_idle(ctx->sq_data);
60503 -               io_sq_thread_unpark(ctx->sq_data);
60504 -       }
60506         /*
60507          * If we're doing polled IO and end up having requests being
60508          * submitted async (out-of-line), then completions can come in while
60509 @@ -8592,6 +8600,17 @@ static void io_ring_exit_work(struct work_struct *work)
60510          */
60511         do {
60512                 io_uring_try_cancel_requests(ctx, NULL, NULL);
60513 +               if (ctx->sq_data) {
60514 +                       struct io_sq_data *sqd = ctx->sq_data;
60515 +                       struct task_struct *tsk;
60517 +                       io_sq_thread_park(sqd);
60518 +                       tsk = sqd->thread;
60519 +                       if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
60520 +                               io_wq_cancel_cb(tsk->io_uring->io_wq,
60521 +                                               io_cancel_ctx_cb, ctx, true);
60522 +                       io_sq_thread_unpark(sqd);
60523 +               }
60525                 WARN_ON_ONCE(time_after(jiffies, timeout));
60526         } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
60527 @@ -8736,13 +8755,6 @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
60528         return true;
60531 -static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
60533 -       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
60535 -       return req->ctx == data;
60538  static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
60540         struct io_tctx_node *node;
60541 @@ -8935,11 +8947,11 @@ static s64 tctx_inflight(struct io_uring_task *tctx)
60542  static void io_sqpoll_cancel_cb(struct callback_head *cb)
60544         struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
60545 -       struct io_ring_ctx *ctx = work->ctx;
60546 -       struct io_sq_data *sqd = ctx->sq_data;
60547 +       struct io_sq_data *sqd = work->ctx->sq_data;
60549         if (sqd->thread)
60550 -               io_uring_cancel_sqpoll(ctx);
60551 +               io_uring_cancel_sqpoll(sqd);
60552 +       list_del_init(&work->ctx->sqd_list);
60553         complete(&work->completion);
60556 @@ -8950,7 +8962,6 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
60557         struct task_struct *task;
60559         io_sq_thread_park(sqd);
60560 -       list_del_init(&ctx->sqd_list);
60561         io_sqd_update_thread_idle(sqd);
60562         task = sqd->thread;
60563         if (task) {
60564 @@ -8958,6 +8969,8 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
60565                 init_task_work(&work.task_work, io_sqpoll_cancel_cb);
60566                 io_task_work_add_head(&sqd->park_task_work, &work.task_work);
60567                 wake_up_process(task);
60568 +       } else {
60569 +               list_del_init(&ctx->sqd_list);
60570         }
60571         io_sq_thread_unpark(sqd);
60573 @@ -8991,14 +9004,16 @@ void __io_uring_files_cancel(struct files_struct *files)
60576  /* should only be called by SQPOLL task */
60577 -static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
60578 +static void io_uring_cancel_sqpoll(struct io_sq_data *sqd)
60580 -       struct io_sq_data *sqd = ctx->sq_data;
60581         struct io_uring_task *tctx = current->io_uring;
60582 +       struct io_ring_ctx *ctx;
60583         s64 inflight;
60584         DEFINE_WAIT(wait);
60586 -       WARN_ON_ONCE(!sqd || ctx->sq_data->thread != current);
60587 +       if (!current->io_uring)
60588 +               return;
60589 +       WARN_ON_ONCE(!sqd || sqd->thread != current);
60591         atomic_inc(&tctx->in_idle);
60592         do {
60593 @@ -9006,7 +9021,8 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
60594                 inflight = tctx_inflight(tctx);
60595                 if (!inflight)
60596                         break;
60597 -               io_uring_try_cancel_requests(ctx, current, NULL);
60598 +               list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
60599 +                       io_uring_try_cancel_requests(ctx, current, NULL);
60601                 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
60602                 /*
60603 diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
60604 index 69f18fe20923..d47a0d96bf30 100644
60605 --- a/fs/jbd2/recovery.c
60606 +++ b/fs/jbd2/recovery.c
60607 @@ -245,15 +245,14 @@ static int fc_do_one_pass(journal_t *journal,
60608                 return 0;
60610         while (next_fc_block <= journal->j_fc_last) {
60611 -               jbd_debug(3, "Fast commit replay: next block %ld",
60612 +               jbd_debug(3, "Fast commit replay: next block %ld\n",
60613                           next_fc_block);
60614                 err = jread(&bh, journal, next_fc_block);
60615                 if (err) {
60616 -                       jbd_debug(3, "Fast commit replay: read error");
60617 +                       jbd_debug(3, "Fast commit replay: read error\n");
60618                         break;
60619                 }
60621 -               jbd_debug(3, "Processing fast commit blk with seq %d");
60622                 err = journal->j_fc_replay_callback(journal, bh, pass,
60623                                         next_fc_block - journal->j_fc_first,
60624                                         expected_commit_id);
60625 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
60626 index 9396666b7314..e8fc45fd751f 100644
60627 --- a/fs/jbd2/transaction.c
60628 +++ b/fs/jbd2/transaction.c
60629 @@ -349,7 +349,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
60630         }
60632  alloc_transaction:
60633 -       if (!journal->j_running_transaction) {
60634 +       /*
60635 +        * This check is racy but it is just an optimization of allocating new
60636 +        * transaction early if there are high chances we'll need it. If we
60637 +        * guess wrong, we'll retry or free unused transaction.
60638 +        */
60639 +       if (!data_race(journal->j_running_transaction)) {
60640                 /*
60641                  * If __GFP_FS is not present, then we may be being called from
60642                  * inside the fs writeback layer, so we MUST NOT fail.
60643 @@ -1474,8 +1479,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
60644          * crucial to catch bugs so let's do a reliable check until the
60645          * lockless handling is fully proven.
60646          */
60647 -       if (jh->b_transaction != transaction &&
60648 -           jh->b_next_transaction != transaction) {
60649 +       if (data_race(jh->b_transaction != transaction &&
60650 +           jh->b_next_transaction != transaction)) {
60651                 spin_lock(&jh->b_state_lock);
60652                 J_ASSERT_JH(jh, jh->b_transaction == transaction ||
60653                                 jh->b_next_transaction == transaction);
60654 @@ -1483,8 +1488,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
60655         }
60656         if (jh->b_modified == 1) {
60657                 /* If it's in our transaction it must be in BJ_Metadata list. */
60658 -               if (jh->b_transaction == transaction &&
60659 -                   jh->b_jlist != BJ_Metadata) {
60660 +               if (data_race(jh->b_transaction == transaction &&
60661 +                   jh->b_jlist != BJ_Metadata)) {
60662                         spin_lock(&jh->b_state_lock);
60663                         if (jh->b_transaction == transaction &&
60664                             jh->b_jlist != BJ_Metadata)
60665 diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
60666 index f8fb89b10227..4fc8cd698d1a 100644
60667 --- a/fs/jffs2/file.c
60668 +++ b/fs/jffs2/file.c
60669 @@ -57,6 +57,7 @@ const struct file_operations jffs2_file_operations =
60670         .mmap =         generic_file_readonly_mmap,
60671         .fsync =        jffs2_fsync,
60672         .splice_read =  generic_file_splice_read,
60673 +       .splice_write = iter_file_splice_write,
60674  };
60676  /* jffs2_file_inode_operations */
60677 diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
60678 index db72a9d2d0af..b676056826be 100644
60679 --- a/fs/jffs2/scan.c
60680 +++ b/fs/jffs2/scan.c
60681 @@ -1079,7 +1079,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
60682         memcpy(&fd->name, rd->name, checkedlen);
60683         fd->name[checkedlen] = 0;
60685 -       crc = crc32(0, fd->name, rd->nsize);
60686 +       crc = crc32(0, fd->name, checkedlen);
60687         if (crc != je32_to_cpu(rd->name_crc)) {
60688                 pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
60689                           __func__, ofs, je32_to_cpu(rd->name_crc), crc);
60690 diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
60691 index f7786e00a6a7..ed9d580826f5 100644
60692 --- a/fs/nfs/callback_proc.c
60693 +++ b/fs/nfs/callback_proc.c
60694 @@ -137,12 +137,12 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
60695                 list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
60696                         if (!pnfs_layout_is_valid(lo))
60697                                 continue;
60698 -                       if (stateid != NULL &&
60699 -                           !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
60700 +                       if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
60701                                 continue;
60702 -                       if (!nfs_sb_active(server->super))
60703 -                               continue;
60704 -                       inode = igrab(lo->plh_inode);
60705 +                       if (nfs_sb_active(server->super))
60706 +                               inode = igrab(lo->plh_inode);
60707 +                       else
60708 +                               inode = ERR_PTR(-EAGAIN);
60709                         rcu_read_unlock();
60710                         if (inode)
60711                                 return inode;
60712 @@ -176,9 +176,10 @@ static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
60713                                 continue;
60714                         if (nfsi->layout != lo)
60715                                 continue;
60716 -                       if (!nfs_sb_active(server->super))
60717 -                               continue;
60718 -                       inode = igrab(lo->plh_inode);
60719 +                       if (nfs_sb_active(server->super))
60720 +                               inode = igrab(lo->plh_inode);
60721 +                       else
60722 +                               inode = ERR_PTR(-EAGAIN);
60723                         rcu_read_unlock();
60724                         if (inode)
60725                                 return inode;
60726 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
60727 index fc4f490f2d78..0cd7c59a6601 100644
60728 --- a/fs/nfs/dir.c
60729 +++ b/fs/nfs/dir.c
60730 @@ -866,6 +866,8 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
60731                         break;
60732                 }
60734 +               verf_arg = verf_res;
60736                 status = nfs_readdir_page_filler(desc, entry, pages, pglen,
60737                                                  arrays, narrays);
60738         } while (!status && nfs_readdir_page_needs_filling(page));
60739 @@ -927,7 +929,12 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
60740                         }
60741                         return res;
60742                 }
60743 -               memcpy(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf));
60744 +               /*
60745 +                * Set the cookie verifier if the page cache was empty
60746 +                */
60747 +               if (desc->page_index == 0)
60748 +                       memcpy(nfsi->cookieverf, verf,
60749 +                              sizeof(nfsi->cookieverf));
60750         }
60751         res = nfs_readdir_search_array(desc);
60752         if (res == 0) {
60753 @@ -974,10 +981,10 @@ static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc)
60754  /*
60755   * Once we've found the start of the dirent within a page: fill 'er up...
60756   */
60757 -static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
60758 +static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
60759 +                          const __be32 *verf)
60761         struct file     *file = desc->file;
60762 -       struct nfs_inode *nfsi = NFS_I(file_inode(file));
60763         struct nfs_cache_array *array;
60764         unsigned int i = 0;
60766 @@ -991,7 +998,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
60767                         desc->eof = true;
60768                         break;
60769                 }
60770 -               memcpy(desc->verf, nfsi->cookieverf, sizeof(desc->verf));
60771 +               memcpy(desc->verf, verf, sizeof(desc->verf));
60772                 if (i < (array->size-1))
60773                         desc->dir_cookie = array->array[i+1].cookie;
60774                 else
60775 @@ -1048,7 +1055,7 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
60777         for (i = 0; !desc->eof && i < sz && arrays[i]; i++) {
60778                 desc->page = arrays[i];
60779 -               nfs_do_filldir(desc);
60780 +               nfs_do_filldir(desc, verf);
60781         }
60782         desc->page = NULL;
60784 @@ -1069,6 +1076,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
60786         struct dentry   *dentry = file_dentry(file);
60787         struct inode    *inode = d_inode(dentry);
60788 +       struct nfs_inode *nfsi = NFS_I(inode);
60789         struct nfs_open_dir_context *dir_ctx = file->private_data;
60790         struct nfs_readdir_descriptor *desc;
60791         int res;
60792 @@ -1122,7 +1130,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
60793                         break;
60794                 }
60795                 if (res == -ETOOSMALL && desc->plus) {
60796 -                       clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
60797 +                       clear_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
60798                         nfs_zap_caches(inode);
60799                         desc->page_index = 0;
60800                         desc->plus = false;
60801 @@ -1132,7 +1140,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
60802                 if (res < 0)
60803                         break;
60805 -               nfs_do_filldir(desc);
60806 +               nfs_do_filldir(desc, nfsi->cookieverf);
60807                 nfs_readdir_page_unlock_and_put_cached(desc);
60808         } while (!desc->eof);
60810 diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
60811 index 872112bffcab..d383de00d486 100644
60812 --- a/fs/nfs/flexfilelayout/flexfilelayout.c
60813 +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
60814 @@ -106,7 +106,7 @@ static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
60815         if (unlikely(!p))
60816                 return -ENOBUFS;
60817         fh->size = be32_to_cpup(p++);
60818 -       if (fh->size > sizeof(struct nfs_fh)) {
60819 +       if (fh->size > NFS_MAXFHSIZE) {
60820                 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
60821                        fh->size);
60822                 return -EOVERFLOW;
60823 diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
60824 index 971a9251c1d9..902db1262d2b 100644
60825 --- a/fs/nfs/fs_context.c
60826 +++ b/fs/nfs/fs_context.c
60827 @@ -973,6 +973,15 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
60828                         memset(mntfh->data + mntfh->size, 0,
60829                                sizeof(mntfh->data) - mntfh->size);
60831 +               /*
60832 +                * for proto == XPRT_TRANSPORT_UDP, which is what uses
60833 +                * to_exponential, implying shift: limit the shift value
60834 +                * to BITS_PER_LONG (majortimeo is unsigned long)
60835 +                */
60836 +               if (!(data->flags & NFS_MOUNT_TCP)) /* this will be UDP */
60837 +                       if (data->retrans >= 64) /* shift value is too large */
60838 +                               goto out_invalid_data;
60840                 /*
60841                  * Translate to nfs_fs_context, which nfs_fill_super
60842                  * can deal with.
60843 @@ -1073,6 +1082,9 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
60845  out_invalid_fh:
60846         return nfs_invalf(fc, "NFS: invalid root filehandle");
60848 +out_invalid_data:
60849 +       return nfs_invalf(fc, "NFS: invalid binary mount data");
60852  #if IS_ENABLED(CONFIG_NFS_V4)
60853 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
60854 index a7fb076a5f44..7cfeee3eeef7 100644
60855 --- a/fs/nfs/inode.c
60856 +++ b/fs/nfs/inode.c
60857 @@ -1662,10 +1662,10 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
60858   */
60859  static int nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
60861 -       const struct nfs_inode *nfsi = NFS_I(inode);
60862 +       unsigned long attr_gencount = NFS_I(inode)->attr_gencount;
60864 -       return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
60865 -               ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
60866 +       return (long)(fattr->gencount - attr_gencount) > 0 ||
60867 +              (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0;
60870  static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
60871 @@ -2094,7 +2094,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
60872                         nfsi->attrtimeo_timestamp = now;
60873                 }
60874                 /* Set the barrier to be more recent than this fattr */
60875 -               if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
60876 +               if ((long)(fattr->gencount - nfsi->attr_gencount) > 0)
60877                         nfsi->attr_gencount = fattr->gencount;
60878         }
60880 diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
60881 index 094024b0aca1..3875120ef3ef 100644
60882 --- a/fs/nfs/nfs42proc.c
60883 +++ b/fs/nfs/nfs42proc.c
60884 @@ -46,11 +46,12 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
60886         struct inode *inode = file_inode(filep);
60887         struct nfs_server *server = NFS_SERVER(inode);
60888 +       u32 bitmask[3];
60889         struct nfs42_falloc_args args = {
60890                 .falloc_fh      = NFS_FH(inode),
60891                 .falloc_offset  = offset,
60892                 .falloc_length  = len,
60893 -               .falloc_bitmask = nfs4_fattr_bitmap,
60894 +               .falloc_bitmask = bitmask,
60895         };
60896         struct nfs42_falloc_res res = {
60897                 .falloc_server  = server,
60898 @@ -68,6 +69,10 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
60899                 return status;
60900         }
60902 +       memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
60903 +       if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
60904 +               bitmask[1] |= FATTR4_WORD1_SPACE_USED;
60906         res.falloc_fattr = nfs_alloc_fattr();
60907         if (!res.falloc_fattr)
60908                 return -ENOMEM;
60909 @@ -75,7 +80,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
60910         status = nfs4_call_sync(server->client, server, msg,
60911                                 &args.seq_args, &res.seq_res, 0);
60912         if (status == 0)
60913 -               status = nfs_post_op_update_inode(inode, res.falloc_fattr);
60914 +               status = nfs_post_op_update_inode_force_wcc(inode,
60915 +                                                           res.falloc_fattr);
60917         kfree(res.falloc_fattr);
60918         return status;
60919 @@ -84,7 +90,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
60920  static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
60921                                 loff_t offset, loff_t len)
60923 -       struct nfs_server *server = NFS_SERVER(file_inode(filep));
60924 +       struct inode *inode = file_inode(filep);
60925 +       struct nfs_server *server = NFS_SERVER(inode);
60926         struct nfs4_exception exception = { };
60927         struct nfs_lock_context *lock;
60928         int err;
60929 @@ -93,9 +100,13 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
60930         if (IS_ERR(lock))
60931                 return PTR_ERR(lock);
60933 -       exception.inode = file_inode(filep);
60934 +       exception.inode = inode;
60935         exception.state = lock->open_context->state;
60937 +       err = nfs_sync_inode(inode);
60938 +       if (err)
60939 +               goto out;
60941         do {
60942                 err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
60943                 if (err == -ENOTSUPP) {
60944 @@ -104,7 +115,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
60945                 }
60946                 err = nfs4_handle_exception(server, err, &exception);
60947         } while (exception.retry);
60949 +out:
60950         nfs_put_lock_context(lock);
60951         return err;
60953 @@ -142,16 +153,13 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
60954                 return -EOPNOTSUPP;
60956         inode_lock(inode);
60957 -       err = nfs_sync_inode(inode);
60958 -       if (err)
60959 -               goto out_unlock;
60961         err = nfs42_proc_fallocate(&msg, filep, offset, len);
60962         if (err == 0)
60963                 truncate_pagecache_range(inode, offset, (offset + len) -1);
60964         if (err == -EOPNOTSUPP)
60965                 NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
60966 -out_unlock:
60968         inode_unlock(inode);
60969         return err;
60971 @@ -261,6 +269,33 @@ static int process_copy_commit(struct file *dst, loff_t pos_dst,
60972         return status;
60975 +/**
60976 + * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
60977 + * @inode: pointer to destination inode
60978 + * @pos: destination offset
60979 + * @len: copy length
60980 + *
60981 + * Punch a hole in the inode page cache, so that the NFS client will
60982 + * know to retrieve new data.
60983 + * Update the file size if necessary, and then mark the inode as having
60984 + * invalid cached values for change attribute, ctime, mtime and space used.
60985 + */
60986 +static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
60988 +       loff_t newsize = pos + len;
60989 +       loff_t end = newsize - 1;
60991 +       truncate_pagecache_range(inode, pos, end);
60992 +       spin_lock(&inode->i_lock);
60993 +       if (newsize > i_size_read(inode))
60994 +               i_size_write(inode, newsize);
60995 +       nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
60996 +                                            NFS_INO_INVALID_CTIME |
60997 +                                            NFS_INO_INVALID_MTIME |
60998 +                                            NFS_INO_INVALID_BLOCKS);
60999 +       spin_unlock(&inode->i_lock);
61002  static ssize_t _nfs42_proc_copy(struct file *src,
61003                                 struct nfs_lock_context *src_lock,
61004                                 struct file *dst,
61005 @@ -354,14 +389,8 @@ static ssize_t _nfs42_proc_copy(struct file *src,
61006                         goto out;
61007         }
61009 -       truncate_pagecache_range(dst_inode, pos_dst,
61010 -                                pos_dst + res->write_res.count);
61011 -       spin_lock(&dst_inode->i_lock);
61012 -       nfs_set_cache_invalid(
61013 -               dst_inode, NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED |
61014 -                                  NFS_INO_INVALID_SIZE | NFS_INO_INVALID_ATTR |
61015 -                                  NFS_INO_INVALID_DATA);
61016 -       spin_unlock(&dst_inode->i_lock);
61017 +       nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
61019         spin_lock(&src_inode->i_lock);
61020         nfs_set_cache_invalid(src_inode, NFS_INO_REVAL_PAGECACHE |
61021                                                  NFS_INO_REVAL_FORCED |
61022 @@ -659,7 +688,10 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
61023         if (status)
61024                 return status;
61026 -       return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
61027 +       if (whence == SEEK_DATA && res.sr_eof)
61028 +               return -NFS4ERR_NXIO;
61029 +       else
61030 +               return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
61033  loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
61034 @@ -1044,8 +1076,10 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
61036         status = nfs4_call_sync(server->client, server, msg,
61037                                 &args.seq_args, &res.seq_res, 0);
61038 -       if (status == 0)
61039 +       if (status == 0) {
61040 +               nfs42_copy_dest_done(dst_inode, dst_offset, count);
61041                 status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
61042 +       }
61044         kfree(res.dst_fattr);
61045         return status;
61046 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
61047 index c65c4b41e2c1..820abae88cf0 100644
61048 --- a/fs/nfs/nfs4proc.c
61049 +++ b/fs/nfs/nfs4proc.c
61050 @@ -108,9 +108,10 @@ static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
61051  static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
61052                 const struct cred *, bool);
61053  #endif
61054 -static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
61055 -               struct nfs_server *server,
61056 -               struct nfs4_label *label);
61057 +static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ],
61058 +                            const __u32 *src, struct inode *inode,
61059 +                            struct nfs_server *server,
61060 +                            struct nfs4_label *label);
61062  #ifdef CONFIG_NFS_V4_SECURITY_LABEL
61063  static inline struct nfs4_label *
61064 @@ -3591,6 +3592,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
61065         struct nfs4_closedata *calldata = data;
61066         struct nfs4_state *state = calldata->state;
61067         struct inode *inode = calldata->inode;
61068 +       struct nfs_server *server = NFS_SERVER(inode);
61069         struct pnfs_layout_hdr *lo;
61070         bool is_rdonly, is_wronly, is_rdwr;
61071         int call_close = 0;
61072 @@ -3647,8 +3649,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
61073         if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
61074                 /* Close-to-open cache consistency revalidation */
61075                 if (!nfs4_have_delegation(inode, FMODE_READ)) {
61076 -                       calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
61077 -                       nfs4_bitmask_adjust(calldata->arg.bitmask, inode, NFS_SERVER(inode), NULL);
61078 +                       nfs4_bitmask_set(calldata->arg.bitmask_store,
61079 +                                        server->cache_consistency_bitmask,
61080 +                                        inode, server, NULL);
61081 +                       calldata->arg.bitmask = calldata->arg.bitmask_store;
61082                 } else
61083                         calldata->arg.bitmask = NULL;
61084         }
61085 @@ -5416,19 +5420,17 @@ bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
61086         return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
61089 -static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
61090 -                               struct nfs_server *server,
61091 -                               struct nfs4_label *label)
61092 +static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
61093 +                            struct inode *inode, struct nfs_server *server,
61094 +                            struct nfs4_label *label)
61097         unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
61098 +       unsigned int i;
61100 -       if ((cache_validity & NFS_INO_INVALID_DATA) ||
61101 -               (cache_validity & NFS_INO_REVAL_PAGECACHE) ||
61102 -               (cache_validity & NFS_INO_REVAL_FORCED) ||
61103 -               (cache_validity & NFS_INO_INVALID_OTHER))
61104 -               nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
61105 +       memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
61107 +       if (cache_validity & (NFS_INO_INVALID_CHANGE | NFS_INO_REVAL_PAGECACHE))
61108 +               bitmask[0] |= FATTR4_WORD0_CHANGE;
61109         if (cache_validity & NFS_INO_INVALID_ATIME)
61110                 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
61111         if (cache_validity & NFS_INO_INVALID_OTHER)
61112 @@ -5437,16 +5439,22 @@ static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
61113                                 FATTR4_WORD1_NUMLINKS;
61114         if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
61115                 bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
61116 -       if (cache_validity & NFS_INO_INVALID_CHANGE)
61117 -               bitmask[0] |= FATTR4_WORD0_CHANGE;
61118         if (cache_validity & NFS_INO_INVALID_CTIME)
61119                 bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
61120         if (cache_validity & NFS_INO_INVALID_MTIME)
61121                 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
61122 -       if (cache_validity & NFS_INO_INVALID_SIZE)
61123 -               bitmask[0] |= FATTR4_WORD0_SIZE;
61124         if (cache_validity & NFS_INO_INVALID_BLOCKS)
61125                 bitmask[1] |= FATTR4_WORD1_SPACE_USED;
61127 +       if (nfs4_have_delegation(inode, FMODE_READ) &&
61128 +           !(cache_validity & NFS_INO_REVAL_FORCED))
61129 +               bitmask[0] &= ~FATTR4_WORD0_SIZE;
61130 +       else if (cache_validity &
61131 +                (NFS_INO_INVALID_SIZE | NFS_INO_REVAL_PAGECACHE))
61132 +               bitmask[0] |= FATTR4_WORD0_SIZE;
61134 +       for (i = 0; i < NFS4_BITMASK_SZ; i++)
61135 +               bitmask[i] &= server->attr_bitmask[i];
61138  static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
61139 @@ -5459,8 +5467,10 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
61140                 hdr->args.bitmask = NULL;
61141                 hdr->res.fattr = NULL;
61142         } else {
61143 -               hdr->args.bitmask = server->cache_consistency_bitmask;
61144 -               nfs4_bitmask_adjust(hdr->args.bitmask, hdr->inode, server, NULL);
61145 +               nfs4_bitmask_set(hdr->args.bitmask_store,
61146 +                                server->cache_consistency_bitmask,
61147 +                                hdr->inode, server, NULL);
61148 +               hdr->args.bitmask = hdr->args.bitmask_store;
61149         }
61151         if (!hdr->pgio_done_cb)
61152 @@ -6502,8 +6512,10 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
61154         data->args.fhandle = &data->fh;
61155         data->args.stateid = &data->stateid;
61156 -       data->args.bitmask = server->cache_consistency_bitmask;
61157 -       nfs4_bitmask_adjust(data->args.bitmask, inode, server, NULL);
61158 +       nfs4_bitmask_set(data->args.bitmask_store,
61159 +                        server->cache_consistency_bitmask, inode, server,
61160 +                        NULL);
61161 +       data->args.bitmask = data->args.bitmask_store;
61162         nfs_copy_fh(&data->fh, NFS_FH(inode));
61163         nfs4_stateid_copy(&data->stateid, stateid);
61164         data->res.fattr = &data->fattr;
61165 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
61166 index 102b66e0bdef..f726f8b12b7e 100644
61167 --- a/fs/nfs/pnfs.c
61168 +++ b/fs/nfs/pnfs.c
61169 @@ -1344,7 +1344,7 @@ _pnfs_return_layout(struct inode *ino)
61170         }
61171         valid_layout = pnfs_layout_is_valid(lo);
61172         pnfs_clear_layoutcommit(ino, &tmp_list);
61173 -       pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
61174 +       pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
61176         if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
61177                 struct pnfs_layout_range range = {
61178 @@ -2468,6 +2468,9 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
61180         assert_spin_locked(&lo->plh_inode->i_lock);
61182 +       if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
61183 +               tmp_list = &lo->plh_return_segs;
61185         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
61186                 if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
61187                         dprintk("%s: marking lseg %p iomode %d "
61188 @@ -2475,6 +2478,8 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
61189                                 lseg, lseg->pls_range.iomode,
61190                                 lseg->pls_range.offset,
61191                                 lseg->pls_range.length);
61192 +                       if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
61193 +                               tmp_list = &lo->plh_return_segs;
61194                         if (mark_lseg_invalid(lseg, tmp_list))
61195                                 continue;
61196                         remaining++;
61197 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
61198 index dd9f38d072dd..e13c4c81fb89 100644
61199 --- a/fs/nfsd/nfs4proc.c
61200 +++ b/fs/nfsd/nfs4proc.c
61201 @@ -1538,8 +1538,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
61202                 if (!nfs4_init_copy_state(nn, copy))
61203                         goto out_err;
61204                 refcount_set(&async_copy->refcount, 1);
61205 -               memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
61206 -                       sizeof(copy->cp_stateid));
61207 +               memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.stid,
61208 +                       sizeof(copy->cp_res.cb_stateid));
61209                 dup_copy_fields(copy, async_copy);
61210                 async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
61211                                 async_copy, "%s", "copy thread");
61212 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
61213 index 97447a64bad0..886e50ed07c2 100644
61214 --- a/fs/nfsd/nfs4state.c
61215 +++ b/fs/nfsd/nfs4state.c
61216 @@ -4869,6 +4869,11 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
61217         if (nf)
61218                 nfsd_file_put(nf);
61220 +       status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
61221 +                                                               access));
61222 +       if (status)
61223 +               goto out_put_access;
61225         status = nfsd4_truncate(rqstp, cur_fh, open);
61226         if (status)
61227                 goto out_put_access;
61228 @@ -6849,11 +6854,20 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
61229  static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
61231         struct nfsd_file *nf;
61232 -       __be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
61233 -       if (!err) {
61234 -               err = nfserrno(vfs_test_lock(nf->nf_file, lock));
61235 -               nfsd_file_put(nf);
61236 -       }
61237 +       __be32 err;
61239 +       err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
61240 +       if (err)
61241 +               return err;
61242 +       fh_lock(fhp); /* to block new leases till after test_lock: */
61243 +       err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
61244 +                                                       NFSD_MAY_READ));
61245 +       if (err)
61246 +               goto out;
61247 +       err = nfserrno(vfs_test_lock(nf->nf_file, lock));
61248 +out:
61249 +       fh_unlock(fhp);
61250 +       nfsd_file_put(nf);
61251         return err;
61254 diff --git a/fs/ntfs3/Kconfig b/fs/ntfs3/Kconfig
61255 new file mode 100644
61256 index 000000000000..6e4cbc48ab8e
61257 --- /dev/null
61258 +++ b/fs/ntfs3/Kconfig
61259 @@ -0,0 +1,46 @@
61260 +# SPDX-License-Identifier: GPL-2.0-only
61261 +config NTFS3_FS
61262 +       tristate "NTFS Read-Write file system support"
61263 +       select NLS
61264 +       help
61265 +         Windows OS native file system (NTFS) support up to NTFS version 3.1.
61267 +         Y or M enables the NTFS3 driver with full features enabled (read,
61268 +         write, journal replaying, sparse/compressed files support).
61269 +         File system type to use on mount is "ntfs3". Module name (M option)
61270 +         is also "ntfs3".
61272 +         Documentation: <file:Documentation/filesystems/ntfs3.rst>
61274 +config NTFS3_64BIT_CLUSTER
61275 +       bool "64 bits per NTFS clusters"
61276 +       depends on NTFS3_FS && 64BIT
61277 +       help
61278 +         Windows implementation of ntfs.sys uses 32 bits per clusters.
61279 +         If activated 64 bits per clusters you will be able to use 4k cluster
61280 +         for 16T+ volumes. Windows will not be able to mount such volumes.
61282 +         It is recommended to say N here.
61284 +config NTFS3_LZX_XPRESS
61285 +       bool "activate support of external compressions lzx/xpress"
61286 +       depends on NTFS3_FS
61287 +       help
61288 +         In Windows 10 one can use command "compact" to compress any files.
61289 +         4 possible variants of compression are: xpress4k, xpress8k, xpress16k and lzx.
61290 +         If activated you will be able to read such files correctly.
61292 +         It is recommended to say Y here.
61294 +config NTFS3_FS_POSIX_ACL
61295 +       bool "NTFS POSIX Access Control Lists"
61296 +       depends on NTFS3_FS
61297 +       select FS_POSIX_ACL
61298 +       help
61299 +         POSIX Access Control Lists (ACLs) support additional access rights
61300 +         for users and groups beyond the standard owner/group/world scheme,
61301 +         and this option selects support for ACLs specifically for ntfs
61302 +         filesystems.
61303 +         NOTE: this is linux only feature. Windows will ignore these ACLs.
61305 +         If you don't know what Access Control Lists are, say N.
61306 diff --git a/fs/ntfs3/Makefile b/fs/ntfs3/Makefile
61307 new file mode 100644
61308 index 000000000000..5adc54ebac5a
61309 --- /dev/null
61310 +++ b/fs/ntfs3/Makefile
61311 @@ -0,0 +1,38 @@
61312 +# SPDX-License-Identifier: GPL-2.0
61314 +# Makefile for the ntfs3 filesystem support.
61317 +# to check robot warnings
61318 +ccflags-y += -Wint-to-pointer-cast
61319 +condflags := \
61320 +       $(call cc-option, -Wunused-but-set-variable) \
61321 +       $(call cc-option, -Wold-style-declaration)
61322 +ccflags-y += $(condflags)
61324 +obj-$(CONFIG_NTFS3_FS) += ntfs3.o
61326 +ntfs3-y :=     attrib.o \
61327 +               attrlist.o \
61328 +               bitfunc.o \
61329 +               bitmap.o \
61330 +               dir.o \
61331 +               fsntfs.o \
61332 +               frecord.o \
61333 +               file.o \
61334 +               fslog.o \
61335 +               inode.o \
61336 +               index.o \
61337 +               lznt.o \
61338 +               namei.o \
61339 +               record.o \
61340 +               run.o \
61341 +               super.o \
61342 +               upcase.o \
61343 +               xattr.o
61345 +ntfs3-$(CONFIG_NTFS3_LZX_XPRESS) += $(addprefix lib/,\
61346 +               decompress_common.o \
61347 +               lzx_decompress.o \
61348 +               xpress_decompress.o \
61349 +               )
61350 diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
61351 new file mode 100644
61352 index 000000000000..bca85e7b6eaf
61353 --- /dev/null
61354 +++ b/fs/ntfs3/attrib.c
61355 @@ -0,0 +1,2082 @@
61356 +// SPDX-License-Identifier: GPL-2.0
61358 + *
61359 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
61360 + *
61361 + * TODO: merge attr_set_size/attr_data_get_block/attr_allocate_frame?
61362 + */
61364 +#include <linux/blkdev.h>
61365 +#include <linux/buffer_head.h>
61366 +#include <linux/fs.h>
61367 +#include <linux/hash.h>
61368 +#include <linux/nls.h>
61369 +#include <linux/ratelimit.h>
61370 +#include <linux/slab.h>
61372 +#include "debug.h"
61373 +#include "ntfs.h"
61374 +#include "ntfs_fs.h"
61377 + * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to manage
61378 + * preallocate algorithm
61379 + */
61380 +#ifndef NTFS_MIN_LOG2_OF_CLUMP
61381 +#define NTFS_MIN_LOG2_OF_CLUMP 16
61382 +#endif
61384 +#ifndef NTFS_MAX_LOG2_OF_CLUMP
61385 +#define NTFS_MAX_LOG2_OF_CLUMP 26
61386 +#endif
61388 +// 16M
61389 +#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
61390 +// 16G
61391 +#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
61394 + * get_pre_allocated
61395 + *
61396 + */
61397 +static inline u64 get_pre_allocated(u64 size)
61399 +       u32 clump;
61400 +       u8 align_shift;
61401 +       u64 ret;
61403 +       if (size <= NTFS_CLUMP_MIN) {
61404 +               clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
61405 +               align_shift = NTFS_MIN_LOG2_OF_CLUMP;
61406 +       } else if (size >= NTFS_CLUMP_MAX) {
61407 +               clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
61408 +               align_shift = NTFS_MAX_LOG2_OF_CLUMP;
61409 +       } else {
61410 +               align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
61411 +                             __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
61412 +               clump = 1u << align_shift;
61413 +       }
61415 +       ret = (((size + clump - 1) >> align_shift)) << align_shift;
61417 +       return ret;
61421 + * attr_must_be_resident
61422 + *
61423 + * returns true if attribute must be resident
61424 + */
61425 +static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
61426 +                                        enum ATTR_TYPE type)
61428 +       const struct ATTR_DEF_ENTRY *de;
61430 +       switch (type) {
61431 +       case ATTR_STD:
61432 +       case ATTR_NAME:
61433 +       case ATTR_ID:
61434 +       case ATTR_LABEL:
61435 +       case ATTR_VOL_INFO:
61436 +       case ATTR_ROOT:
61437 +       case ATTR_EA_INFO:
61438 +               return true;
61439 +       default:
61440 +               de = ntfs_query_def(sbi, type);
61441 +               if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
61442 +                       return true;
61443 +               return false;
61444 +       }
61448 + * attr_load_runs
61449 + *
61450 + * load all runs stored in 'attr'
61451 + */
61452 +int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
61453 +                  struct runs_tree *run, const CLST *vcn)
61455 +       int err;
61456 +       CLST svcn = le64_to_cpu(attr->nres.svcn);
61457 +       CLST evcn = le64_to_cpu(attr->nres.evcn);
61458 +       u32 asize;
61459 +       u16 run_off;
61461 +       if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
61462 +               return 0;
61464 +       if (vcn && (evcn < *vcn || *vcn < svcn))
61465 +               return -EINVAL;
61467 +       asize = le32_to_cpu(attr->size);
61468 +       run_off = le16_to_cpu(attr->nres.run_off);
61469 +       err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
61470 +                           vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
61471 +                           asize - run_off);
61472 +       if (err < 0)
61473 +               return err;
61475 +       return 0;
61479 + * int run_deallocate_ex
61480 + *
61481 + * Deallocate clusters
61482 + */
61483 +static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
61484 +                            CLST vcn, CLST len, CLST *done, bool trim)
61486 +       int err = 0;
61487 +       CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
61488 +       size_t idx;
61490 +       if (!len)
61491 +               goto out;
61493 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
61494 +failed:
61495 +               run_truncate(run, vcn0);
61496 +               err = -EINVAL;
61497 +               goto out;
61498 +       }
61500 +       for (;;) {
61501 +               if (clen > len)
61502 +                       clen = len;
61504 +               if (!clen) {
61505 +                       err = -EINVAL;
61506 +                       goto out;
61507 +               }
61509 +               if (lcn != SPARSE_LCN) {
61510 +                       mark_as_free_ex(sbi, lcn, clen, trim);
61511 +                       dn += clen;
61512 +               }
61514 +               len -= clen;
61515 +               if (!len)
61516 +                       break;
61518 +               vcn_next = vcn + clen;
61519 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
61520 +                   vcn != vcn_next) {
61521 +                       // save memory - don't load entire run
61522 +                       goto failed;
61523 +               }
61524 +       }
61526 +out:
61527 +       if (done)
61528 +               *done += dn;
61530 +       return err;
61534 + * attr_allocate_clusters
61535 + *
61536 + * find free space, mark it as used and store in 'run'
61537 + */
61538 +int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
61539 +                          CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
61540 +                          enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
61541 +                          CLST *new_lcn)
61543 +       int err;
61544 +       CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
61545 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
61546 +       size_t cnt = run->count;
61548 +       for (;;) {
61549 +               err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
61550 +                                              opt);
61552 +               if (err == -ENOSPC && pre) {
61553 +                       pre = 0;
61554 +                       if (*pre_alloc)
61555 +                               *pre_alloc = 0;
61556 +                       continue;
61557 +               }
61559 +               if (err)
61560 +                       goto out;
61562 +               if (new_lcn && vcn == vcn0)
61563 +                       *new_lcn = lcn;
61565 +               /* Add new fragment into run storage */
61566 +               if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
61567 +                       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
61568 +                       wnd_set_free(wnd, lcn, flen);
61569 +                       up_write(&wnd->rw_lock);
61570 +                       err = -ENOMEM;
61571 +                       goto out;
61572 +               }
61574 +               vcn += flen;
61576 +               if (flen >= len || opt == ALLOCATE_MFT ||
61577 +                   (fr && run->count - cnt >= fr)) {
61578 +                       *alen = vcn - vcn0;
61579 +                       return 0;
61580 +               }
61582 +               len -= flen;
61583 +       }
61585 +out:
61586 +       /* undo */
61587 +       run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
61588 +       run_truncate(run, vcn0);
61590 +       return err;
61594 + * if page is not NULL - it is already contains resident data
61595 + * and locked (called from ni_write_frame)
61596 + */
61597 +int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
61598 +                         struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
61599 +                         u64 new_size, struct runs_tree *run,
61600 +                         struct ATTRIB **ins_attr, struct page *page)
61602 +       struct ntfs_sb_info *sbi;
61603 +       struct ATTRIB *attr_s;
61604 +       struct MFT_REC *rec;
61605 +       u32 used, asize, rsize, aoff, align;
61606 +       bool is_data;
61607 +       CLST len, alen;
61608 +       char *next;
61609 +       int err;
61611 +       if (attr->non_res) {
61612 +               *ins_attr = attr;
61613 +               return 0;
61614 +       }
61616 +       sbi = mi->sbi;
61617 +       rec = mi->mrec;
61618 +       attr_s = NULL;
61619 +       used = le32_to_cpu(rec->used);
61620 +       asize = le32_to_cpu(attr->size);
61621 +       next = Add2Ptr(attr, asize);
61622 +       aoff = PtrOffset(rec, attr);
61623 +       rsize = le32_to_cpu(attr->res.data_size);
61624 +       is_data = attr->type == ATTR_DATA && !attr->name_len;
61626 +       align = sbi->cluster_size;
61627 +       if (is_attr_compressed(attr))
61628 +               align <<= COMPRESSION_UNIT;
61629 +       len = (rsize + align - 1) >> sbi->cluster_bits;
61631 +       run_init(run);
61633 +       /* make a copy of original attribute */
61634 +       attr_s = ntfs_memdup(attr, asize);
61635 +       if (!attr_s) {
61636 +               err = -ENOMEM;
61637 +               goto out;
61638 +       }
61640 +       if (!len) {
61641 +               /* empty resident -> empty nonresident */
61642 +               alen = 0;
61643 +       } else {
61644 +               const char *data = resident_data(attr);
61646 +               err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
61647 +                                            ALLOCATE_DEF, &alen, 0, NULL);
61648 +               if (err)
61649 +                       goto out1;
61651 +               if (!rsize) {
61652 +                       /* empty resident -> non empty nonresident */
61653 +               } else if (!is_data) {
61654 +                       err = ntfs_sb_write_run(sbi, run, 0, data, rsize);
61655 +                       if (err)
61656 +                               goto out2;
61657 +               } else if (!page) {
61658 +                       char *kaddr;
61660 +                       page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
61661 +                       if (!page) {
61662 +                               err = -ENOMEM;
61663 +                               goto out2;
61664 +                       }
61665 +                       kaddr = kmap_atomic(page);
61666 +                       memcpy(kaddr, data, rsize);
61667 +                       memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
61668 +                       kunmap_atomic(kaddr);
61669 +                       flush_dcache_page(page);
61670 +                       SetPageUptodate(page);
61671 +                       set_page_dirty(page);
61672 +                       unlock_page(page);
61673 +                       put_page(page);
61674 +               }
61675 +       }
61677 +       /* remove original attribute */
61678 +       used -= asize;
61679 +       memmove(attr, Add2Ptr(attr, asize), used - aoff);
61680 +       rec->used = cpu_to_le32(used);
61681 +       mi->dirty = true;
61682 +       if (le)
61683 +               al_remove_le(ni, le);
61685 +       err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
61686 +                                   attr_s->name_len, run, 0, alen,
61687 +                                   attr_s->flags, &attr, NULL);
61688 +       if (err)
61689 +               goto out3;
61691 +       ntfs_free(attr_s);
61692 +       attr->nres.data_size = cpu_to_le64(rsize);
61693 +       attr->nres.valid_size = attr->nres.data_size;
61695 +       *ins_attr = attr;
61697 +       if (is_data)
61698 +               ni->ni_flags &= ~NI_FLAG_RESIDENT;
61700 +       /* Resident attribute becomes non resident */
61701 +       return 0;
61703 +out3:
61704 +       attr = Add2Ptr(rec, aoff);
61705 +       memmove(next, attr, used - aoff);
61706 +       memcpy(attr, attr_s, asize);
61707 +       rec->used = cpu_to_le32(used + asize);
61708 +       mi->dirty = true;
61709 +out2:
61710 +       /* undo: do not trim new allocated clusters */
61711 +       run_deallocate(sbi, run, false);
61712 +       run_close(run);
61713 +out1:
61714 +       ntfs_free(attr_s);
61715 +       /*reinsert le*/
61716 +out:
61717 +       return err;
61721 + * attr_set_size_res
61722 + *
61723 + * helper for attr_set_size
61724 + */
61725 +static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
61726 +                            struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
61727 +                            u64 new_size, struct runs_tree *run,
61728 +                            struct ATTRIB **ins_attr)
61730 +       struct ntfs_sb_info *sbi = mi->sbi;
61731 +       struct MFT_REC *rec = mi->mrec;
61732 +       u32 used = le32_to_cpu(rec->used);
61733 +       u32 asize = le32_to_cpu(attr->size);
61734 +       u32 aoff = PtrOffset(rec, attr);
61735 +       u32 rsize = le32_to_cpu(attr->res.data_size);
61736 +       u32 tail = used - aoff - asize;
61737 +       char *next = Add2Ptr(attr, asize);
61738 +       s64 dsize = QuadAlign(new_size) - QuadAlign(rsize);
61740 +       if (dsize < 0) {
61741 +               memmove(next + dsize, next, tail);
61742 +       } else if (dsize > 0) {
61743 +               if (used + dsize > sbi->max_bytes_per_attr)
61744 +                       return attr_make_nonresident(ni, attr, le, mi, new_size,
61745 +                                                    run, ins_attr, NULL);
61747 +               memmove(next + dsize, next, tail);
61748 +               memset(next, 0, dsize);
61749 +       }
61751 +       if (new_size > rsize)
61752 +               memset(Add2Ptr(resident_data(attr), rsize), 0,
61753 +                      new_size - rsize);
61755 +       rec->used = cpu_to_le32(used + dsize);
61756 +       attr->size = cpu_to_le32(asize + dsize);
61757 +       attr->res.data_size = cpu_to_le32(new_size);
61758 +       mi->dirty = true;
61759 +       *ins_attr = attr;
61761 +       return 0;
61765 + * attr_set_size
61766 + *
61767 + * change the size of attribute
61768 + * Extend:
61769 + *   - sparse/compressed: no allocated clusters
61770 + *   - normal: append allocated and preallocated new clusters
61771 + * Shrink:
61772 + *   - no deallocate if keep_prealloc is set
61773 + */
61774 +int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
61775 +                 const __le16 *name, u8 name_len, struct runs_tree *run,
61776 +                 u64 new_size, const u64 *new_valid, bool keep_prealloc,
61777 +                 struct ATTRIB **ret)
61779 +       int err = 0;
61780 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
61781 +       u8 cluster_bits = sbi->cluster_bits;
61782 +       bool is_mft =
61783 +               ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
61784 +       u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
61785 +       struct ATTRIB *attr = NULL, *attr_b;
61786 +       struct ATTR_LIST_ENTRY *le, *le_b;
61787 +       struct mft_inode *mi, *mi_b;
61788 +       CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
61789 +       CLST next_svcn, pre_alloc = -1, done = 0;
61790 +       bool is_ext;
61791 +       u32 align;
61792 +       struct MFT_REC *rec;
61794 +again:
61795 +       le_b = NULL;
61796 +       attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
61797 +                             &mi_b);
61798 +       if (!attr_b) {
61799 +               err = -ENOENT;
61800 +               goto out;
61801 +       }
61803 +       if (!attr_b->non_res) {
61804 +               err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
61805 +                                       &attr_b);
61806 +               if (err || !attr_b->non_res)
61807 +                       goto out;
61809 +               /* layout of records may be changed, so do a full search */
61810 +               goto again;
61811 +       }
61813 +       is_ext = is_attr_ext(attr_b);
61815 +again_1:
61816 +       align = sbi->cluster_size;
61818 +       if (is_ext) {
61819 +               align <<= attr_b->nres.c_unit;
61820 +               if (is_attr_sparsed(attr_b))
61821 +                       keep_prealloc = false;
61822 +       }
61824 +       old_valid = le64_to_cpu(attr_b->nres.valid_size);
61825 +       old_size = le64_to_cpu(attr_b->nres.data_size);
61826 +       old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
61827 +       old_alen = old_alloc >> cluster_bits;
61829 +       new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
61830 +       new_alen = new_alloc >> cluster_bits;
61832 +       if (keep_prealloc && is_ext)
61833 +               keep_prealloc = false;
61835 +       if (keep_prealloc && new_size < old_size) {
61836 +               attr_b->nres.data_size = cpu_to_le64(new_size);
61837 +               mi_b->dirty = true;
61838 +               goto ok;
61839 +       }
61841 +       vcn = old_alen - 1;
61843 +       svcn = le64_to_cpu(attr_b->nres.svcn);
61844 +       evcn = le64_to_cpu(attr_b->nres.evcn);
61846 +       if (svcn <= vcn && vcn <= evcn) {
61847 +               attr = attr_b;
61848 +               le = le_b;
61849 +               mi = mi_b;
61850 +       } else if (!le_b) {
61851 +               err = -EINVAL;
61852 +               goto out;
61853 +       } else {
61854 +               le = le_b;
61855 +               attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
61856 +                                   &mi);
61857 +               if (!attr) {
61858 +                       err = -EINVAL;
61859 +                       goto out;
61860 +               }
61862 +next_le_1:
61863 +               svcn = le64_to_cpu(attr->nres.svcn);
61864 +               evcn = le64_to_cpu(attr->nres.evcn);
61865 +       }
61867 +next_le:
61868 +       rec = mi->mrec;
61870 +       err = attr_load_runs(attr, ni, run, NULL);
61871 +       if (err)
61872 +               goto out;
61874 +       if (new_size > old_size) {
61875 +               CLST to_allocate;
61876 +               size_t free;
61878 +               if (new_alloc <= old_alloc) {
61879 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
61880 +                       mi_b->dirty = true;
61881 +                       goto ok;
61882 +               }
61884 +               to_allocate = new_alen - old_alen;
61885 +add_alloc_in_same_attr_seg:
61886 +               lcn = 0;
61887 +               if (is_mft) {
61888 +                       /* mft allocates clusters from mftzone */
61889 +                       pre_alloc = 0;
61890 +               } else if (is_ext) {
61891 +                       /* no preallocate for sparse/compress */
61892 +                       pre_alloc = 0;
61893 +               } else if (pre_alloc == -1) {
61894 +                       pre_alloc = 0;
61895 +                       if (type == ATTR_DATA && !name_len &&
61896 +                           sbi->options.prealloc) {
61897 +                               CLST new_alen2 = bytes_to_cluster(
61898 +                                       sbi, get_pre_allocated(new_size));
61899 +                               pre_alloc = new_alen2 - new_alen;
61900 +                       }
61902 +                       /* Get the last lcn to allocate from */
61903 +                       if (old_alen &&
61904 +                           !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
61905 +                               lcn = SPARSE_LCN;
61906 +                       }
61908 +                       if (lcn == SPARSE_LCN)
61909 +                               lcn = 0;
61910 +                       else if (lcn)
61911 +                               lcn += 1;
61913 +                       free = wnd_zeroes(&sbi->used.bitmap);
61914 +                       if (to_allocate > free) {
61915 +                               err = -ENOSPC;
61916 +                               goto out;
61917 +                       }
61919 +                       if (pre_alloc && to_allocate + pre_alloc > free)
61920 +                               pre_alloc = 0;
61921 +               }
61923 +               vcn = old_alen;
61925 +               if (is_ext) {
61926 +                       if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
61927 +                                          false)) {
61928 +                               err = -ENOMEM;
61929 +                               goto out;
61930 +                       }
61931 +                       alen = to_allocate;
61932 +               } else {
61933 +                       /* ~3 bytes per fragment */
61934 +                       err = attr_allocate_clusters(
61935 +                               sbi, run, vcn, lcn, to_allocate, &pre_alloc,
61936 +                               is_mft ? ALLOCATE_MFT : 0, &alen,
61937 +                               is_mft ? 0
61938 +                                      : (sbi->record_size -
61939 +                                         le32_to_cpu(rec->used) + 8) /
61940 +                                                        3 +
61941 +                                                1,
61942 +                               NULL);
61943 +                       if (err)
61944 +                               goto out;
61945 +               }
61947 +               done += alen;
61948 +               vcn += alen;
61949 +               if (to_allocate > alen)
61950 +                       to_allocate -= alen;
61951 +               else
61952 +                       to_allocate = 0;
61954 +pack_runs:
61955 +               err = mi_pack_runs(mi, attr, run, vcn - svcn);
61956 +               if (err)
61957 +                       goto out;
61959 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
61960 +               new_alloc_tmp = (u64)next_svcn << cluster_bits;
61961 +               attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
61962 +               mi_b->dirty = true;
61964 +               if (next_svcn >= vcn && !to_allocate) {
61965 +                       /* Normal way. update attribute and exit */
61966 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
61967 +                       goto ok;
61968 +               }
61970 +               /* at least two mft to avoid recursive loop*/
61971 +               if (is_mft && next_svcn == vcn &&
61972 +                   ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
61973 +                       new_size = new_alloc_tmp;
61974 +                       attr_b->nres.data_size = attr_b->nres.alloc_size;
61975 +                       goto ok;
61976 +               }
61978 +               if (le32_to_cpu(rec->used) < sbi->record_size) {
61979 +                       old_alen = next_svcn;
61980 +                       evcn = old_alen - 1;
61981 +                       goto add_alloc_in_same_attr_seg;
61982 +               }
61984 +               attr_b->nres.data_size = attr_b->nres.alloc_size;
61985 +               if (new_alloc_tmp < old_valid)
61986 +                       attr_b->nres.valid_size = attr_b->nres.data_size;
61988 +               if (type == ATTR_LIST) {
61989 +                       err = ni_expand_list(ni);
61990 +                       if (err)
61991 +                               goto out;
61992 +                       if (next_svcn < vcn)
61993 +                               goto pack_runs;
61995 +                       /* layout of records is changed */
61996 +                       goto again;
61997 +               }
61999 +               if (!ni->attr_list.size) {
62000 +                       err = ni_create_attr_list(ni);
62001 +                       if (err)
62002 +                               goto out;
62003 +                       /* layout of records is changed */
62004 +               }
62006 +               if (next_svcn >= vcn) {
62007 +                       /* this is mft data, repeat */
62008 +                       goto again;
62009 +               }
62011 +               /* insert new attribute segment */
62012 +               err = ni_insert_nonresident(ni, type, name, name_len, run,
62013 +                                           next_svcn, vcn - next_svcn,
62014 +                                           attr_b->flags, &attr, &mi);
62015 +               if (err)
62016 +                       goto out;
62018 +               if (!is_mft)
62019 +                       run_truncate_head(run, evcn + 1);
62021 +               svcn = le64_to_cpu(attr->nres.svcn);
62022 +               evcn = le64_to_cpu(attr->nres.evcn);
62024 +               le_b = NULL;
62025 +               /* layout of records maybe changed */
62026 +               /* find base attribute to update*/
62027 +               attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
62028 +                                     NULL, &mi_b);
62029 +               if (!attr_b) {
62030 +                       err = -ENOENT;
62031 +                       goto out;
62032 +               }
62034 +               attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
62035 +               attr_b->nres.data_size = attr_b->nres.alloc_size;
62036 +               attr_b->nres.valid_size = attr_b->nres.alloc_size;
62037 +               mi_b->dirty = true;
62038 +               goto again_1;
62039 +       }
62041 +       if (new_size != old_size ||
62042 +           (new_alloc != old_alloc && !keep_prealloc)) {
62043 +               vcn = max(svcn, new_alen);
62044 +               new_alloc_tmp = (u64)vcn << cluster_bits;
62046 +               alen = 0;
62047 +               err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
62048 +                                       true);
62049 +               if (err)
62050 +                       goto out;
62052 +               run_truncate(run, vcn);
62054 +               if (vcn > svcn) {
62055 +                       err = mi_pack_runs(mi, attr, run, vcn - svcn);
62056 +                       if (err)
62057 +                               goto out;
62058 +               } else if (le && le->vcn) {
62059 +                       u16 le_sz = le16_to_cpu(le->size);
62061 +                       /*
62062 +                        * NOTE: list entries for one attribute are always
62063 +                        * the same size. We deal with last entry (vcn==0)
62064 +                        * and it is not first in entries array
62065 +                        * (list entry for std attribute always first)
62066 +                        * So it is safe to step back
62067 +                        */
62068 +                       mi_remove_attr(mi, attr);
62070 +                       if (!al_remove_le(ni, le)) {
62071 +                               err = -EINVAL;
62072 +                               goto out;
62073 +                       }
62075 +                       le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
62076 +               } else {
62077 +                       attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
62078 +                       mi->dirty = true;
62079 +               }
62081 +               attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
62083 +               if (vcn == new_alen) {
62084 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
62085 +                       if (new_size < old_valid)
62086 +                               attr_b->nres.valid_size =
62087 +                                       attr_b->nres.data_size;
62088 +               } else {
62089 +                       if (new_alloc_tmp <=
62090 +                           le64_to_cpu(attr_b->nres.data_size))
62091 +                               attr_b->nres.data_size =
62092 +                                       attr_b->nres.alloc_size;
62093 +                       if (new_alloc_tmp <
62094 +                           le64_to_cpu(attr_b->nres.valid_size))
62095 +                               attr_b->nres.valid_size =
62096 +                                       attr_b->nres.alloc_size;
62097 +               }
62099 +               if (is_ext)
62100 +                       le64_sub_cpu(&attr_b->nres.total_size,
62101 +                                    ((u64)alen << cluster_bits));
62103 +               mi_b->dirty = true;
62105 +               if (new_alloc_tmp <= new_alloc)
62106 +                       goto ok;
62108 +               old_size = new_alloc_tmp;
62109 +               vcn = svcn - 1;
62111 +               if (le == le_b) {
62112 +                       attr = attr_b;
62113 +                       mi = mi_b;
62114 +                       evcn = svcn - 1;
62115 +                       svcn = 0;
62116 +                       goto next_le;
62117 +               }
62119 +               if (le->type != type || le->name_len != name_len ||
62120 +                   memcmp(le_name(le), name, name_len * sizeof(short))) {
62121 +                       err = -EINVAL;
62122 +                       goto out;
62123 +               }
62125 +               err = ni_load_mi(ni, le, &mi);
62126 +               if (err)
62127 +                       goto out;
62129 +               attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
62130 +               if (!attr) {
62131 +                       err = -EINVAL;
62132 +                       goto out;
62133 +               }
62134 +               goto next_le_1;
62135 +       }
62137 +ok:
62138 +       if (new_valid) {
62139 +               __le64 valid = cpu_to_le64(min(*new_valid, new_size));
62141 +               if (attr_b->nres.valid_size != valid) {
62142 +                       attr_b->nres.valid_size = valid;
62143 +                       mi_b->dirty = true;
62144 +               }
62145 +       }
62147 +out:
62148 +       if (!err && attr_b && ret)
62149 +               *ret = attr_b;
62151 +       /* update inode_set_bytes*/
62152 +       if (!err && ((type == ATTR_DATA && !name_len) ||
62153 +                    (type == ATTR_ALLOC && name == I30_NAME))) {
62154 +               bool dirty = false;
62156 +               if (ni->vfs_inode.i_size != new_size) {
62157 +                       ni->vfs_inode.i_size = new_size;
62158 +                       dirty = true;
62159 +               }
62161 +               if (attr_b && attr_b->non_res) {
62162 +                       new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
62163 +                       if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
62164 +                               inode_set_bytes(&ni->vfs_inode, new_alloc);
62165 +                               dirty = true;
62166 +                       }
62167 +               }
62169 +               if (dirty) {
62170 +                       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
62171 +                       mark_inode_dirty(&ni->vfs_inode);
62172 +               }
62173 +       }
62175 +       return err;
62178 +int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
62179 +                       CLST *len, bool *new)
62181 +       int err = 0;
62182 +       struct runs_tree *run = &ni->file.run;
62183 +       struct ntfs_sb_info *sbi;
62184 +       u8 cluster_bits;
62185 +       struct ATTRIB *attr = NULL, *attr_b;
62186 +       struct ATTR_LIST_ENTRY *le, *le_b;
62187 +       struct mft_inode *mi, *mi_b;
62188 +       CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
62189 +       u64 total_size;
62190 +       u32 clst_per_frame;
62191 +       bool ok;
62193 +       if (new)
62194 +               *new = false;
62196 +       down_read(&ni->file.run_lock);
62197 +       ok = run_lookup_entry(run, vcn, lcn, len, NULL);
62198 +       up_read(&ni->file.run_lock);
62200 +       if (ok && (*lcn != SPARSE_LCN || !new)) {
62201 +               /* normal way */
62202 +               return 0;
62203 +       }
62205 +       if (!clen)
62206 +               clen = 1;
62208 +       if (ok && clen > *len)
62209 +               clen = *len;
62211 +       sbi = ni->mi.sbi;
62212 +       cluster_bits = sbi->cluster_bits;
62214 +       ni_lock(ni);
62215 +       down_write(&ni->file.run_lock);
62217 +       le_b = NULL;
62218 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
62219 +       if (!attr_b) {
62220 +               err = -ENOENT;
62221 +               goto out;
62222 +       }
62224 +       if (!attr_b->non_res) {
62225 +               *lcn = RESIDENT_LCN;
62226 +               *len = 1;
62227 +               goto out;
62228 +       }
62230 +       asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
62231 +       if (vcn >= asize) {
62232 +               err = -EINVAL;
62233 +               goto out;
62234 +       }
62236 +       clst_per_frame = 1u << attr_b->nres.c_unit;
62237 +       to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
62239 +       if (vcn + to_alloc > asize)
62240 +               to_alloc = asize - vcn;
62242 +       svcn = le64_to_cpu(attr_b->nres.svcn);
62243 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
62245 +       attr = attr_b;
62246 +       le = le_b;
62247 +       mi = mi_b;
62249 +       if (le_b && (vcn < svcn || evcn1 <= vcn)) {
62250 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
62251 +                                   &mi);
62252 +               if (!attr) {
62253 +                       err = -EINVAL;
62254 +                       goto out;
62255 +               }
62256 +               svcn = le64_to_cpu(attr->nres.svcn);
62257 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
62258 +       }
62260 +       err = attr_load_runs(attr, ni, run, NULL);
62261 +       if (err)
62262 +               goto out;
62264 +       if (!ok) {
62265 +               ok = run_lookup_entry(run, vcn, lcn, len, NULL);
62266 +               if (ok && (*lcn != SPARSE_LCN || !new)) {
62267 +                       /* normal way */
62268 +                       err = 0;
62269 +                       goto ok;
62270 +               }
62272 +               if (!ok && !new) {
62273 +                       *len = 0;
62274 +                       err = 0;
62275 +                       goto ok;
62276 +               }
62278 +               if (ok && clen > *len) {
62279 +                       clen = *len;
62280 +                       to_alloc = (clen + clst_per_frame - 1) &
62281 +                                  ~(clst_per_frame - 1);
62282 +               }
62283 +       }
62285 +       if (!is_attr_ext(attr_b)) {
62286 +               err = -EINVAL;
62287 +               goto out;
62288 +       }
62290 +       /* Get the last lcn to allocate from */
62291 +       hint = 0;
62293 +       if (vcn > evcn1) {
62294 +               if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
62295 +                                  false)) {
62296 +                       err = -ENOMEM;
62297 +                       goto out;
62298 +               }
62299 +       } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
62300 +               hint = -1;
62301 +       }
62303 +       err = attr_allocate_clusters(
62304 +               sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
62305 +               (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
62306 +               lcn);
62307 +       if (err)
62308 +               goto out;
62309 +       *new = true;
62311 +       end = vcn + *len;
62313 +       total_size = le64_to_cpu(attr_b->nres.total_size) +
62314 +                    ((u64)*len << cluster_bits);
62316 +repack:
62317 +       err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
62318 +       if (err)
62319 +               goto out;
62321 +       attr_b->nres.total_size = cpu_to_le64(total_size);
62322 +       inode_set_bytes(&ni->vfs_inode, total_size);
62323 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
62325 +       mi_b->dirty = true;
62326 +       mark_inode_dirty(&ni->vfs_inode);
62328 +       /* stored [vcn : next_svcn) from [vcn : end) */
62329 +       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
62331 +       if (end <= evcn1) {
62332 +               if (next_svcn == evcn1) {
62333 +                       /* Normal way. update attribute and exit */
62334 +                       goto ok;
62335 +               }
62336 +               /* add new segment [next_svcn : evcn1 - next_svcn )*/
62337 +               if (!ni->attr_list.size) {
62338 +                       err = ni_create_attr_list(ni);
62339 +                       if (err)
62340 +                               goto out;
62341 +                       /* layout of records is changed */
62342 +                       le_b = NULL;
62343 +                       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
62344 +                                             0, NULL, &mi_b);
62345 +                       if (!attr_b) {
62346 +                               err = -ENOENT;
62347 +                               goto out;
62348 +                       }
62350 +                       attr = attr_b;
62351 +                       le = le_b;
62352 +                       mi = mi_b;
62353 +                       goto repack;
62354 +               }
62355 +       }
62357 +       svcn = evcn1;
62359 +       /* Estimate next attribute */
62360 +       attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
62362 +       if (attr) {
62363 +               CLST alloc = bytes_to_cluster(
62364 +                       sbi, le64_to_cpu(attr_b->nres.alloc_size));
62365 +               CLST evcn = le64_to_cpu(attr->nres.evcn);
62367 +               if (end < next_svcn)
62368 +                       end = next_svcn;
62369 +               while (end > evcn) {
62370 +                       /* remove segment [svcn : evcn)*/
62371 +                       mi_remove_attr(mi, attr);
62373 +                       if (!al_remove_le(ni, le)) {
62374 +                               err = -EINVAL;
62375 +                               goto out;
62376 +                       }
62378 +                       if (evcn + 1 >= alloc) {
62379 +                               /* last attribute segment */
62380 +                               evcn1 = evcn + 1;
62381 +                               goto ins_ext;
62382 +                       }
62384 +                       if (ni_load_mi(ni, le, &mi)) {
62385 +                               attr = NULL;
62386 +                               goto out;
62387 +                       }
62389 +                       attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
62390 +                                           &le->id);
62391 +                       if (!attr) {
62392 +                               err = -EINVAL;
62393 +                               goto out;
62394 +                       }
62395 +                       svcn = le64_to_cpu(attr->nres.svcn);
62396 +                       evcn = le64_to_cpu(attr->nres.evcn);
62397 +               }
62399 +               if (end < svcn)
62400 +                       end = svcn;
62402 +               err = attr_load_runs(attr, ni, run, &end);
62403 +               if (err)
62404 +                       goto out;
62406 +               evcn1 = evcn + 1;
62407 +               attr->nres.svcn = cpu_to_le64(next_svcn);
62408 +               err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
62409 +               if (err)
62410 +                       goto out;
62412 +               le->vcn = cpu_to_le64(next_svcn);
62413 +               ni->attr_list.dirty = true;
62414 +               mi->dirty = true;
62416 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
62417 +       }
62418 +ins_ext:
62419 +       if (evcn1 > next_svcn) {
62420 +               err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
62421 +                                           next_svcn, evcn1 - next_svcn,
62422 +                                           attr_b->flags, &attr, &mi);
62423 +               if (err)
62424 +                       goto out;
62425 +       }
62426 +ok:
62427 +       run_truncate_around(run, vcn);
62428 +out:
62429 +       up_write(&ni->file.run_lock);
62430 +       ni_unlock(ni);
62432 +       return err;
62435 +int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
62437 +       u64 vbo;
62438 +       struct ATTRIB *attr;
62439 +       u32 data_size;
62441 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
62442 +       if (!attr)
62443 +               return -EINVAL;
62445 +       if (attr->non_res)
62446 +               return E_NTFS_NONRESIDENT;
62448 +       vbo = page->index << PAGE_SHIFT;
62449 +       data_size = le32_to_cpu(attr->res.data_size);
62450 +       if (vbo < data_size) {
62451 +               const char *data = resident_data(attr);
62452 +               char *kaddr = kmap_atomic(page);
62453 +               u32 use = data_size - vbo;
62455 +               if (use > PAGE_SIZE)
62456 +                       use = PAGE_SIZE;
62458 +               memcpy(kaddr, data + vbo, use);
62459 +               memset(kaddr + use, 0, PAGE_SIZE - use);
62460 +               kunmap_atomic(kaddr);
62461 +               flush_dcache_page(page);
62462 +               SetPageUptodate(page);
62463 +       } else if (!PageUptodate(page)) {
62464 +               zero_user_segment(page, 0, PAGE_SIZE);
62465 +               SetPageUptodate(page);
62466 +       }
62468 +       return 0;
62471 +int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
62473 +       u64 vbo;
62474 +       struct mft_inode *mi;
62475 +       struct ATTRIB *attr;
62476 +       u32 data_size;
62478 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
62479 +       if (!attr)
62480 +               return -EINVAL;
62482 +       if (attr->non_res) {
62483 +               /*return special error code to check this case*/
62484 +               return E_NTFS_NONRESIDENT;
62485 +       }
62487 +       vbo = page->index << PAGE_SHIFT;
62488 +       data_size = le32_to_cpu(attr->res.data_size);
62489 +       if (vbo < data_size) {
62490 +               char *data = resident_data(attr);
62491 +               char *kaddr = kmap_atomic(page);
62492 +               u32 use = data_size - vbo;
62494 +               if (use > PAGE_SIZE)
62495 +                       use = PAGE_SIZE;
62496 +               memcpy(data + vbo, kaddr, use);
62497 +               kunmap_atomic(kaddr);
62498 +               mi->dirty = true;
62499 +       }
62500 +       ni->i_valid = data_size;
62502 +       return 0;
62506 + * attr_load_runs_vcn
62507 + *
62508 + * load runs with vcn
62509 + */
62510 +int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
62511 +                      const __le16 *name, u8 name_len, struct runs_tree *run,
62512 +                      CLST vcn)
62514 +       struct ATTRIB *attr;
62515 +       int err;
62516 +       CLST svcn, evcn;
62517 +       u16 ro;
62519 +       attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
62520 +       if (!attr)
62521 +               return -ENOENT;
62523 +       svcn = le64_to_cpu(attr->nres.svcn);
62524 +       evcn = le64_to_cpu(attr->nres.evcn);
62526 +       if (evcn < vcn || vcn < svcn)
62527 +               return -EINVAL;
62529 +       ro = le16_to_cpu(attr->nres.run_off);
62530 +       err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
62531 +                           Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
62532 +       if (err < 0)
62533 +               return err;
62534 +       return 0;
62538 + * load runs for given range [from to)
62539 + */
62540 +int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
62541 +                        const __le16 *name, u8 name_len, struct runs_tree *run,
62542 +                        u64 from, u64 to)
62544 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
62545 +       u8 cluster_bits = sbi->cluster_bits;
62546 +       CLST vcn = from >> cluster_bits;
62547 +       CLST vcn_last = (to - 1) >> cluster_bits;
62548 +       CLST lcn, clen;
62549 +       int err;
62551 +       for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
62552 +               if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
62553 +                       err = attr_load_runs_vcn(ni, type, name, name_len, run,
62554 +                                                vcn);
62555 +                       if (err)
62556 +                               return err;
62557 +                       clen = 0; /*next run_lookup_entry(vcn) must be success*/
62558 +               }
62559 +       }
62561 +       return 0;
62564 +#ifdef CONFIG_NTFS3_LZX_XPRESS
62566 + * attr_wof_frame_info
62567 + *
62568 + * read header of xpress/lzx file to get info about frame
62569 + */
62570 +int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
62571 +                       struct runs_tree *run, u64 frame, u64 frames,
62572 +                       u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
62574 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
62575 +       u64 vbo[2], off[2], wof_size;
62576 +       u32 voff;
62577 +       u8 bytes_per_off;
62578 +       char *addr;
62579 +       struct page *page;
62580 +       int i, err;
62581 +       __le32 *off32;
62582 +       __le64 *off64;
62584 +       if (ni->vfs_inode.i_size < 0x100000000ull) {
62585 +               /* file starts with array of 32 bit offsets */
62586 +               bytes_per_off = sizeof(__le32);
62587 +               vbo[1] = frame << 2;
62588 +               *vbo_data = frames << 2;
62589 +       } else {
62590 +               /* file starts with array of 64 bit offsets */
62591 +               bytes_per_off = sizeof(__le64);
62592 +               vbo[1] = frame << 3;
62593 +               *vbo_data = frames << 3;
62594 +       }
62596 +       /*
62597 +        * read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts
62598 +        * read 4/8 bytes at [vbo] == offset where compressed frame ends
62599 +        */
62600 +       if (!attr->non_res) {
62601 +               if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
62602 +                       ntfs_inode_err(&ni->vfs_inode, "is corrupted");
62603 +                       return -EINVAL;
62604 +               }
62605 +               addr = resident_data(attr);
62607 +               if (bytes_per_off == sizeof(__le32)) {
62608 +                       off32 = Add2Ptr(addr, vbo[1]);
62609 +                       off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
62610 +                       off[1] = le32_to_cpu(off32[0]);
62611 +               } else {
62612 +                       off64 = Add2Ptr(addr, vbo[1]);
62613 +                       off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
62614 +                       off[1] = le64_to_cpu(off64[0]);
62615 +               }
62617 +               *vbo_data += off[0];
62618 +               *ondisk_size = off[1] - off[0];
62619 +               return 0;
62620 +       }
62622 +       wof_size = le64_to_cpu(attr->nres.data_size);
62623 +       down_write(&ni->file.run_lock);
62624 +       page = ni->file.offs_page;
62625 +       if (!page) {
62626 +               page = alloc_page(GFP_KERNEL);
62627 +               if (!page) {
62628 +                       err = -ENOMEM;
62629 +                       goto out;
62630 +               }
62631 +               page->index = -1;
62632 +               ni->file.offs_page = page;
62633 +       }
62634 +       lock_page(page);
62635 +       addr = page_address(page);
62637 +       if (vbo[1]) {
62638 +               voff = vbo[1] & (PAGE_SIZE - 1);
62639 +               vbo[0] = vbo[1] - bytes_per_off;
62640 +               i = 0;
62641 +       } else {
62642 +               voff = 0;
62643 +               vbo[0] = 0;
62644 +               off[0] = 0;
62645 +               i = 1;
62646 +       }
62648 +       do {
62649 +               pgoff_t index = vbo[i] >> PAGE_SHIFT;
62651 +               if (index != page->index) {
62652 +                       u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
62653 +                       u64 to = min(from + PAGE_SIZE, wof_size);
62655 +                       err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
62656 +                                                  ARRAY_SIZE(WOF_NAME), run,
62657 +                                                  from, to);
62658 +                       if (err)
62659 +                               goto out1;
62661 +                       err = ntfs_bio_pages(sbi, run, &page, 1, from,
62662 +                                            to - from, REQ_OP_READ);
62663 +                       if (err) {
62664 +                               page->index = -1;
62665 +                               goto out1;
62666 +                       }
62667 +                       page->index = index;
62668 +               }
62670 +               if (i) {
62671 +                       if (bytes_per_off == sizeof(__le32)) {
62672 +                               off32 = Add2Ptr(addr, voff);
62673 +                               off[1] = le32_to_cpu(*off32);
62674 +                       } else {
62675 +                               off64 = Add2Ptr(addr, voff);
62676 +                               off[1] = le64_to_cpu(*off64);
62677 +                       }
62678 +               } else if (!voff) {
62679 +                       if (bytes_per_off == sizeof(__le32)) {
62680 +                               off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
62681 +                               off[0] = le32_to_cpu(*off32);
62682 +                       } else {
62683 +                               off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
62684 +                               off[0] = le64_to_cpu(*off64);
62685 +                       }
62686 +               } else {
62687 +                       /* two values in one page*/
62688 +                       if (bytes_per_off == sizeof(__le32)) {
62689 +                               off32 = Add2Ptr(addr, voff);
62690 +                               off[0] = le32_to_cpu(off32[-1]);
62691 +                               off[1] = le32_to_cpu(off32[0]);
62692 +                       } else {
62693 +                               off64 = Add2Ptr(addr, voff);
62694 +                               off[0] = le64_to_cpu(off64[-1]);
62695 +                               off[1] = le64_to_cpu(off64[0]);
62696 +                       }
62697 +                       break;
62698 +               }
62699 +       } while (++i < 2);
62701 +       *vbo_data += off[0];
62702 +       *ondisk_size = off[1] - off[0];
62704 +out1:
62705 +       unlock_page(page);
62706 +out:
62707 +       up_write(&ni->file.run_lock);
62708 +       return err;
62710 +#endif
62713 + * attr_is_frame_compressed
62714 + *
62715 + * This function is used to detect compressed frame
62716 + */
62717 +int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
62718 +                            CLST frame, CLST *clst_data)
62720 +       int err;
62721 +       u32 clst_frame;
62722 +       CLST clen, lcn, vcn, alen, slen, vcn_next;
62723 +       size_t idx;
62724 +       struct runs_tree *run;
62726 +       *clst_data = 0;
62728 +       if (!is_attr_compressed(attr))
62729 +               return 0;
62731 +       if (!attr->non_res)
62732 +               return 0;
62734 +       clst_frame = 1u << attr->nres.c_unit;
62735 +       vcn = frame * clst_frame;
62736 +       run = &ni->file.run;
62738 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
62739 +               err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
62740 +                                        attr->name_len, run, vcn);
62741 +               if (err)
62742 +                       return err;
62744 +               if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
62745 +                       return -EINVAL;
62746 +       }
62748 +       if (lcn == SPARSE_LCN) {
62749 +               /* sparsed frame */
62750 +               return 0;
62751 +       }
62753 +       if (clen >= clst_frame) {
62754 +               /*
62755 +                * The frame is not compressed 'cause
62756 +                * it does not contain any sparse clusters
62757 +                */
62758 +               *clst_data = clst_frame;
62759 +               return 0;
62760 +       }
62762 +       alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
62763 +       slen = 0;
62764 +       *clst_data = clen;
62766 +       /*
62767 +        * The frame is compressed if *clst_data + slen >= clst_frame
62768 +        * Check next fragments
62769 +        */
62770 +       while ((vcn += clen) < alen) {
62771 +               vcn_next = vcn;
62773 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
62774 +                   vcn_next != vcn) {
62775 +                       err = attr_load_runs_vcn(ni, attr->type,
62776 +                                                attr_name(attr),
62777 +                                                attr->name_len, run, vcn_next);
62778 +                       if (err)
62779 +                               return err;
62780 +                       vcn = vcn_next;
62782 +                       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
62783 +                               return -EINVAL;
62784 +               }
62786 +               if (lcn == SPARSE_LCN) {
62787 +                       slen += clen;
62788 +               } else {
62789 +                       if (slen) {
62790 +                               /*
62791 +                                * data_clusters + sparse_clusters =
62792 +                                * not enough for frame
62793 +                                */
62794 +                               return -EINVAL;
62795 +                       }
62796 +                       *clst_data += clen;
62797 +               }
62799 +               if (*clst_data + slen >= clst_frame) {
62800 +                       if (!slen) {
62801 +                               /*
62802 +                                * There is no sparsed clusters in this frame
62803 +                                * So it is not compressed
62804 +                                */
62805 +                               *clst_data = clst_frame;
62806 +                       } else {
62807 +                               /*frame is compressed*/
62808 +                       }
62809 +                       break;
62810 +               }
62811 +       }
62813 +       return 0;
62817 + * attr_allocate_frame
62818 + *
62819 + * allocate/free clusters for 'frame'
62820 + * assumed: down_write(&ni->file.run_lock);
62821 + */
62822 +int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
62823 +                       u64 new_valid)
62825 +       int err = 0;
62826 +       struct runs_tree *run = &ni->file.run;
62827 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
62828 +       struct ATTRIB *attr = NULL, *attr_b;
62829 +       struct ATTR_LIST_ENTRY *le, *le_b;
62830 +       struct mft_inode *mi, *mi_b;
62831 +       CLST svcn, evcn1, next_svcn, lcn, len;
62832 +       CLST vcn, end, clst_data;
62833 +       u64 total_size, valid_size, data_size;
62835 +       le_b = NULL;
62836 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
62837 +       if (!attr_b)
62838 +               return -ENOENT;
62840 +       if (!is_attr_ext(attr_b))
62841 +               return -EINVAL;
62843 +       vcn = frame << NTFS_LZNT_CUNIT;
62844 +       total_size = le64_to_cpu(attr_b->nres.total_size);
62846 +       svcn = le64_to_cpu(attr_b->nres.svcn);
62847 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
62848 +       data_size = le64_to_cpu(attr_b->nres.data_size);
62850 +       if (svcn <= vcn && vcn < evcn1) {
62851 +               attr = attr_b;
62852 +               le = le_b;
62853 +               mi = mi_b;
62854 +       } else if (!le_b) {
62855 +               err = -EINVAL;
62856 +               goto out;
62857 +       } else {
62858 +               le = le_b;
62859 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
62860 +                                   &mi);
62861 +               if (!attr) {
62862 +                       err = -EINVAL;
62863 +                       goto out;
62864 +               }
62865 +               svcn = le64_to_cpu(attr->nres.svcn);
62866 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
62867 +       }
62869 +       err = attr_load_runs(attr, ni, run, NULL);
62870 +       if (err)
62871 +               goto out;
62873 +       err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
62874 +       if (err)
62875 +               goto out;
62877 +       total_size -= (u64)clst_data << sbi->cluster_bits;
62879 +       len = bytes_to_cluster(sbi, compr_size);
62881 +       if (len == clst_data)
62882 +               goto out;
62884 +       if (len < clst_data) {
62885 +               err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
62886 +                                       NULL, true);
62887 +               if (err)
62888 +                       goto out;
62890 +               if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
62891 +                                  false)) {
62892 +                       err = -ENOMEM;
62893 +                       goto out;
62894 +               }
62895 +               end = vcn + clst_data;
62896 +               /* run contains updated range [vcn + len : end) */
62897 +       } else {
62898 +               CLST alen, hint = 0;
62899 +               /* Get the last lcn to allocate from */
62900 +               if (vcn + clst_data &&
62901 +                   !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
62902 +                                     NULL)) {
62903 +                       hint = -1;
62904 +               }
62906 +               err = attr_allocate_clusters(sbi, run, vcn + clst_data,
62907 +                                            hint + 1, len - clst_data, NULL, 0,
62908 +                                            &alen, 0, &lcn);
62909 +               if (err)
62910 +                       goto out;
62912 +               end = vcn + len;
62913 +               /* run contains updated range [vcn + clst_data : end) */
62914 +       }
62916 +       total_size += (u64)len << sbi->cluster_bits;
62918 +repack:
62919 +       err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
62920 +       if (err)
62921 +               goto out;
62923 +       attr_b->nres.total_size = cpu_to_le64(total_size);
62924 +       inode_set_bytes(&ni->vfs_inode, total_size);
62926 +       mi_b->dirty = true;
62927 +       mark_inode_dirty(&ni->vfs_inode);
62929 +       /* stored [vcn : next_svcn) from [vcn : end) */
62930 +       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
62932 +       if (end <= evcn1) {
62933 +               if (next_svcn == evcn1) {
62934 +                       /* Normal way. update attribute and exit */
62935 +                       goto ok;
62936 +               }
62937 +               /* add new segment [next_svcn : evcn1 - next_svcn )*/
62938 +               if (!ni->attr_list.size) {
62939 +                       err = ni_create_attr_list(ni);
62940 +                       if (err)
62941 +                               goto out;
62942 +                       /* layout of records is changed */
62943 +                       le_b = NULL;
62944 +                       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
62945 +                                             0, NULL, &mi_b);
62946 +                       if (!attr_b) {
62947 +                               err = -ENOENT;
62948 +                               goto out;
62949 +                       }
62951 +                       attr = attr_b;
62952 +                       le = le_b;
62953 +                       mi = mi_b;
62954 +                       goto repack;
62955 +               }
62956 +       }
62958 +       svcn = evcn1;
62960 +       /* Estimate next attribute */
62961 +       attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
62963 +       if (attr) {
62964 +               CLST alloc = bytes_to_cluster(
62965 +                       sbi, le64_to_cpu(attr_b->nres.alloc_size));
62966 +               CLST evcn = le64_to_cpu(attr->nres.evcn);
62968 +               if (end < next_svcn)
62969 +                       end = next_svcn;
62970 +               while (end > evcn) {
62971 +                       /* remove segment [svcn : evcn)*/
62972 +                       mi_remove_attr(mi, attr);
62974 +                       if (!al_remove_le(ni, le)) {
62975 +                               err = -EINVAL;
62976 +                               goto out;
62977 +                       }
62979 +                       if (evcn + 1 >= alloc) {
62980 +                               /* last attribute segment */
62981 +                               evcn1 = evcn + 1;
62982 +                               goto ins_ext;
62983 +                       }
62985 +                       if (ni_load_mi(ni, le, &mi)) {
62986 +                               attr = NULL;
62987 +                               goto out;
62988 +                       }
62990 +                       attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
62991 +                                           &le->id);
62992 +                       if (!attr) {
62993 +                               err = -EINVAL;
62994 +                               goto out;
62995 +                       }
62996 +                       svcn = le64_to_cpu(attr->nres.svcn);
62997 +                       evcn = le64_to_cpu(attr->nres.evcn);
62998 +               }
63000 +               if (end < svcn)
63001 +                       end = svcn;
63003 +               err = attr_load_runs(attr, ni, run, &end);
63004 +               if (err)
63005 +                       goto out;
63007 +               evcn1 = evcn + 1;
63008 +               attr->nres.svcn = cpu_to_le64(next_svcn);
63009 +               err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
63010 +               if (err)
63011 +                       goto out;
63013 +               le->vcn = cpu_to_le64(next_svcn);
63014 +               ni->attr_list.dirty = true;
63015 +               mi->dirty = true;
63017 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
63018 +       }
63019 +ins_ext:
63020 +       if (evcn1 > next_svcn) {
63021 +               err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
63022 +                                           next_svcn, evcn1 - next_svcn,
63023 +                                           attr_b->flags, &attr, &mi);
63024 +               if (err)
63025 +                       goto out;
63026 +       }
63027 +ok:
63028 +       run_truncate_around(run, vcn);
63029 +out:
63030 +       if (new_valid > data_size)
63031 +               new_valid = data_size;
63033 +       valid_size = le64_to_cpu(attr_b->nres.valid_size);
63034 +       if (new_valid != valid_size) {
63035 +               attr_b->nres.valid_size = cpu_to_le64(valid_size);
63036 +               mi_b->dirty = true;
63037 +       }
63039 +       return err;
63042 +/* Collapse range in file */
63043 +int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
63045 +       int err = 0;
63046 +       struct runs_tree *run = &ni->file.run;
63047 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
63048 +       struct ATTRIB *attr = NULL, *attr_b;
63049 +       struct ATTR_LIST_ENTRY *le, *le_b;
63050 +       struct mft_inode *mi, *mi_b;
63051 +       CLST svcn, evcn1, len, dealloc, alen;
63052 +       CLST vcn, end;
63053 +       u64 valid_size, data_size, alloc_size, total_size;
63054 +       u32 mask;
63055 +       __le16 a_flags;
63057 +       if (!bytes)
63058 +               return 0;
63060 +       le_b = NULL;
63061 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
63062 +       if (!attr_b)
63063 +               return -ENOENT;
63065 +       if (!attr_b->non_res) {
63066 +               /* Attribute is resident. Nothing to do? */
63067 +               return 0;
63068 +       }
63070 +       data_size = le64_to_cpu(attr_b->nres.data_size);
63071 +       alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
63072 +       a_flags = attr_b->flags;
63074 +       if (is_attr_ext(attr_b)) {
63075 +               total_size = le64_to_cpu(attr_b->nres.total_size);
63076 +               mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
63077 +       } else {
63078 +               total_size = alloc_size;
63079 +               mask = sbi->cluster_mask;
63080 +       }
63082 +       if ((vbo & mask) || (bytes & mask)) {
63083 +               /* allow to collapse only cluster aligned ranges */
63084 +               return -EINVAL;
63085 +       }
63087 +       if (vbo > data_size)
63088 +               return -EINVAL;
63090 +       down_write(&ni->file.run_lock);
63092 +       if (vbo + bytes >= data_size) {
63093 +               u64 new_valid = min(ni->i_valid, vbo);
63095 +               /* Simple truncate file at 'vbo' */
63096 +               truncate_setsize(&ni->vfs_inode, vbo);
63097 +               err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
63098 +                                   &new_valid, true, NULL);
63100 +               if (!err && new_valid < ni->i_valid)
63101 +                       ni->i_valid = new_valid;
63103 +               goto out;
63104 +       }
63106 +       /*
63107 +        * Enumerate all attribute segments and collapse
63108 +        */
63109 +       alen = alloc_size >> sbi->cluster_bits;
63110 +       vcn = vbo >> sbi->cluster_bits;
63111 +       len = bytes >> sbi->cluster_bits;
63112 +       end = vcn + len;
63113 +       dealloc = 0;
63115 +       svcn = le64_to_cpu(attr_b->nres.svcn);
63116 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
63118 +       if (svcn <= vcn && vcn < evcn1) {
63119 +               attr = attr_b;
63120 +               le = le_b;
63121 +               mi = mi_b;
63122 +       } else if (!le_b) {
63123 +               err = -EINVAL;
63124 +               goto out;
63125 +       } else {
63126 +               le = le_b;
63127 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
63128 +                                   &mi);
63129 +               if (!attr) {
63130 +                       err = -EINVAL;
63131 +                       goto out;
63132 +               }
63134 +               svcn = le64_to_cpu(attr->nres.svcn);
63135 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
63136 +       }
63138 +       for (;;) {
63139 +               if (svcn >= end) {
63140 +                       /* shift vcn */
63141 +                       attr->nres.svcn = cpu_to_le64(svcn - len);
63142 +                       attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
63143 +                       if (le) {
63144 +                               le->vcn = attr->nres.svcn;
63145 +                               ni->attr_list.dirty = true;
63146 +                       }
63147 +                       mi->dirty = true;
63148 +               } else if (svcn < vcn || end < evcn1) {
63149 +                       CLST vcn1, eat, next_svcn;
63151 +                       /* collapse a part of this attribute segment */
63152 +                       err = attr_load_runs(attr, ni, run, &svcn);
63153 +                       if (err)
63154 +                               goto out;
63155 +                       vcn1 = max(vcn, svcn);
63156 +                       eat = min(end, evcn1) - vcn1;
63158 +                       err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
63159 +                                               true);
63160 +                       if (err)
63161 +                               goto out;
63163 +                       if (!run_collapse_range(run, vcn1, eat)) {
63164 +                               err = -ENOMEM;
63165 +                               goto out;
63166 +                       }
63168 +                       if (svcn >= vcn) {
63169 +                               /* shift vcn */
63170 +                               attr->nres.svcn = cpu_to_le64(vcn);
63171 +                               if (le) {
63172 +                                       le->vcn = attr->nres.svcn;
63173 +                                       ni->attr_list.dirty = true;
63174 +                               }
63175 +                       }
63177 +                       err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
63178 +                       if (err)
63179 +                               goto out;
63181 +                       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
63182 +                       if (next_svcn + eat < evcn1) {
63183 +                               err = ni_insert_nonresident(
63184 +                                       ni, ATTR_DATA, NULL, 0, run, next_svcn,
63185 +                                       evcn1 - eat - next_svcn, a_flags, &attr,
63186 +                                       &mi);
63187 +                               if (err)
63188 +                                       goto out;
63190 +                               /* layout of records maybe changed */
63191 +                               attr_b = NULL;
63192 +                               le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
63193 +                                               &next_svcn);
63194 +                               if (!le) {
63195 +                                       err = -EINVAL;
63196 +                                       goto out;
63197 +                               }
63198 +                       }
63200 +                       /* free all allocated memory */
63201 +                       run_truncate(run, 0);
63202 +               } else {
63203 +                       u16 le_sz;
63204 +                       u16 roff = le16_to_cpu(attr->nres.run_off);
63206 +                       /*run==1 means unpack and deallocate*/
63207 +                       run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
63208 +                                     evcn1 - 1, svcn, Add2Ptr(attr, roff),
63209 +                                     le32_to_cpu(attr->size) - roff);
63211 +                       /* delete this attribute segment */
63212 +                       mi_remove_attr(mi, attr);
63213 +                       if (!le)
63214 +                               break;
63216 +                       le_sz = le16_to_cpu(le->size);
63217 +                       if (!al_remove_le(ni, le)) {
63218 +                               err = -EINVAL;
63219 +                               goto out;
63220 +                       }
63222 +                       if (evcn1 >= alen)
63223 +                               break;
63225 +                       if (!svcn) {
63226 +                               /* Load next record that contains this attribute */
63227 +                               if (ni_load_mi(ni, le, &mi)) {
63228 +                                       err = -EINVAL;
63229 +                                       goto out;
63230 +                               }
63232 +                               /* Look for required attribute */
63233 +                               attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
63234 +                                                   0, &le->id);
63235 +                               if (!attr) {
63236 +                                       err = -EINVAL;
63237 +                                       goto out;
63238 +                               }
63239 +                               goto next_attr;
63240 +                       }
63241 +                       le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
63242 +               }
63244 +               if (evcn1 >= alen)
63245 +                       break;
63247 +               attr = ni_enum_attr_ex(ni, attr, &le, &mi);
63248 +               if (!attr) {
63249 +                       err = -EINVAL;
63250 +                       goto out;
63251 +               }
63253 +next_attr:
63254 +               svcn = le64_to_cpu(attr->nres.svcn);
63255 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
63256 +       }
63258 +       if (!attr_b) {
63259 +               le_b = NULL;
63260 +               attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
63261 +                                     &mi_b);
63262 +               if (!attr_b) {
63263 +                       err = -ENOENT;
63264 +                       goto out;
63265 +               }
63266 +       }
63268 +       data_size -= bytes;
63269 +       valid_size = ni->i_valid;
63270 +       if (vbo + bytes <= valid_size)
63271 +               valid_size -= bytes;
63272 +       else if (vbo < valid_size)
63273 +               valid_size = vbo;
63275 +       attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
63276 +       attr_b->nres.data_size = cpu_to_le64(data_size);
63277 +       attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
63278 +       total_size -= (u64)dealloc << sbi->cluster_bits;
63279 +       if (is_attr_ext(attr_b))
63280 +               attr_b->nres.total_size = cpu_to_le64(total_size);
63281 +       mi_b->dirty = true;
63283 +       /*update inode size*/
63284 +       ni->i_valid = valid_size;
63285 +       ni->vfs_inode.i_size = data_size;
63286 +       inode_set_bytes(&ni->vfs_inode, total_size);
63287 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
63288 +       mark_inode_dirty(&ni->vfs_inode);
63290 +out:
63291 +       up_write(&ni->file.run_lock);
63292 +       if (err)
63293 +               make_bad_inode(&ni->vfs_inode);
63295 +       return err;
63298 +/* not for normal files */
63299 +int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes)
63301 +       int err = 0;
63302 +       struct runs_tree *run = &ni->file.run;
63303 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
63304 +       struct ATTRIB *attr = NULL, *attr_b;
63305 +       struct ATTR_LIST_ENTRY *le, *le_b;
63306 +       struct mft_inode *mi, *mi_b;
63307 +       CLST svcn, evcn1, vcn, len, end, alen, dealloc;
63308 +       u64 total_size, alloc_size;
63310 +       if (!bytes)
63311 +               return 0;
63313 +       le_b = NULL;
63314 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
63315 +       if (!attr_b)
63316 +               return -ENOENT;
63318 +       if (!attr_b->non_res) {
63319 +               u32 data_size = le32_to_cpu(attr->res.data_size);
63320 +               u32 from, to;
63322 +               if (vbo > data_size)
63323 +                       return 0;
63325 +               from = vbo;
63326 +               to = (vbo + bytes) < data_size ? (vbo + bytes) : data_size;
63327 +               memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
63328 +               return 0;
63329 +       }
63331 +       /* TODO: add support for normal files too */
63332 +       if (!is_attr_ext(attr_b))
63333 +               return -EOPNOTSUPP;
63335 +       alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
63336 +       total_size = le64_to_cpu(attr_b->nres.total_size);
63338 +       if (vbo >= alloc_size) {
63339 +               // NOTE: it is allowed
63340 +               return 0;
63341 +       }
63343 +       if (vbo + bytes > alloc_size)
63344 +               bytes = alloc_size - vbo;
63346 +       down_write(&ni->file.run_lock);
63347 +       /*
63348 +        * Enumerate all attribute segments and punch hole where necessary
63349 +        */
63350 +       alen = alloc_size >> sbi->cluster_bits;
63351 +       vcn = vbo >> sbi->cluster_bits;
63352 +       len = bytes >> sbi->cluster_bits;
63353 +       end = vcn + len;
63354 +       dealloc = 0;
63356 +       svcn = le64_to_cpu(attr_b->nres.svcn);
63357 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
63359 +       if (svcn <= vcn && vcn < evcn1) {
63360 +               attr = attr_b;
63361 +               le = le_b;
63362 +               mi = mi_b;
63363 +       } else if (!le_b) {
63364 +               err = -EINVAL;
63365 +               goto out;
63366 +       } else {
63367 +               le = le_b;
63368 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
63369 +                                   &mi);
63370 +               if (!attr) {
63371 +                       err = -EINVAL;
63372 +                       goto out;
63373 +               }
63375 +               svcn = le64_to_cpu(attr->nres.svcn);
63376 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
63377 +       }
63379 +       while (svcn < end) {
63380 +               CLST vcn1, zero, dealloc2;
63382 +               err = attr_load_runs(attr, ni, run, &svcn);
63383 +               if (err)
63384 +                       goto out;
63385 +               vcn1 = max(vcn, svcn);
63386 +               zero = min(end, evcn1) - vcn1;
63388 +               dealloc2 = dealloc;
63389 +               err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
63390 +               if (err)
63391 +                       goto out;
63393 +               if (dealloc2 == dealloc) {
63394 +                       /* looks like  the required range is already sparsed */
63395 +               } else {
63396 +                       if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
63397 +                                          false)) {
63398 +                               err = -ENOMEM;
63399 +                               goto out;
63400 +                       }
63402 +                       err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
63403 +                       if (err)
63404 +                               goto out;
63405 +               }
63406 +               /* free all allocated memory */
63407 +               run_truncate(run, 0);
63409 +               if (evcn1 >= alen)
63410 +                       break;
63412 +               attr = ni_enum_attr_ex(ni, attr, &le, &mi);
63413 +               if (!attr) {
63414 +                       err = -EINVAL;
63415 +                       goto out;
63416 +               }
63418 +               svcn = le64_to_cpu(attr->nres.svcn);
63419 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
63420 +       }
63422 +       total_size -= (u64)dealloc << sbi->cluster_bits;
63423 +       attr_b->nres.total_size = cpu_to_le64(total_size);
63424 +       mi_b->dirty = true;
63426 +       /*update inode size*/
63427 +       inode_set_bytes(&ni->vfs_inode, total_size);
63428 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
63429 +       mark_inode_dirty(&ni->vfs_inode);
63431 +out:
63432 +       up_write(&ni->file.run_lock);
63433 +       if (err)
63434 +               make_bad_inode(&ni->vfs_inode);
63436 +       return err;
63438 diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
63439 new file mode 100644
63440 index 000000000000..ea561361b576
63441 --- /dev/null
63442 +++ b/fs/ntfs3/attrlist.c
63443 @@ -0,0 +1,456 @@
63444 +// SPDX-License-Identifier: GPL-2.0
63446 + *
63447 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
63448 + *
63449 + */
63451 +#include <linux/blkdev.h>
63452 +#include <linux/buffer_head.h>
63453 +#include <linux/fs.h>
63454 +#include <linux/nls.h>
63456 +#include "debug.h"
63457 +#include "ntfs.h"
63458 +#include "ntfs_fs.h"
63460 +/* Returns true if le is valid */
63461 +static inline bool al_is_valid_le(const struct ntfs_inode *ni,
63462 +                                 struct ATTR_LIST_ENTRY *le)
63464 +       if (!le || !ni->attr_list.le || !ni->attr_list.size)
63465 +               return false;
63467 +       return PtrOffset(ni->attr_list.le, le) + le16_to_cpu(le->size) <=
63468 +              ni->attr_list.size;
63471 +void al_destroy(struct ntfs_inode *ni)
63473 +       run_close(&ni->attr_list.run);
63474 +       ntfs_free(ni->attr_list.le);
63475 +       ni->attr_list.le = NULL;
63476 +       ni->attr_list.size = 0;
63477 +       ni->attr_list.dirty = false;
63481 + * ntfs_load_attr_list
63482 + *
63483 + * This method makes sure that the ATTRIB list, if present,
63484 + * has been properly set up.
63485 + */
63486 +int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
63488 +       int err;
63489 +       size_t lsize;
63490 +       void *le = NULL;
63492 +       if (ni->attr_list.size)
63493 +               return 0;
63495 +       if (!attr->non_res) {
63496 +               lsize = le32_to_cpu(attr->res.data_size);
63497 +               le = ntfs_malloc(al_aligned(lsize));
63498 +               if (!le) {
63499 +                       err = -ENOMEM;
63500 +                       goto out;
63501 +               }
63502 +               memcpy(le, resident_data(attr), lsize);
63503 +       } else if (attr->nres.svcn) {
63504 +               err = -EINVAL;
63505 +               goto out;
63506 +       } else {
63507 +               u16 run_off = le16_to_cpu(attr->nres.run_off);
63509 +               lsize = le64_to_cpu(attr->nres.data_size);
63511 +               run_init(&ni->attr_list.run);
63513 +               err = run_unpack_ex(&ni->attr_list.run, ni->mi.sbi, ni->mi.rno,
63514 +                                   0, le64_to_cpu(attr->nres.evcn), 0,
63515 +                                   Add2Ptr(attr, run_off),
63516 +                                   le32_to_cpu(attr->size) - run_off);
63517 +               if (err < 0)
63518 +                       goto out;
63520 +               le = ntfs_malloc(al_aligned(lsize));
63521 +               if (!le) {
63522 +                       err = -ENOMEM;
63523 +                       goto out;
63524 +               }
63526 +               err = ntfs_read_run_nb(ni->mi.sbi, &ni->attr_list.run, 0, le,
63527 +                                      lsize, NULL);
63528 +               if (err)
63529 +                       goto out;
63530 +       }
63532 +       ni->attr_list.size = lsize;
63533 +       ni->attr_list.le = le;
63535 +       return 0;
63537 +out:
63538 +       ni->attr_list.le = le;
63539 +       al_destroy(ni);
63541 +       return err;
63545 + * al_enumerate
63546 + *
63547 + * Returns the next list 'le'
63548 + * if 'le' is NULL then returns the first 'le'
63549 + */
63550 +struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
63551 +                                    struct ATTR_LIST_ENTRY *le)
63553 +       size_t off;
63554 +       u16 sz;
63556 +       if (!le) {
63557 +               le = ni->attr_list.le;
63558 +       } else {
63559 +               sz = le16_to_cpu(le->size);
63560 +               if (sz < sizeof(struct ATTR_LIST_ENTRY)) {
63561 +                       /* Impossible 'cause we should not return such 'le' */
63562 +                       return NULL;
63563 +               }
63564 +               le = Add2Ptr(le, sz);
63565 +       }
63567 +       /* Check boundary */
63568 +       off = PtrOffset(ni->attr_list.le, le);
63569 +       if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) {
63570 +               // The regular end of list
63571 +               return NULL;
63572 +       }
63574 +       sz = le16_to_cpu(le->size);
63576 +       /* Check 'le' for errors */
63577 +       if (sz < sizeof(struct ATTR_LIST_ENTRY) ||
63578 +           off + sz > ni->attr_list.size ||
63579 +           sz < le->name_off + le->name_len * sizeof(short)) {
63580 +               return NULL;
63581 +       }
63583 +       return le;
63587 + * al_find_le
63588 + *
63589 + * finds the first 'le' in the list which matches type, name and vcn
63590 + * Returns NULL if not found
63591 + */
63592 +struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni,
63593 +                                  struct ATTR_LIST_ENTRY *le,
63594 +                                  const struct ATTRIB *attr)
63596 +       CLST svcn = attr_svcn(attr);
63598 +       return al_find_ex(ni, le, attr->type, attr_name(attr), attr->name_len,
63599 +                         &svcn);
63603 + * al_find_ex
63604 + *
63605 + * finds the first 'le' in the list which matches type, name and vcn
63606 + * Returns NULL if not found
63607 + */
63608 +struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
63609 +                                  struct ATTR_LIST_ENTRY *le,
63610 +                                  enum ATTR_TYPE type, const __le16 *name,
63611 +                                  u8 name_len, const CLST *vcn)
63613 +       struct ATTR_LIST_ENTRY *ret = NULL;
63614 +       u32 type_in = le32_to_cpu(type);
63616 +       while ((le = al_enumerate(ni, le))) {
63617 +               u64 le_vcn;
63618 +               int diff = le32_to_cpu(le->type) - type_in;
63620 +               /* List entries are sorted by type, name and vcn */
63621 +               if (diff < 0)
63622 +                       continue;
63624 +               if (diff > 0)
63625 +                       return ret;
63627 +               if (le->name_len != name_len)
63628 +                       continue;
63630 +               le_vcn = le64_to_cpu(le->vcn);
63631 +               if (!le_vcn) {
63632 +                       /*
63633 +                        * compare entry names only for entry with vcn == 0
63634 +                        */
63635 +                       diff = ntfs_cmp_names(le_name(le), name_len, name,
63636 +                                             name_len, ni->mi.sbi->upcase,
63637 +                                             true);
63638 +                       if (diff < 0)
63639 +                               continue;
63641 +                       if (diff > 0)
63642 +                               return ret;
63643 +               }
63645 +               if (!vcn)
63646 +                       return le;
63648 +               if (*vcn == le_vcn)
63649 +                       return le;
63651 +               if (*vcn < le_vcn)
63652 +                       return ret;
63654 +               ret = le;
63655 +       }
63657 +       return ret;
63661 + * al_find_le_to_insert
63662 + *
63663 + * finds the first list entry which matches type, name and vcn
63664 + */
63665 +static struct ATTR_LIST_ENTRY *al_find_le_to_insert(struct ntfs_inode *ni,
63666 +                                                   enum ATTR_TYPE type,
63667 +                                                   const __le16 *name,
63668 +                                                   u8 name_len, CLST vcn)
63670 +       struct ATTR_LIST_ENTRY *le = NULL, *prev;
63671 +       u32 type_in = le32_to_cpu(type);
63673 +       /* List entries are sorted by type, name, vcn */
63674 +       while ((le = al_enumerate(ni, prev = le))) {
63675 +               int diff = le32_to_cpu(le->type) - type_in;
63677 +               if (diff < 0)
63678 +                       continue;
63680 +               if (diff > 0)
63681 +                       return le;
63683 +               if (!le->vcn) {
63684 +                       /*
63685 +                        * compare entry names only for entry with vcn == 0
63686 +                        */
63687 +                       diff = ntfs_cmp_names(le_name(le), le->name_len, name,
63688 +                                             name_len, ni->mi.sbi->upcase,
63689 +                                             true);
63690 +                       if (diff < 0)
63691 +                               continue;
63693 +                       if (diff > 0)
63694 +                               return le;
63695 +               }
63697 +               if (le64_to_cpu(le->vcn) >= vcn)
63698 +                       return le;
63699 +       }
63701 +       return prev ? Add2Ptr(prev, le16_to_cpu(prev->size)) : ni->attr_list.le;
63705 + * al_add_le
63706 + *
63707 + * adds an "attribute list entry" to the list.
63708 + */
63709 +int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
63710 +             u8 name_len, CLST svcn, __le16 id, const struct MFT_REF *ref,
63711 +             struct ATTR_LIST_ENTRY **new_le)
63713 +       int err;
63714 +       struct ATTRIB *attr;
63715 +       struct ATTR_LIST_ENTRY *le;
63716 +       size_t off;
63717 +       u16 sz;
63718 +       size_t asize, new_asize;
63719 +       u64 new_size;
63720 +       typeof(ni->attr_list) *al = &ni->attr_list;
63722 +       /*
63723 +        * Compute the size of the new 'le'
63724 +        */
63725 +       sz = le_size(name_len);
63726 +       new_size = al->size + sz;
63727 +       asize = al_aligned(al->size);
63728 +       new_asize = al_aligned(new_size);
63730 +       /* Scan forward to the point at which the new 'le' should be inserted. */
63731 +       le = al_find_le_to_insert(ni, type, name, name_len, svcn);
63732 +       off = PtrOffset(al->le, le);
63734 +       if (new_size > asize) {
63735 +               void *ptr = ntfs_malloc(new_asize);
63737 +               if (!ptr)
63738 +                       return -ENOMEM;
63740 +               memcpy(ptr, al->le, off);
63741 +               memcpy(Add2Ptr(ptr, off + sz), le, al->size - off);
63742 +               le = Add2Ptr(ptr, off);
63743 +               ntfs_free(al->le);
63744 +               al->le = ptr;
63745 +       } else {
63746 +               memmove(Add2Ptr(le, sz), le, al->size - off);
63747 +       }
63749 +       al->size = new_size;
63751 +       le->type = type;
63752 +       le->size = cpu_to_le16(sz);
63753 +       le->name_len = name_len;
63754 +       le->name_off = offsetof(struct ATTR_LIST_ENTRY, name);
63755 +       le->vcn = cpu_to_le64(svcn);
63756 +       le->ref = *ref;
63757 +       le->id = id;
63758 +       memcpy(le->name, name, sizeof(short) * name_len);
63760 +       al->dirty = true;
63762 +       err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, new_size,
63763 +                           &new_size, true, &attr);
63764 +       if (err)
63765 +               return err;
63767 +       if (attr && attr->non_res) {
63768 +               err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
63769 +                                       al->size);
63770 +               if (err)
63771 +                       return err;
63772 +       }
63774 +       al->dirty = false;
63775 +       *new_le = le;
63777 +       return 0;
63781 + * al_remove_le
63782 + *
63783 + * removes 'le' from attribute list
63784 + */
63785 +bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le)
63787 +       u16 size;
63788 +       size_t off;
63789 +       typeof(ni->attr_list) *al = &ni->attr_list;
63791 +       if (!al_is_valid_le(ni, le))
63792 +               return false;
63794 +       /* Save on stack the size of 'le' */
63795 +       size = le16_to_cpu(le->size);
63796 +       off = PtrOffset(al->le, le);
63798 +       memmove(le, Add2Ptr(le, size), al->size - (off + size));
63800 +       al->size -= size;
63801 +       al->dirty = true;
63803 +       return true;
63807 + * al_delete_le
63808 + *
63809 + * deletes from the list the first 'le' which matches its parameters.
63810 + */
63811 +bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
63812 +                 const __le16 *name, size_t name_len,
63813 +                 const struct MFT_REF *ref)
63815 +       u16 size;
63816 +       struct ATTR_LIST_ENTRY *le;
63817 +       size_t off;
63818 +       typeof(ni->attr_list) *al = &ni->attr_list;
63820 +       /* Scan forward to the first 'le' that matches the input */
63821 +       le = al_find_ex(ni, NULL, type, name, name_len, &vcn);
63822 +       if (!le)
63823 +               return false;
63825 +       off = PtrOffset(al->le, le);
63827 +next:
63828 +       if (off >= al->size)
63829 +               return false;
63830 +       if (le->type != type)
63831 +               return false;
63832 +       if (le->name_len != name_len)
63833 +               return false;
63834 +       if (name_len && ntfs_cmp_names(le_name(le), name_len, name, name_len,
63835 +                                      ni->mi.sbi->upcase, true))
63836 +               return false;
63837 +       if (le64_to_cpu(le->vcn) != vcn)
63838 +               return false;
63840 +       /*
63841 +        * The caller specified a segment reference, so we have to
63842 +        * scan through the matching entries until we find that segment
63843 +        * reference or we run of matching entries.
63844 +        */
63845 +       if (ref && memcmp(ref, &le->ref, sizeof(*ref))) {
63846 +               off += le16_to_cpu(le->size);
63847 +               le = Add2Ptr(al->le, off);
63848 +               goto next;
63849 +       }
63851 +       /* Save on stack the size of 'le' */
63852 +       size = le16_to_cpu(le->size);
63853 +       /* Delete 'le'. */
63854 +       memmove(le, Add2Ptr(le, size), al->size - (off + size));
63856 +       al->size -= size;
63857 +       al->dirty = true;
63859 +       return true;
63863 + * al_update
63864 + */
63865 +int al_update(struct ntfs_inode *ni)
63867 +       int err;
63868 +       struct ATTRIB *attr;
63869 +       typeof(ni->attr_list) *al = &ni->attr_list;
63871 +       if (!al->dirty || !al->size)
63872 +               return 0;
63874 +       /*
63875 +        * attribute list increased on demand in al_add_le
63876 +        * attribute list decreased here
63877 +        */
63878 +       err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, al->size, NULL,
63879 +                           false, &attr);
63880 +       if (err)
63881 +               goto out;
63883 +       if (!attr->non_res) {
63884 +               memcpy(resident_data(attr), al->le, al->size);
63885 +       } else {
63886 +               err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
63887 +                                       al->size);
63888 +               if (err)
63889 +                       goto out;
63891 +               attr->nres.valid_size = attr->nres.data_size;
63892 +       }
63894 +       ni->mi.dirty = true;
63895 +       al->dirty = false;
63897 +out:
63898 +       return err;
63900 diff --git a/fs/ntfs3/bitfunc.c b/fs/ntfs3/bitfunc.c
63901 new file mode 100644
63902 index 000000000000..2de5faef2721
63903 --- /dev/null
63904 +++ b/fs/ntfs3/bitfunc.c
63905 @@ -0,0 +1,135 @@
63906 +// SPDX-License-Identifier: GPL-2.0
63908 + *
63909 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
63910 + *
63911 + */
63912 +#include <linux/blkdev.h>
63913 +#include <linux/buffer_head.h>
63914 +#include <linux/fs.h>
63915 +#include <linux/nls.h>
63917 +#include "debug.h"
63918 +#include "ntfs.h"
63919 +#include "ntfs_fs.h"
63921 +#define BITS_IN_SIZE_T (sizeof(size_t) * 8)
63924 + * fill_mask[i] - first i bits are '1' , i = 0,1,2,3,4,5,6,7,8
63925 + * fill_mask[i] = 0xFF >> (8-i)
63926 + */
63927 +static const u8 fill_mask[] = { 0x00, 0x01, 0x03, 0x07, 0x0F,
63928 +                               0x1F, 0x3F, 0x7F, 0xFF };
63931 + * zero_mask[i] - first i bits are '0' , i = 0,1,2,3,4,5,6,7,8
63932 + * zero_mask[i] = 0xFF << i
63933 + */
63934 +static const u8 zero_mask[] = { 0xFF, 0xFE, 0xFC, 0xF8, 0xF0,
63935 +                               0xE0, 0xC0, 0x80, 0x00 };
63938 + * are_bits_clear
63939 + *
63940 + * Returns true if all bits [bit, bit+nbits) are zeros "0"
63941 + */
63942 +bool are_bits_clear(const ulong *lmap, size_t bit, size_t nbits)
63944 +       size_t pos = bit & 7;
63945 +       const u8 *map = (u8 *)lmap + (bit >> 3);
63947 +       if (pos) {
63948 +               if (8 - pos >= nbits)
63949 +                       return !nbits || !(*map & fill_mask[pos + nbits] &
63950 +                                          zero_mask[pos]);
63952 +               if (*map++ & zero_mask[pos])
63953 +                       return false;
63954 +               nbits -= 8 - pos;
63955 +       }
63957 +       pos = ((size_t)map) & (sizeof(size_t) - 1);
63958 +       if (pos) {
63959 +               pos = sizeof(size_t) - pos;
63960 +               if (nbits >= pos * 8) {
63961 +                       for (nbits -= pos * 8; pos; pos--, map++) {
63962 +                               if (*map)
63963 +                                       return false;
63964 +                       }
63965 +               }
63966 +       }
63968 +       for (pos = nbits / BITS_IN_SIZE_T; pos; pos--, map += sizeof(size_t)) {
63969 +               if (*((size_t *)map))
63970 +                       return false;
63971 +       }
63973 +       for (pos = (nbits % BITS_IN_SIZE_T) >> 3; pos; pos--, map++) {
63974 +               if (*map)
63975 +                       return false;
63976 +       }
63978 +       pos = nbits & 7;
63979 +       if (pos && (*map & fill_mask[pos]))
63980 +               return false;
63982 +       // All bits are zero
63983 +       return true;
63987 + * are_bits_set
63988 + *
63989 + * Returns true if all bits [bit, bit+nbits) are ones "1"
63990 + */
63991 +bool are_bits_set(const ulong *lmap, size_t bit, size_t nbits)
63993 +       u8 mask;
63994 +       size_t pos = bit & 7;
63995 +       const u8 *map = (u8 *)lmap + (bit >> 3);
63997 +       if (pos) {
63998 +               if (8 - pos >= nbits) {
63999 +                       mask = fill_mask[pos + nbits] & zero_mask[pos];
64000 +                       return !nbits || (*map & mask) == mask;
64001 +               }
64003 +               mask = zero_mask[pos];
64004 +               if ((*map++ & mask) != mask)
64005 +                       return false;
64006 +               nbits -= 8 - pos;
64007 +       }
64009 +       pos = ((size_t)map) & (sizeof(size_t) - 1);
64010 +       if (pos) {
64011 +               pos = sizeof(size_t) - pos;
64012 +               if (nbits >= pos * 8) {
64013 +                       for (nbits -= pos * 8; pos; pos--, map++) {
64014 +                               if (*map != 0xFF)
64015 +                                       return false;
64016 +                       }
64017 +               }
64018 +       }
64020 +       for (pos = nbits / BITS_IN_SIZE_T; pos; pos--, map += sizeof(size_t)) {
64021 +               if (*((size_t *)map) != MINUS_ONE_T)
64022 +                       return false;
64023 +       }
64025 +       for (pos = (nbits % BITS_IN_SIZE_T) >> 3; pos; pos--, map++) {
64026 +               if (*map != 0xFF)
64027 +                       return false;
64028 +       }
64030 +       pos = nbits & 7;
64031 +       if (pos) {
64032 +               u8 mask = fill_mask[pos];
64034 +               if ((*map & mask) != mask)
64035 +                       return false;
64036 +       }
64038 +       // All bits are ones
64039 +       return true;
64041 diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
64042 new file mode 100644
64043 index 000000000000..32aab0031221
64044 --- /dev/null
64045 +++ b/fs/ntfs3/bitmap.c
64046 @@ -0,0 +1,1519 @@
64047 +// SPDX-License-Identifier: GPL-2.0
64049 + *
64050 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
64051 + *
64052 + * This code builds two trees of free clusters extents.
64053 + * Trees are sorted by start of extent and by length of extent.
64054 + * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in trees.
64055 + * In extreme case code reads on-disk bitmap to find free clusters
64056 + *
64057 + */
64059 +#include <linux/blkdev.h>
64060 +#include <linux/buffer_head.h>
64061 +#include <linux/fs.h>
64062 +#include <linux/nls.h>
64064 +#include "debug.h"
64065 +#include "ntfs.h"
64066 +#include "ntfs_fs.h"
64069 + * Maximum number of extents in tree.
64070 + */
64071 +#define NTFS_MAX_WND_EXTENTS (32u * 1024u)
64073 +struct rb_node_key {
64074 +       struct rb_node node;
64075 +       size_t key;
64079 + * Tree is sorted by start (key)
64080 + */
64081 +struct e_node {
64082 +       struct rb_node_key start; /* Tree sorted by start */
64083 +       struct rb_node_key count; /* Tree sorted by len*/
64086 +static int wnd_rescan(struct wnd_bitmap *wnd);
64087 +static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
64088 +static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);
64090 +static struct kmem_cache *ntfs_enode_cachep;
64092 +int __init ntfs3_init_bitmap(void)
64094 +       ntfs_enode_cachep =
64095 +               kmem_cache_create("ntfs3_enode_cache", sizeof(struct e_node), 0,
64096 +                                 SLAB_RECLAIM_ACCOUNT, NULL);
64097 +       return ntfs_enode_cachep ? 0 : -ENOMEM;
64100 +void ntfs3_exit_bitmap(void)
64102 +       kmem_cache_destroy(ntfs_enode_cachep);
64105 +static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
64107 +       return i + 1 == wnd->nwnd ? wnd->bits_last : wnd->sb->s_blocksize * 8;
64111 + * b_pos + b_len - biggest fragment
64112 + * Scan range [wpos wbits) window 'buf'
64113 + * Returns -1 if not found
64114 + */
64115 +static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
64116 +                      size_t to_alloc, size_t *prev_tail, size_t *b_pos,
64117 +                      size_t *b_len)
64119 +       while (wpos < wend) {
64120 +               size_t free_len;
64121 +               u32 free_bits, end;
64122 +               u32 used = find_next_zero_bit(buf, wend, wpos);
64124 +               if (used >= wend) {
64125 +                       if (*b_len < *prev_tail) {
64126 +                               *b_pos = wbit - *prev_tail;
64127 +                               *b_len = *prev_tail;
64128 +                       }
64130 +                       *prev_tail = 0;
64131 +                       return -1;
64132 +               }
64134 +               if (used > wpos) {
64135 +                       wpos = used;
64136 +                       if (*b_len < *prev_tail) {
64137 +                               *b_pos = wbit - *prev_tail;
64138 +                               *b_len = *prev_tail;
64139 +                       }
64141 +                       *prev_tail = 0;
64142 +               }
64144 +               /*
64145 +                * Now we have a fragment [wpos, wend) staring with 0
64146 +                */
64147 +               end = wpos + to_alloc - *prev_tail;
64148 +               free_bits = find_next_bit(buf, min(end, wend), wpos);
64150 +               free_len = *prev_tail + free_bits - wpos;
64152 +               if (*b_len < free_len) {
64153 +                       *b_pos = wbit + wpos - *prev_tail;
64154 +                       *b_len = free_len;
64155 +               }
64157 +               if (free_len >= to_alloc)
64158 +                       return wbit + wpos - *prev_tail;
64160 +               if (free_bits >= wend) {
64161 +                       *prev_tail += free_bits - wpos;
64162 +                       return -1;
64163 +               }
64165 +               wpos = free_bits + 1;
64167 +               *prev_tail = 0;
64168 +       }
64170 +       return -1;
64174 + * wnd_close
64175 + *
64176 + * Frees all resources
64177 + */
64178 +void wnd_close(struct wnd_bitmap *wnd)
64180 +       struct rb_node *node, *next;
64182 +       ntfs_free(wnd->free_bits);
64183 +       run_close(&wnd->run);
64185 +       node = rb_first(&wnd->start_tree);
64187 +       while (node) {
64188 +               next = rb_next(node);
64189 +               rb_erase(node, &wnd->start_tree);
64190 +               kmem_cache_free(ntfs_enode_cachep,
64191 +                               rb_entry(node, struct e_node, start.node));
64192 +               node = next;
64193 +       }
64196 +static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
64198 +       struct rb_node **p = &root->rb_node;
64199 +       struct rb_node *r = NULL;
64201 +       while (*p) {
64202 +               struct rb_node_key *k;
64204 +               k = rb_entry(*p, struct rb_node_key, node);
64205 +               if (v < k->key) {
64206 +                       p = &(*p)->rb_left;
64207 +               } else if (v > k->key) {
64208 +                       r = &k->node;
64209 +                       p = &(*p)->rb_right;
64210 +               } else {
64211 +                       return &k->node;
64212 +               }
64213 +       }
64215 +       return r;
64219 + * rb_insert_count
64220 + *
64221 + * Helper function to insert special kind of 'count' tree
64222 + */
64223 +static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
64225 +       struct rb_node **p = &root->rb_node;
64226 +       struct rb_node *parent = NULL;
64227 +       size_t e_ckey = e->count.key;
64228 +       size_t e_skey = e->start.key;
64230 +       while (*p) {
64231 +               struct e_node *k =
64232 +                       rb_entry(parent = *p, struct e_node, count.node);
64234 +               if (e_ckey > k->count.key) {
64235 +                       p = &(*p)->rb_left;
64236 +               } else if (e_ckey < k->count.key) {
64237 +                       p = &(*p)->rb_right;
64238 +               } else if (e_skey < k->start.key) {
64239 +                       p = &(*p)->rb_left;
64240 +               } else if (e_skey > k->start.key) {
64241 +                       p = &(*p)->rb_right;
64242 +               } else {
64243 +                       WARN_ON(1);
64244 +                       return false;
64245 +               }
64246 +       }
64248 +       rb_link_node(&e->count.node, parent, p);
64249 +       rb_insert_color(&e->count.node, root);
64250 +       return true;
64254 + * inline bool rb_insert_start
64255 + *
64256 + * Helper function to insert special kind of 'start' tree
64257 + */
64258 +static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
64260 +       struct rb_node **p = &root->rb_node;
64261 +       struct rb_node *parent = NULL;
64262 +       size_t e_skey = e->start.key;
64264 +       while (*p) {
64265 +               struct e_node *k;
64267 +               parent = *p;
64269 +               k = rb_entry(parent, struct e_node, start.node);
64270 +               if (e_skey < k->start.key) {
64271 +                       p = &(*p)->rb_left;
64272 +               } else if (e_skey > k->start.key) {
64273 +                       p = &(*p)->rb_right;
64274 +               } else {
64275 +                       WARN_ON(1);
64276 +                       return false;
64277 +               }
64278 +       }
64280 +       rb_link_node(&e->start.node, parent, p);
64281 +       rb_insert_color(&e->start.node, root);
64282 +       return true;
64286 + * wnd_add_free_ext
64287 + *
64288 + * adds a new extent of free space
64289 + * build = 1 when building tree
64290 + */
64291 +static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
64292 +                            bool build)
64294 +       struct e_node *e, *e0 = NULL;
64295 +       size_t ib, end_in = bit + len;
64296 +       struct rb_node *n;
64298 +       if (build) {
64299 +               /* Use extent_min to filter too short extents */
64300 +               if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
64301 +                   len <= wnd->extent_min) {
64302 +                       wnd->uptodated = -1;
64303 +                       return;
64304 +               }
64305 +       } else {
64306 +               /* Try to find extent before 'bit' */
64307 +               n = rb_lookup(&wnd->start_tree, bit);
64309 +               if (!n) {
64310 +                       n = rb_first(&wnd->start_tree);
64311 +               } else {
64312 +                       e = rb_entry(n, struct e_node, start.node);
64313 +                       n = rb_next(n);
64314 +                       if (e->start.key + e->count.key == bit) {
64315 +                               /* Remove left */
64316 +                               bit = e->start.key;
64317 +                               len += e->count.key;
64318 +                               rb_erase(&e->start.node, &wnd->start_tree);
64319 +                               rb_erase(&e->count.node, &wnd->count_tree);
64320 +                               wnd->count -= 1;
64321 +                               e0 = e;
64322 +                       }
64323 +               }
64325 +               while (n) {
64326 +                       size_t next_end;
64328 +                       e = rb_entry(n, struct e_node, start.node);
64329 +                       next_end = e->start.key + e->count.key;
64330 +                       if (e->start.key > end_in)
64331 +                               break;
64333 +                       /* Remove right */
64334 +                       n = rb_next(n);
64335 +                       len += next_end - end_in;
64336 +                       end_in = next_end;
64337 +                       rb_erase(&e->start.node, &wnd->start_tree);
64338 +                       rb_erase(&e->count.node, &wnd->count_tree);
64339 +                       wnd->count -= 1;
64341 +                       if (!e0)
64342 +                               e0 = e;
64343 +                       else
64344 +                               kmem_cache_free(ntfs_enode_cachep, e);
64345 +               }
64347 +               if (wnd->uptodated != 1) {
64348 +                       /* Check bits before 'bit' */
64349 +                       ib = wnd->zone_bit == wnd->zone_end ||
64350 +                                            bit < wnd->zone_end
64351 +                                    ? 0
64352 +                                    : wnd->zone_end;
64354 +                       while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
64355 +                               bit -= 1;
64356 +                               len += 1;
64357 +                       }
64359 +                       /* Check bits after 'end_in' */
64360 +                       ib = wnd->zone_bit == wnd->zone_end ||
64361 +                                            end_in > wnd->zone_bit
64362 +                                    ? wnd->nbits
64363 +                                    : wnd->zone_bit;
64365 +                       while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
64366 +                               end_in += 1;
64367 +                               len += 1;
64368 +                       }
64369 +               }
64370 +       }
64371 +       /* Insert new fragment */
64372 +       if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
64373 +               if (e0)
64374 +                       kmem_cache_free(ntfs_enode_cachep, e0);
64376 +               wnd->uptodated = -1;
64378 +               /* Compare with smallest fragment */
64379 +               n = rb_last(&wnd->count_tree);
64380 +               e = rb_entry(n, struct e_node, count.node);
64381 +               if (len <= e->count.key)
64382 +                       goto out; /* Do not insert small fragments */
64384 +               if (build) {
64385 +                       struct e_node *e2;
64387 +                       n = rb_prev(n);
64388 +                       e2 = rb_entry(n, struct e_node, count.node);
64389 +                       /* smallest fragment will be 'e2->count.key' */
64390 +                       wnd->extent_min = e2->count.key;
64391 +               }
64393 +               /* Replace smallest fragment by new one */
64394 +               rb_erase(&e->start.node, &wnd->start_tree);
64395 +               rb_erase(&e->count.node, &wnd->count_tree);
64396 +               wnd->count -= 1;
64397 +       } else {
64398 +               e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
64399 +               if (!e) {
64400 +                       wnd->uptodated = -1;
64401 +                       goto out;
64402 +               }
64404 +               if (build && len <= wnd->extent_min)
64405 +                       wnd->extent_min = len;
64406 +       }
64407 +       e->start.key = bit;
64408 +       e->count.key = len;
64409 +       if (len > wnd->extent_max)
64410 +               wnd->extent_max = len;
64412 +       rb_insert_start(&wnd->start_tree, e);
64413 +       rb_insert_count(&wnd->count_tree, e);
64414 +       wnd->count += 1;
64416 +out:;
64420 + * wnd_remove_free_ext
64421 + *
64422 + * removes a run from the cached free space
64423 + */
64424 +static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
64426 +       struct rb_node *n, *n3;
64427 +       struct e_node *e, *e3;
64428 +       size_t end_in = bit + len;
64429 +       size_t end3, end, new_key, new_len, max_new_len;
64431 +       /* Try to find extent before 'bit' */
64432 +       n = rb_lookup(&wnd->start_tree, bit);
64434 +       if (!n)
64435 +               return;
64437 +       e = rb_entry(n, struct e_node, start.node);
64438 +       end = e->start.key + e->count.key;
64440 +       new_key = new_len = 0;
64441 +       len = e->count.key;
64443 +       /* Range [bit,end_in) must be inside 'e' or outside 'e' and 'n' */
64444 +       if (e->start.key > bit)
64445 +               ;
64446 +       else if (end_in <= end) {
64447 +               /* Range [bit,end_in) inside 'e' */
64448 +               new_key = end_in;
64449 +               new_len = end - end_in;
64450 +               len = bit - e->start.key;
64451 +       } else if (bit > end) {
64452 +               bool bmax = false;
64454 +               n3 = rb_next(n);
64456 +               while (n3) {
64457 +                       e3 = rb_entry(n3, struct e_node, start.node);
64458 +                       if (e3->start.key >= end_in)
64459 +                               break;
64461 +                       if (e3->count.key == wnd->extent_max)
64462 +                               bmax = true;
64464 +                       end3 = e3->start.key + e3->count.key;
64465 +                       if (end3 > end_in) {
64466 +                               e3->start.key = end_in;
64467 +                               rb_erase(&e3->count.node, &wnd->count_tree);
64468 +                               e3->count.key = end3 - end_in;
64469 +                               rb_insert_count(&wnd->count_tree, e3);
64470 +                               break;
64471 +                       }
64473 +                       n3 = rb_next(n3);
64474 +                       rb_erase(&e3->start.node, &wnd->start_tree);
64475 +                       rb_erase(&e3->count.node, &wnd->count_tree);
64476 +                       wnd->count -= 1;
64477 +                       kmem_cache_free(ntfs_enode_cachep, e3);
64478 +               }
64479 +               if (!bmax)
64480 +                       return;
64481 +               n3 = rb_first(&wnd->count_tree);
64482 +               wnd->extent_max =
64483 +                       n3 ? rb_entry(n3, struct e_node, count.node)->count.key
64484 +                          : 0;
64485 +               return;
64486 +       }
64488 +       if (e->count.key != wnd->extent_max) {
64489 +               ;
64490 +       } else if (rb_prev(&e->count.node)) {
64491 +               ;
64492 +       } else {
64493 +               n3 = rb_next(&e->count.node);
64494 +               max_new_len = len > new_len ? len : new_len;
64495 +               if (!n3) {
64496 +                       wnd->extent_max = max_new_len;
64497 +               } else {
64498 +                       e3 = rb_entry(n3, struct e_node, count.node);
64499 +                       wnd->extent_max = max(e3->count.key, max_new_len);
64500 +               }
64501 +       }
64503 +       if (!len) {
64504 +               if (new_len) {
64505 +                       e->start.key = new_key;
64506 +                       rb_erase(&e->count.node, &wnd->count_tree);
64507 +                       e->count.key = new_len;
64508 +                       rb_insert_count(&wnd->count_tree, e);
64509 +               } else {
64510 +                       rb_erase(&e->start.node, &wnd->start_tree);
64511 +                       rb_erase(&e->count.node, &wnd->count_tree);
64512 +                       wnd->count -= 1;
64513 +                       kmem_cache_free(ntfs_enode_cachep, e);
64514 +               }
64515 +               goto out;
64516 +       }
64517 +       rb_erase(&e->count.node, &wnd->count_tree);
64518 +       e->count.key = len;
64519 +       rb_insert_count(&wnd->count_tree, e);
64521 +       if (!new_len)
64522 +               goto out;
64524 +       if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
64525 +               wnd->uptodated = -1;
64527 +               /* Get minimal extent */
64528 +               e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
64529 +                            count.node);
64530 +               if (e->count.key > new_len)
64531 +                       goto out;
64533 +               /* Replace minimum */
64534 +               rb_erase(&e->start.node, &wnd->start_tree);
64535 +               rb_erase(&e->count.node, &wnd->count_tree);
64536 +               wnd->count -= 1;
64537 +       } else {
64538 +               e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
64539 +               if (!e)
64540 +                       wnd->uptodated = -1;
64541 +       }
64543 +       if (e) {
64544 +               e->start.key = new_key;
64545 +               e->count.key = new_len;
64546 +               rb_insert_start(&wnd->start_tree, e);
64547 +               rb_insert_count(&wnd->count_tree, e);
64548 +               wnd->count += 1;
64549 +       }
64551 +out:
64552 +       if (!wnd->count && 1 != wnd->uptodated)
64553 +               wnd_rescan(wnd);
64557 + * wnd_rescan
64558 + *
64559 + * Scan the entire bitmap. Used during initialization.
64560 + */
64561 +static int wnd_rescan(struct wnd_bitmap *wnd)
64563 +       int err = 0;
64564 +       size_t prev_tail = 0;
64565 +       struct super_block *sb = wnd->sb;
64566 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
64567 +       u64 lbo, len = 0;
64568 +       u32 blocksize = sb->s_blocksize;
64569 +       u8 cluster_bits = sbi->cluster_bits;
64570 +       u32 wbits = 8 * sb->s_blocksize;
64571 +       u32 used, frb;
64572 +       const ulong *buf;
64573 +       size_t wpos, wbit, iw, vbo;
64574 +       struct buffer_head *bh = NULL;
64575 +       CLST lcn, clen;
64577 +       wnd->uptodated = 0;
64578 +       wnd->extent_max = 0;
64579 +       wnd->extent_min = MINUS_ONE_T;
64580 +       wnd->total_zeroes = 0;
64582 +       vbo = 0;
64584 +       for (iw = 0; iw < wnd->nwnd; iw++) {
64585 +               if (iw + 1 == wnd->nwnd)
64586 +                       wbits = wnd->bits_last;
64588 +               if (wnd->inited) {
64589 +                       if (!wnd->free_bits[iw]) {
64590 +                               /* all ones */
64591 +                               if (prev_tail) {
64592 +                                       wnd_add_free_ext(wnd,
64593 +                                                        vbo * 8 - prev_tail,
64594 +                                                        prev_tail, true);
64595 +                                       prev_tail = 0;
64596 +                               }
64597 +                               goto next_wnd;
64598 +                       }
64599 +                       if (wbits == wnd->free_bits[iw]) {
64600 +                               /* all zeroes */
64601 +                               prev_tail += wbits;
64602 +                               wnd->total_zeroes += wbits;
64603 +                               goto next_wnd;
64604 +                       }
64605 +               }
64607 +               if (!len) {
64608 +                       u32 off = vbo & sbi->cluster_mask;
64610 +                       if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
64611 +                                             &lcn, &clen, NULL)) {
64612 +                               err = -ENOENT;
64613 +                               goto out;
64614 +                       }
64616 +                       lbo = ((u64)lcn << cluster_bits) + off;
64617 +                       len = ((u64)clen << cluster_bits) - off;
64618 +               }
64620 +               bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
64621 +               if (!bh) {
64622 +                       err = -EIO;
64623 +                       goto out;
64624 +               }
64626 +               buf = (ulong *)bh->b_data;
64628 +               used = __bitmap_weight(buf, wbits);
64629 +               if (used < wbits) {
64630 +                       frb = wbits - used;
64631 +                       wnd->free_bits[iw] = frb;
64632 +                       wnd->total_zeroes += frb;
64633 +               }
64635 +               wpos = 0;
64636 +               wbit = vbo * 8;
64638 +               if (wbit + wbits > wnd->nbits)
64639 +                       wbits = wnd->nbits - wbit;
64641 +               do {
64642 +                       used = find_next_zero_bit(buf, wbits, wpos);
64644 +                       if (used > wpos && prev_tail) {
64645 +                               wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
64646 +                                                prev_tail, true);
64647 +                               prev_tail = 0;
64648 +                       }
64650 +                       wpos = used;
64652 +                       if (wpos >= wbits) {
64653 +                               /* No free blocks */
64654 +                               prev_tail = 0;
64655 +                               break;
64656 +                       }
64658 +                       frb = find_next_bit(buf, wbits, wpos);
64659 +                       if (frb >= wbits) {
64660 +                               /* keep last free block */
64661 +                               prev_tail += frb - wpos;
64662 +                               break;
64663 +                       }
64665 +                       wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
64666 +                                        frb + prev_tail - wpos, true);
64668 +                       /* Skip free block and first '1' */
64669 +                       wpos = frb + 1;
64670 +                       /* Reset previous tail */
64671 +                       prev_tail = 0;
64672 +               } while (wpos < wbits);
64674 +next_wnd:
64676 +               if (bh)
64677 +                       put_bh(bh);
64678 +               bh = NULL;
64680 +               vbo += blocksize;
64681 +               if (len) {
64682 +                       len -= blocksize;
64683 +                       lbo += blocksize;
64684 +               }
64685 +       }
64687 +       /* Add last block */
64688 +       if (prev_tail)
64689 +               wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);
64691 +       /*
64692 +        * Before init cycle wnd->uptodated was 0
64693 +        * If any errors or limits occur during initialization, then
64694 +        * wnd->uptodated will be -1
64695 +        * If 'uptodated' is still 0 then Tree is really updated
64696 +        */
64697 +       if (!wnd->uptodated)
64698 +               wnd->uptodated = 1;
64700 +       if (wnd->zone_bit != wnd->zone_end) {
64701 +               size_t zlen = wnd->zone_end - wnd->zone_bit;
64703 +               wnd->zone_end = wnd->zone_bit;
64704 +               wnd_zone_set(wnd, wnd->zone_bit, zlen);
64705 +       }
64707 +out:
64708 +       return err;
64712 + * wnd_init
64713 + */
64714 +int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
64716 +       int err;
64717 +       u32 blocksize = sb->s_blocksize;
64718 +       u32 wbits = blocksize * 8;
64720 +       init_rwsem(&wnd->rw_lock);
64722 +       wnd->sb = sb;
64723 +       wnd->nbits = nbits;
64724 +       wnd->total_zeroes = nbits;
64725 +       wnd->extent_max = MINUS_ONE_T;
64726 +       wnd->zone_bit = wnd->zone_end = 0;
64727 +       wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
64728 +       wnd->bits_last = nbits & (wbits - 1);
64729 +       if (!wnd->bits_last)
64730 +               wnd->bits_last = wbits;
64732 +       wnd->free_bits = ntfs_zalloc(wnd->nwnd * sizeof(u16));
64733 +       if (!wnd->free_bits)
64734 +               return -ENOMEM;
64736 +       err = wnd_rescan(wnd);
64737 +       if (err)
64738 +               return err;
64740 +       wnd->inited = true;
64742 +       return 0;
64746 + * wnd_map
64747 + *
64748 + * call sb_bread for requested window
64749 + */
64750 +static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
64752 +       size_t vbo;
64753 +       CLST lcn, clen;
64754 +       struct super_block *sb = wnd->sb;
64755 +       struct ntfs_sb_info *sbi;
64756 +       struct buffer_head *bh;
64757 +       u64 lbo;
64759 +       sbi = sb->s_fs_info;
64760 +       vbo = (u64)iw << sb->s_blocksize_bits;
64762 +       if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
64763 +                             NULL)) {
64764 +               return ERR_PTR(-ENOENT);
64765 +       }
64767 +       lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);
64769 +       bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
64770 +       if (!bh)
64771 +               return ERR_PTR(-EIO);
64773 +       return bh;
64777 + * wnd_set_free
64778 + *
64779 + * Marks the bits range from bit to bit + bits as free
64780 + */
64781 +int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
64783 +       int err = 0;
64784 +       struct super_block *sb = wnd->sb;
64785 +       size_t bits0 = bits;
64786 +       u32 wbits = 8 * sb->s_blocksize;
64787 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
64788 +       u32 wbit = bit & (wbits - 1);
64789 +       struct buffer_head *bh;
64791 +       while (iw < wnd->nwnd && bits) {
64792 +               u32 tail, op;
64793 +               ulong *buf;
64795 +               if (iw + 1 == wnd->nwnd)
64796 +                       wbits = wnd->bits_last;
64798 +               tail = wbits - wbit;
64799 +               op = tail < bits ? tail : bits;
64801 +               bh = wnd_map(wnd, iw);
64802 +               if (IS_ERR(bh)) {
64803 +                       err = PTR_ERR(bh);
64804 +                       break;
64805 +               }
64807 +               buf = (ulong *)bh->b_data;
64809 +               lock_buffer(bh);
64811 +               __bitmap_clear(buf, wbit, op);
64813 +               wnd->free_bits[iw] += op;
64815 +               set_buffer_uptodate(bh);
64816 +               mark_buffer_dirty(bh);
64817 +               unlock_buffer(bh);
64818 +               put_bh(bh);
64820 +               wnd->total_zeroes += op;
64821 +               bits -= op;
64822 +               wbit = 0;
64823 +               iw += 1;
64824 +       }
64826 +       wnd_add_free_ext(wnd, bit, bits0, false);
64828 +       return err;
64832 + * wnd_set_used
64833 + *
64834 + * Marks the bits range from bit to bit + bits as used
64835 + */
64836 +int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
64838 +       int err = 0;
64839 +       struct super_block *sb = wnd->sb;
64840 +       size_t bits0 = bits;
64841 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
64842 +       u32 wbits = 8 * sb->s_blocksize;
64843 +       u32 wbit = bit & (wbits - 1);
64844 +       struct buffer_head *bh;
64846 +       while (iw < wnd->nwnd && bits) {
64847 +               u32 tail, op;
64848 +               ulong *buf;
64850 +               if (unlikely(iw + 1 == wnd->nwnd))
64851 +                       wbits = wnd->bits_last;
64853 +               tail = wbits - wbit;
64854 +               op = tail < bits ? tail : bits;
64856 +               bh = wnd_map(wnd, iw);
64857 +               if (IS_ERR(bh)) {
64858 +                       err = PTR_ERR(bh);
64859 +                       break;
64860 +               }
64861 +               buf = (ulong *)bh->b_data;
64863 +               lock_buffer(bh);
64865 +               __bitmap_set(buf, wbit, op);
64866 +               wnd->free_bits[iw] -= op;
64868 +               set_buffer_uptodate(bh);
64869 +               mark_buffer_dirty(bh);
64870 +               unlock_buffer(bh);
64871 +               put_bh(bh);
64873 +               wnd->total_zeroes -= op;
64874 +               bits -= op;
64875 +               wbit = 0;
64876 +               iw += 1;
64877 +       }
64879 +       if (!RB_EMPTY_ROOT(&wnd->start_tree))
64880 +               wnd_remove_free_ext(wnd, bit, bits0);
64882 +       return err;
64886 + * wnd_is_free_hlp
64887 + *
64888 + * Returns true if all clusters [bit, bit+bits) are free (bitmap only)
64889 + */
64890 +static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
64892 +       struct super_block *sb = wnd->sb;
64893 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
64894 +       u32 wbits = 8 * sb->s_blocksize;
64895 +       u32 wbit = bit & (wbits - 1);
64897 +       while (iw < wnd->nwnd && bits) {
64898 +               u32 tail, op;
64900 +               if (unlikely(iw + 1 == wnd->nwnd))
64901 +                       wbits = wnd->bits_last;
64903 +               tail = wbits - wbit;
64904 +               op = tail < bits ? tail : bits;
64906 +               if (wbits != wnd->free_bits[iw]) {
64907 +                       bool ret;
64908 +                       struct buffer_head *bh = wnd_map(wnd, iw);
64910 +                       if (IS_ERR(bh))
64911 +                               return false;
64913 +                       ret = are_bits_clear((ulong *)bh->b_data, wbit, op);
64915 +                       put_bh(bh);
64916 +                       if (!ret)
64917 +                               return false;
64918 +               }
64920 +               bits -= op;
64921 +               wbit = 0;
64922 +               iw += 1;
64923 +       }
64925 +       return true;
64929 + * wnd_is_free
64930 + *
64931 + * Returns true if all clusters [bit, bit+bits) are free
64932 + */
64933 +bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
64935 +       bool ret;
64936 +       struct rb_node *n;
64937 +       size_t end;
64938 +       struct e_node *e;
64940 +       if (RB_EMPTY_ROOT(&wnd->start_tree))
64941 +               goto use_wnd;
64943 +       n = rb_lookup(&wnd->start_tree, bit);
64944 +       if (!n)
64945 +               goto use_wnd;
64947 +       e = rb_entry(n, struct e_node, start.node);
64949 +       end = e->start.key + e->count.key;
64951 +       if (bit < end && bit + bits <= end)
64952 +               return true;
64954 +use_wnd:
64955 +       ret = wnd_is_free_hlp(wnd, bit, bits);
64957 +       return ret;
64961 + * wnd_is_used
64962 + *
64963 + * Returns true if all clusters [bit, bit+bits) are used
64964 + */
64965 +bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
64967 +       bool ret = false;
64968 +       struct super_block *sb = wnd->sb;
64969 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
64970 +       u32 wbits = 8 * sb->s_blocksize;
64971 +       u32 wbit = bit & (wbits - 1);
64972 +       size_t end;
64973 +       struct rb_node *n;
64974 +       struct e_node *e;
64976 +       if (RB_EMPTY_ROOT(&wnd->start_tree))
64977 +               goto use_wnd;
64979 +       end = bit + bits;
64980 +       n = rb_lookup(&wnd->start_tree, end - 1);
64981 +       if (!n)
64982 +               goto use_wnd;
64984 +       e = rb_entry(n, struct e_node, start.node);
64985 +       if (e->start.key + e->count.key > bit)
64986 +               return false;
64988 +use_wnd:
64989 +       while (iw < wnd->nwnd && bits) {
64990 +               u32 tail, op;
64992 +               if (unlikely(iw + 1 == wnd->nwnd))
64993 +                       wbits = wnd->bits_last;
64995 +               tail = wbits - wbit;
64996 +               op = tail < bits ? tail : bits;
64998 +               if (wnd->free_bits[iw]) {
64999 +                       bool ret;
65000 +                       struct buffer_head *bh = wnd_map(wnd, iw);
65002 +                       if (IS_ERR(bh))
65003 +                               goto out;
65005 +                       ret = are_bits_set((ulong *)bh->b_data, wbit, op);
65006 +                       put_bh(bh);
65007 +                       if (!ret)
65008 +                               goto out;
65009 +               }
65011 +               bits -= op;
65012 +               wbit = 0;
65013 +               iw += 1;
65014 +       }
65015 +       ret = true;
65017 +out:
65018 +       return ret;
65022 + * wnd_find
65023 + * - flags - BITMAP_FIND_XXX flags
65024 + *
65025 + * looks for free space
65026 + * Returns 0 if not found
65027 + */
65028 +size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
65029 +               size_t flags, size_t *allocated)
65031 +       struct super_block *sb;
65032 +       u32 wbits, wpos, wzbit, wzend;
65033 +       size_t fnd, max_alloc, b_len, b_pos;
65034 +       size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
65035 +       size_t to_alloc0 = to_alloc;
65036 +       const ulong *buf;
65037 +       const struct e_node *e;
65038 +       const struct rb_node *pr, *cr;
65039 +       u8 log2_bits;
65040 +       bool fbits_valid;
65041 +       struct buffer_head *bh;
65043 +       /* fast checking for available free space */
65044 +       if (flags & BITMAP_FIND_FULL) {
65045 +               size_t zeroes = wnd_zeroes(wnd);
65047 +               zeroes -= wnd->zone_end - wnd->zone_bit;
65048 +               if (zeroes < to_alloc0)
65049 +                       goto no_space;
65051 +               if (to_alloc0 > wnd->extent_max)
65052 +                       goto no_space;
65053 +       } else {
65054 +               if (to_alloc > wnd->extent_max)
65055 +                       to_alloc = wnd->extent_max;
65056 +       }
65058 +       if (wnd->zone_bit <= hint && hint < wnd->zone_end)
65059 +               hint = wnd->zone_end;
65061 +       max_alloc = wnd->nbits;
65062 +       b_len = b_pos = 0;
65064 +       if (hint >= max_alloc)
65065 +               hint = 0;
65067 +       if (RB_EMPTY_ROOT(&wnd->start_tree)) {
65068 +               if (wnd->uptodated == 1) {
65069 +                       /* extents tree is updated -> no free space */
65070 +                       goto no_space;
65071 +               }
65072 +               goto scan_bitmap;
65073 +       }
65075 +       e = NULL;
65076 +       if (!hint)
65077 +               goto allocate_biggest;
65079 +       /* Use hint: enumerate extents by start >= hint */
65080 +       pr = NULL;
65081 +       cr = wnd->start_tree.rb_node;
65083 +       for (;;) {
65084 +               e = rb_entry(cr, struct e_node, start.node);
65086 +               if (e->start.key == hint)
65087 +                       break;
65089 +               if (e->start.key < hint) {
65090 +                       pr = cr;
65091 +                       cr = cr->rb_right;
65092 +                       if (!cr)
65093 +                               break;
65094 +                       continue;
65095 +               }
65097 +               cr = cr->rb_left;
65098 +               if (!cr) {
65099 +                       e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
65100 +                       break;
65101 +               }
65102 +       }
65104 +       if (!e)
65105 +               goto allocate_biggest;
65107 +       if (e->start.key + e->count.key > hint) {
65108 +               /* We have found extension with 'hint' inside */
65109 +               size_t len = e->start.key + e->count.key - hint;
65111 +               if (len >= to_alloc && hint + to_alloc <= max_alloc) {
65112 +                       fnd = hint;
65113 +                       goto found;
65114 +               }
65116 +               if (!(flags & BITMAP_FIND_FULL)) {
65117 +                       if (len > to_alloc)
65118 +                               len = to_alloc;
65120 +                       if (hint + len <= max_alloc) {
65121 +                               fnd = hint;
65122 +                               to_alloc = len;
65123 +                               goto found;
65124 +                       }
65125 +               }
65126 +       }
65128 +allocate_biggest:
65129 +       /* Allocate from biggest free extent */
65130 +       e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
65131 +       if (e->count.key != wnd->extent_max)
65132 +               wnd->extent_max = e->count.key;
65134 +       if (e->count.key < max_alloc) {
65135 +               if (e->count.key >= to_alloc) {
65136 +                       ;
65137 +               } else if (flags & BITMAP_FIND_FULL) {
65138 +                       if (e->count.key < to_alloc0) {
65139 +                               /* Biggest free block is less then requested */
65140 +                               goto no_space;
65141 +                       }
65142 +                       to_alloc = e->count.key;
65143 +               } else if (-1 != wnd->uptodated) {
65144 +                       to_alloc = e->count.key;
65145 +               } else {
65146 +                       /* Check if we can use more bits */
65147 +                       size_t op, max_check;
65148 +                       struct rb_root start_tree;
65150 +                       memcpy(&start_tree, &wnd->start_tree,
65151 +                              sizeof(struct rb_root));
65152 +                       memset(&wnd->start_tree, 0, sizeof(struct rb_root));
65154 +                       max_check = e->start.key + to_alloc;
65155 +                       if (max_check > max_alloc)
65156 +                               max_check = max_alloc;
65157 +                       for (op = e->start.key + e->count.key; op < max_check;
65158 +                            op++) {
65159 +                               if (!wnd_is_free(wnd, op, 1))
65160 +                                       break;
65161 +                       }
65162 +                       memcpy(&wnd->start_tree, &start_tree,
65163 +                              sizeof(struct rb_root));
65164 +                       to_alloc = op - e->start.key;
65165 +               }
65167 +               /* Prepare to return */
65168 +               fnd = e->start.key;
65169 +               if (e->start.key + to_alloc > max_alloc)
65170 +                       to_alloc = max_alloc - e->start.key;
65171 +               goto found;
65172 +       }
65174 +       if (wnd->uptodated == 1) {
65175 +               /* extents tree is updated -> no free space */
65176 +               goto no_space;
65177 +       }
65179 +       b_len = e->count.key;
65180 +       b_pos = e->start.key;
65182 +scan_bitmap:
65183 +       sb = wnd->sb;
65184 +       log2_bits = sb->s_blocksize_bits + 3;
65186 +       /* At most two ranges [hint, max_alloc) + [0, hint) */
65187 +Again:
65189 +       /* TODO: optimize request for case nbits > wbits */
65190 +       iw = hint >> log2_bits;
65191 +       wbits = sb->s_blocksize * 8;
65192 +       wpos = hint & (wbits - 1);
65193 +       prev_tail = 0;
65194 +       fbits_valid = true;
65196 +       if (max_alloc == wnd->nbits) {
65197 +               nwnd = wnd->nwnd;
65198 +       } else {
65199 +               size_t t = max_alloc + wbits - 1;
65201 +               nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
65202 +       }
65204 +       /* Enumerate all windows */
65205 +       for (; iw < nwnd; iw++) {
65206 +               wbit = iw << log2_bits;
65208 +               if (!wnd->free_bits[iw]) {
65209 +                       if (prev_tail > b_len) {
65210 +                               b_pos = wbit - prev_tail;
65211 +                               b_len = prev_tail;
65212 +                       }
65214 +                       /* Skip full used window */
65215 +                       prev_tail = 0;
65216 +                       wpos = 0;
65217 +                       continue;
65218 +               }
65220 +               if (unlikely(iw + 1 == nwnd)) {
65221 +                       if (max_alloc == wnd->nbits) {
65222 +                               wbits = wnd->bits_last;
65223 +                       } else {
65224 +                               size_t t = max_alloc & (wbits - 1);
65226 +                               if (t) {
65227 +                                       wbits = t;
65228 +                                       fbits_valid = false;
65229 +                               }
65230 +                       }
65231 +               }
65233 +               if (wnd->zone_end > wnd->zone_bit) {
65234 +                       ebit = wbit + wbits;
65235 +                       zbit = max(wnd->zone_bit, wbit);
65236 +                       zend = min(wnd->zone_end, ebit);
65238 +                       /* Here we have a window [wbit, ebit) and zone [zbit, zend) */
65239 +                       if (zend <= zbit) {
65240 +                               /* Zone does not overlap window */
65241 +                       } else {
65242 +                               wzbit = zbit - wbit;
65243 +                               wzend = zend - wbit;
65245 +                               /* Zone overlaps window */
65246 +                               if (wnd->free_bits[iw] == wzend - wzbit) {
65247 +                                       prev_tail = 0;
65248 +                                       wpos = 0;
65249 +                                       continue;
65250 +                               }
65252 +                               /* Scan two ranges window: [wbit, zbit) and [zend, ebit) */
65253 +                               bh = wnd_map(wnd, iw);
65255 +                               if (IS_ERR(bh)) {
65256 +                                       /* TODO: error */
65257 +                                       prev_tail = 0;
65258 +                                       wpos = 0;
65259 +                                       continue;
65260 +                               }
65262 +                               buf = (ulong *)bh->b_data;
65264 +                               /* Scan range [wbit, zbit) */
65265 +                               if (wpos < wzbit) {
65266 +                                       /* Scan range [wpos, zbit) */
65267 +                                       fnd = wnd_scan(buf, wbit, wpos, wzbit,
65268 +                                                      to_alloc, &prev_tail,
65269 +                                                      &b_pos, &b_len);
65270 +                                       if (fnd != MINUS_ONE_T) {
65271 +                                               put_bh(bh);
65272 +                                               goto found;
65273 +                                       }
65274 +                               }
65276 +                               prev_tail = 0;
65278 +                               /* Scan range [zend, ebit) */
65279 +                               if (wzend < wbits) {
65280 +                                       fnd = wnd_scan(buf, wbit,
65281 +                                                      max(wzend, wpos), wbits,
65282 +                                                      to_alloc, &prev_tail,
65283 +                                                      &b_pos, &b_len);
65284 +                                       if (fnd != MINUS_ONE_T) {
65285 +                                               put_bh(bh);
65286 +                                               goto found;
65287 +                                       }
65288 +                               }
65290 +                               wpos = 0;
65291 +                               put_bh(bh);
65292 +                               continue;
65293 +                       }
65294 +               }
65296 +               /* Current window does not overlap zone */
65297 +               if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
65298 +                       /* window is empty */
65299 +                       if (prev_tail + wbits >= to_alloc) {
65300 +                               fnd = wbit + wpos - prev_tail;
65301 +                               goto found;
65302 +                       }
65304 +                       /* Increase 'prev_tail' and process next window */
65305 +                       prev_tail += wbits;
65306 +                       wpos = 0;
65307 +                       continue;
65308 +               }
65310 +               /* read window */
65311 +               bh = wnd_map(wnd, iw);
65312 +               if (IS_ERR(bh)) {
65313 +                       // TODO: error
65314 +                       prev_tail = 0;
65315 +                       wpos = 0;
65316 +                       continue;
65317 +               }
65319 +               buf = (ulong *)bh->b_data;
65321 +               /* Scan range [wpos, eBits) */
65322 +               fnd = wnd_scan(buf, wbit, wpos, wbits, to_alloc, &prev_tail,
65323 +                              &b_pos, &b_len);
65324 +               put_bh(bh);
65325 +               if (fnd != MINUS_ONE_T)
65326 +                       goto found;
65327 +       }
65329 +       if (b_len < prev_tail) {
65330 +               /* The last fragment */
65331 +               b_len = prev_tail;
65332 +               b_pos = max_alloc - prev_tail;
65333 +       }
65335 +       if (hint) {
65336 +               /*
65337 +                * We have scanned range [hint max_alloc)
65338 +                * Prepare to scan range [0 hint + to_alloc)
65339 +                */
65340 +               size_t nextmax = hint + to_alloc;
65342 +               if (likely(nextmax >= hint) && nextmax < max_alloc)
65343 +                       max_alloc = nextmax;
65344 +               hint = 0;
65345 +               goto Again;
65346 +       }
65348 +       if (!b_len)
65349 +               goto no_space;
65351 +       wnd->extent_max = b_len;
65353 +       if (flags & BITMAP_FIND_FULL)
65354 +               goto no_space;
65356 +       fnd = b_pos;
65357 +       to_alloc = b_len;
65359 +found:
65360 +       if (flags & BITMAP_FIND_MARK_AS_USED) {
65361 +               /* TODO optimize remove extent (pass 'e'?) */
65362 +               if (wnd_set_used(wnd, fnd, to_alloc))
65363 +                       goto no_space;
65364 +       } else if (wnd->extent_max != MINUS_ONE_T &&
65365 +                  to_alloc > wnd->extent_max) {
65366 +               wnd->extent_max = to_alloc;
65367 +       }
65369 +       *allocated = fnd;
65370 +       return to_alloc;
65372 +no_space:
65373 +       return 0;
65377 + * wnd_extend
65378 + *
65379 + * Extend bitmap ($MFT bitmap)
65380 + */
65381 +int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
65383 +       int err;
65384 +       struct super_block *sb = wnd->sb;
65385 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
65386 +       u32 blocksize = sb->s_blocksize;
65387 +       u32 wbits = blocksize * 8;
65388 +       u32 b0, new_last;
65389 +       size_t bits, iw, new_wnd;
65390 +       size_t old_bits = wnd->nbits;
65391 +       u16 *new_free;
65393 +       if (new_bits <= old_bits)
65394 +               return -EINVAL;
65396 +       /* align to 8 byte boundary */
65397 +       new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
65398 +       new_last = new_bits & (wbits - 1);
65399 +       if (!new_last)
65400 +               new_last = wbits;
65402 +       if (new_wnd != wnd->nwnd) {
65403 +               new_free = ntfs_malloc(new_wnd * sizeof(u16));
65404 +               if (!new_free)
65405 +                       return -ENOMEM;
65407 +               if (new_free != wnd->free_bits)
65408 +                       memcpy(new_free, wnd->free_bits,
65409 +                              wnd->nwnd * sizeof(short));
65410 +               memset(new_free + wnd->nwnd, 0,
65411 +                      (new_wnd - wnd->nwnd) * sizeof(short));
65412 +               ntfs_free(wnd->free_bits);
65413 +               wnd->free_bits = new_free;
65414 +       }
65416 +       /* Zero bits [old_bits,new_bits) */
65417 +       bits = new_bits - old_bits;
65418 +       b0 = old_bits & (wbits - 1);
65420 +       for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
65421 +               u32 op;
65422 +               size_t frb;
65423 +               u64 vbo, lbo, bytes;
65424 +               struct buffer_head *bh;
65425 +               ulong *buf;
65427 +               if (iw + 1 == new_wnd)
65428 +                       wbits = new_last;
65430 +               op = b0 + bits > wbits ? wbits - b0 : bits;
65431 +               vbo = (u64)iw * blocksize;
65433 +               err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
65434 +               if (err)
65435 +                       break;
65437 +               bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
65438 +               if (!bh)
65439 +                       return -EIO;
65441 +               lock_buffer(bh);
65442 +               buf = (ulong *)bh->b_data;
65444 +               __bitmap_clear(buf, b0, blocksize * 8 - b0);
65445 +               frb = wbits - __bitmap_weight(buf, wbits);
65446 +               wnd->total_zeroes += frb - wnd->free_bits[iw];
65447 +               wnd->free_bits[iw] = frb;
65449 +               set_buffer_uptodate(bh);
65450 +               mark_buffer_dirty(bh);
65451 +               unlock_buffer(bh);
65452 +               /*err = sync_dirty_buffer(bh);*/
65454 +               b0 = 0;
65455 +               bits -= op;
65456 +       }
65458 +       wnd->nbits = new_bits;
65459 +       wnd->nwnd = new_wnd;
65460 +       wnd->bits_last = new_last;
65462 +       wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);
65464 +       return 0;
65468 + * wnd_zone_set
65469 + */
65470 +void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
65472 +       size_t zlen;
65474 +       zlen = wnd->zone_end - wnd->zone_bit;
65475 +       if (zlen)
65476 +               wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);
65478 +       if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
65479 +               wnd_remove_free_ext(wnd, lcn, len);
65481 +       wnd->zone_bit = lcn;
65482 +       wnd->zone_end = lcn + len;
65485 +int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
65487 +       int err = 0;
65488 +       struct super_block *sb = sbi->sb;
65489 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
65490 +       u32 wbits = 8 * sb->s_blocksize;
65491 +       CLST len = 0, lcn = 0, done = 0;
65492 +       CLST minlen = bytes_to_cluster(sbi, range->minlen);
65493 +       CLST lcn_from = bytes_to_cluster(sbi, range->start);
65494 +       size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
65495 +       u32 wbit = lcn_from & (wbits - 1);
65496 +       const ulong *buf;
65497 +       CLST lcn_to;
65499 +       if (!minlen)
65500 +               minlen = 1;
65502 +       if (range->len == (u64)-1)
65503 +               lcn_to = wnd->nbits;
65504 +       else
65505 +               lcn_to = bytes_to_cluster(sbi, range->start + range->len);
65507 +       down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
65509 +       for (; iw < wnd->nbits; iw++, wbit = 0) {
65510 +               CLST lcn_wnd = iw * wbits;
65511 +               struct buffer_head *bh;
65513 +               if (lcn_wnd > lcn_to)
65514 +                       break;
65516 +               if (!wnd->free_bits[iw])
65517 +                       continue;
65519 +               if (iw + 1 == wnd->nwnd)
65520 +                       wbits = wnd->bits_last;
65522 +               if (lcn_wnd + wbits > lcn_to)
65523 +                       wbits = lcn_to - lcn_wnd;
65525 +               bh = wnd_map(wnd, iw);
65526 +               if (IS_ERR(bh)) {
65527 +                       err = PTR_ERR(bh);
65528 +                       break;
65529 +               }
65531 +               buf = (ulong *)bh->b_data;
65533 +               for (; wbit < wbits; wbit++) {
65534 +                       if (!test_bit(wbit, buf)) {
65535 +                               if (!len)
65536 +                                       lcn = lcn_wnd + wbit;
65537 +                               len += 1;
65538 +                               continue;
65539 +                       }
65540 +                       if (len >= minlen) {
65541 +                               err = ntfs_discard(sbi, lcn, len);
65542 +                               if (err)
65543 +                                       goto out;
65544 +                               done += len;
65545 +                       }
65546 +                       len = 0;
65547 +               }
65548 +               put_bh(bh);
65549 +       }
65551 +       /* Process the last fragment */
65552 +       if (len >= minlen) {
65553 +               err = ntfs_discard(sbi, lcn, len);
65554 +               if (err)
65555 +                       goto out;
65556 +               done += len;
65557 +       }
65559 +out:
65560 +       range->len = (u64)done << sbi->cluster_bits;
65562 +       up_read(&wnd->rw_lock);
65564 +       return err;
65566 diff --git a/fs/ntfs3/debug.h b/fs/ntfs3/debug.h
65567 new file mode 100644
65568 index 000000000000..dfaa4c79dc6d
65569 --- /dev/null
65570 +++ b/fs/ntfs3/debug.h
65571 @@ -0,0 +1,64 @@
65572 +/* SPDX-License-Identifier: GPL-2.0 */
65574 + *
65575 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
65576 + *
65577 + * useful functions for debuging
65578 + */
65580 +// clang-format off
65581 +#ifndef Add2Ptr
65582 +#define Add2Ptr(P, I)          ((void *)((u8 *)(P) + (I)))
65583 +#define PtrOffset(B, O)                ((size_t)((size_t)(O) - (size_t)(B)))
65584 +#endif
65586 +#define QuadAlign(n)           (((n) + 7u) & (~7u))
65587 +#define IsQuadAligned(n)       (!((size_t)(n)&7u))
65588 +#define Quad2Align(n)          (((n) + 15u) & (~15u))
65589 +#define IsQuad2Aligned(n)      (!((size_t)(n)&15u))
65590 +#define Quad4Align(n)          (((n) + 31u) & (~31u))
65591 +#define IsSizeTAligned(n)      (!((size_t)(n) & (sizeof(size_t) - 1)))
65592 +#define DwordAlign(n)          (((n) + 3u) & (~3u))
65593 +#define IsDwordAligned(n)      (!((size_t)(n)&3u))
65594 +#define WordAlign(n)           (((n) + 1u) & (~1u))
65595 +#define IsWordAligned(n)       (!((size_t)(n)&1u))
65597 +#ifdef CONFIG_PRINTK
65598 +__printf(2, 3)
65599 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...);
65600 +__printf(2, 3)
65601 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...);
65602 +#else
65603 +static inline __printf(2, 3)
65604 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
65608 +static inline __printf(2, 3)
65609 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
65612 +#endif
65615 + * Logging macros ( thanks Joe Perches <joe@perches.com> for implementation )
65616 + */
65618 +#define ntfs_err(sb, fmt, ...)  ntfs_printk(sb, KERN_ERR fmt, ##__VA_ARGS__)
65619 +#define ntfs_warn(sb, fmt, ...) ntfs_printk(sb, KERN_WARNING fmt, ##__VA_ARGS__)
65620 +#define ntfs_info(sb, fmt, ...) ntfs_printk(sb, KERN_INFO fmt, ##__VA_ARGS__)
65621 +#define ntfs_notice(sb, fmt, ...)                                              \
65622 +       ntfs_printk(sb, KERN_NOTICE fmt, ##__VA_ARGS__)
65624 +#define ntfs_inode_err(inode, fmt, ...)                                        \
65625 +       ntfs_inode_printk(inode, KERN_ERR fmt, ##__VA_ARGS__)
65626 +#define ntfs_inode_warn(inode, fmt, ...)                                       \
65627 +       ntfs_inode_printk(inode, KERN_WARNING fmt, ##__VA_ARGS__)
65629 +#define ntfs_malloc(s)         kmalloc(s, GFP_NOFS)
65630 +#define ntfs_zalloc(s)         kzalloc(s, GFP_NOFS)
65631 +#define ntfs_vmalloc(s)                kvmalloc(s, GFP_KERNEL)
65632 +#define ntfs_free(p)           kfree(p)
65633 +#define ntfs_vfree(p)          kvfree(p)
65634 +#define ntfs_memdup(src, len)  kmemdup(src, len, GFP_NOFS)
65635 +// clang-format on
65636 diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
65637 new file mode 100644
65638 index 000000000000..9ec6012c405b
65639 --- /dev/null
65640 +++ b/fs/ntfs3/dir.c
65641 @@ -0,0 +1,594 @@
65642 +// SPDX-License-Identifier: GPL-2.0
65644 + *
65645 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
65646 + *
65647 + *  directory handling functions for ntfs-based filesystems
65648 + *
65649 + */
65650 +#include <linux/blkdev.h>
65651 +#include <linux/buffer_head.h>
65652 +#include <linux/fs.h>
65653 +#include <linux/iversion.h>
65654 +#include <linux/nls.h>
65656 +#include "debug.h"
65657 +#include "ntfs.h"
65658 +#include "ntfs_fs.h"
65661 + * Convert little endian utf16 to nls string
65662 + */
65663 +int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
65664 +                     u8 *buf, int buf_len)
65666 +       int ret, uni_len, warn;
65667 +       const __le16 *ip;
65668 +       u8 *op;
65669 +       struct nls_table *nls = sbi->options.nls;
65671 +       static_assert(sizeof(wchar_t) == sizeof(__le16));
65673 +       if (!nls) {
65674 +               /* utf16 -> utf8 */
65675 +               ret = utf16s_to_utf8s((wchar_t *)uni->name, uni->len,
65676 +                                     UTF16_LITTLE_ENDIAN, buf, buf_len);
65677 +               buf[ret] = '\0';
65678 +               return ret;
65679 +       }
65681 +       ip = uni->name;
65682 +       op = buf;
65683 +       uni_len = uni->len;
65684 +       warn = 0;
65686 +       while (uni_len--) {
65687 +               u16 ec;
65688 +               int charlen;
65689 +               char dump[5];
65691 +               if (buf_len < NLS_MAX_CHARSET_SIZE) {
65692 +                       ntfs_warn(sbi->sb,
65693 +                                 "filename was truncated while converting.");
65694 +                       break;
65695 +               }
65697 +               ec = le16_to_cpu(*ip++);
65698 +               charlen = nls->uni2char(ec, op, buf_len);
65700 +               if (charlen > 0) {
65701 +                       op += charlen;
65702 +                       buf_len -= charlen;
65703 +                       continue;
65704 +               }
65706 +               *op++ = '_';
65707 +               buf_len -= 1;
65708 +               if (warn)
65709 +                       continue;
65711 +               warn = 1;
65712 +               hex_byte_pack(&dump[0], ec >> 8);
65713 +               hex_byte_pack(&dump[2], ec);
65714 +               dump[4] = 0;
65716 +               ntfs_err(sbi->sb, "failed to convert \"%s\" to %s", dump,
65717 +                        nls->charset);
65718 +       }
65720 +       *op = '\0';
65721 +       return op - buf;
65724 +// clang-format off
65725 +#define PLANE_SIZE     0x00010000
65727 +#define SURROGATE_PAIR 0x0000d800
65728 +#define SURROGATE_LOW  0x00000400
65729 +#define SURROGATE_BITS 0x000003ff
65730 +// clang-format on
65733 + * modified version of put_utf16 from fs/nls/nls_base.c
65734 + * is sparse warnings free
65735 + */
65736 +static inline void put_utf16(wchar_t *s, unsigned int c,
65737 +                            enum utf16_endian endian)
65739 +       static_assert(sizeof(wchar_t) == sizeof(__le16));
65740 +       static_assert(sizeof(wchar_t) == sizeof(__be16));
65742 +       switch (endian) {
65743 +       default:
65744 +               *s = (wchar_t)c;
65745 +               break;
65746 +       case UTF16_LITTLE_ENDIAN:
65747 +               *(__le16 *)s = __cpu_to_le16(c);
65748 +               break;
65749 +       case UTF16_BIG_ENDIAN:
65750 +               *(__be16 *)s = __cpu_to_be16(c);
65751 +               break;
65752 +       }
65756 + * modified version of 'utf8s_to_utf16s' allows to
65757 + * detect -ENAMETOOLONG without writing out of expected maximum
65758 + */
65759 +static int _utf8s_to_utf16s(const u8 *s, int inlen, enum utf16_endian endian,
65760 +                           wchar_t *pwcs, int maxout)
65762 +       u16 *op;
65763 +       int size;
65764 +       unicode_t u;
65766 +       op = pwcs;
65767 +       while (inlen > 0 && *s) {
65768 +               if (*s & 0x80) {
65769 +                       size = utf8_to_utf32(s, inlen, &u);
65770 +                       if (size < 0)
65771 +                               return -EINVAL;
65772 +                       s += size;
65773 +                       inlen -= size;
65775 +                       if (u >= PLANE_SIZE) {
65776 +                               if (maxout < 2)
65777 +                                       return -ENAMETOOLONG;
65779 +                               u -= PLANE_SIZE;
65780 +                               put_utf16(op++,
65781 +                                         SURROGATE_PAIR |
65782 +                                                 ((u >> 10) & SURROGATE_BITS),
65783 +                                         endian);
65784 +                               put_utf16(op++,
65785 +                                         SURROGATE_PAIR | SURROGATE_LOW |
65786 +                                                 (u & SURROGATE_BITS),
65787 +                                         endian);
65788 +                               maxout -= 2;
65789 +                       } else {
65790 +                               if (maxout < 1)
65791 +                                       return -ENAMETOOLONG;
65793 +                               put_utf16(op++, u, endian);
65794 +                               maxout--;
65795 +                       }
65796 +               } else {
65797 +                       if (maxout < 1)
65798 +                               return -ENAMETOOLONG;
65800 +                       put_utf16(op++, *s++, endian);
65801 +                       inlen--;
65802 +                       maxout--;
65803 +               }
65804 +       }
65805 +       return op - pwcs;
65809 + * Convert input string to utf16
65810 + *
65811 + * name, name_len - input name
65812 + * uni, max_ulen - destination memory
65813 + * endian - endian of target utf16 string
65814 + *
65815 + * This function is called:
65816 + * - to create ntfs name
65817 + * - to create symlink
65818 + *
65819 + * returns utf16 string length or error (if negative)
65820 + */
65821 +int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
65822 +                     struct cpu_str *uni, u32 max_ulen,
65823 +                     enum utf16_endian endian)
65825 +       int ret, slen;
65826 +       const u8 *end;
65827 +       struct nls_table *nls = sbi->options.nls;
65828 +       u16 *uname = uni->name;
65830 +       static_assert(sizeof(wchar_t) == sizeof(u16));
65832 +       if (!nls) {
65833 +               /* utf8 -> utf16 */
65834 +               ret = _utf8s_to_utf16s(name, name_len, endian, uname, max_ulen);
65835 +               uni->len = ret;
65836 +               return ret;
65837 +       }
65839 +       for (ret = 0, end = name + name_len; name < end; ret++, name += slen) {
65840 +               if (ret >= max_ulen)
65841 +                       return -ENAMETOOLONG;
65843 +               slen = nls->char2uni(name, end - name, uname + ret);
65844 +               if (!slen)
65845 +                       return -EINVAL;
65846 +               if (slen < 0)
65847 +                       return slen;
65848 +       }
65850 +#ifdef __BIG_ENDIAN
65851 +       if (endian == UTF16_LITTLE_ENDIAN) {
65852 +               int i = ret;
65854 +               while (i--) {
65855 +                       __cpu_to_le16s(uname);
65856 +                       uname++;
65857 +               }
65858 +       }
65859 +#else
65860 +       if (endian == UTF16_BIG_ENDIAN) {
65861 +               int i = ret;
65863 +               while (i--) {
65864 +                       __cpu_to_be16s(uname);
65865 +                       uname++;
65866 +               }
65867 +       }
65868 +#endif
65870 +       uni->len = ret;
65871 +       return ret;
65874 +/* helper function */
65875 +struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
65876 +                          struct ntfs_fnd *fnd)
65878 +       int err = 0;
65879 +       struct super_block *sb = dir->i_sb;
65880 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
65881 +       struct ntfs_inode *ni = ntfs_i(dir);
65882 +       struct NTFS_DE *e;
65883 +       int diff;
65884 +       struct inode *inode = NULL;
65885 +       struct ntfs_fnd *fnd_a = NULL;
65887 +       if (!fnd) {
65888 +               fnd_a = fnd_get();
65889 +               if (!fnd_a) {
65890 +                       err = -ENOMEM;
65891 +                       goto out;
65892 +               }
65893 +               fnd = fnd_a;
65894 +       }
65896 +       err = indx_find(&ni->dir, ni, NULL, uni, 0, sbi, &diff, &e, fnd);
65898 +       if (err)
65899 +               goto out;
65901 +       if (diff) {
65902 +               err = -ENOENT;
65903 +               goto out;
65904 +       }
65906 +       inode = ntfs_iget5(sb, &e->ref, uni);
65907 +       if (!IS_ERR(inode) && is_bad_inode(inode)) {
65908 +               iput(inode);
65909 +               err = -EINVAL;
65910 +       }
65911 +out:
65912 +       fnd_put(fnd_a);
65914 +       return err == -ENOENT ? NULL : err ? ERR_PTR(err) : inode;
65917 +static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
65918 +                              const struct NTFS_DE *e, u8 *name,
65919 +                              struct dir_context *ctx)
65921 +       const struct ATTR_FILE_NAME *fname;
65922 +       unsigned long ino;
65923 +       int name_len;
65924 +       u32 dt_type;
65926 +       fname = Add2Ptr(e, sizeof(struct NTFS_DE));
65928 +       if (fname->type == FILE_NAME_DOS)
65929 +               return 0;
65931 +       if (!mi_is_ref(&ni->mi, &fname->home))
65932 +               return 0;
65934 +       ino = ino_get(&e->ref);
65936 +       if (ino == MFT_REC_ROOT)
65937 +               return 0;
65939 +       /* Skip meta files ( unless option to show metafiles is set ) */
65940 +       if (!sbi->options.showmeta && ntfs_is_meta_file(sbi, ino))
65941 +               return 0;
65943 +       if (sbi->options.nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
65944 +               return 0;
65946 +       name_len = ntfs_utf16_to_nls(sbi, (struct le_str *)&fname->name_len,
65947 +                                    name, PATH_MAX);
65948 +       if (name_len <= 0) {
65949 +               ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
65950 +                         ino);
65951 +               return 0;
65952 +       }
65954 +       dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
65956 +       return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
65960 + * ntfs_read_hdr
65961 + *
65962 + * helper function 'ntfs_readdir'
65963 + */
65964 +static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
65965 +                        const struct INDEX_HDR *hdr, u64 vbo, u64 pos,
65966 +                        u8 *name, struct dir_context *ctx)
65968 +       int err;
65969 +       const struct NTFS_DE *e;
65970 +       u32 e_size;
65971 +       u32 end = le32_to_cpu(hdr->used);
65972 +       u32 off = le32_to_cpu(hdr->de_off);
65974 +       for (;; off += e_size) {
65975 +               if (off + sizeof(struct NTFS_DE) > end)
65976 +                       return -1;
65978 +               e = Add2Ptr(hdr, off);
65979 +               e_size = le16_to_cpu(e->size);
65980 +               if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
65981 +                       return -1;
65983 +               if (de_is_last(e))
65984 +                       return 0;
65986 +               /* Skip already enumerated*/
65987 +               if (vbo + off < pos)
65988 +                       continue;
65990 +               if (le16_to_cpu(e->key_size) < SIZEOF_ATTRIBUTE_FILENAME)
65991 +                       return -1;
65993 +               ctx->pos = vbo + off;
65995 +               /* Submit the name to the filldir callback. */
65996 +               err = ntfs_filldir(sbi, ni, e, name, ctx);
65997 +               if (err)
65998 +                       return err;
65999 +       }
66003 + * file_operations::iterate_shared
66004 + *
66005 + * Use non sorted enumeration.
66006 + * We have an example of broken volume where sorted enumeration
66007 + * counts each name twice
66008 + */
66009 +static int ntfs_readdir(struct file *file, struct dir_context *ctx)
66011 +       const struct INDEX_ROOT *root;
66012 +       u64 vbo;
66013 +       size_t bit;
66014 +       loff_t eod;
66015 +       int err = 0;
66016 +       struct inode *dir = file_inode(file);
66017 +       struct ntfs_inode *ni = ntfs_i(dir);
66018 +       struct super_block *sb = dir->i_sb;
66019 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
66020 +       loff_t i_size = i_size_read(dir);
66021 +       u32 pos = ctx->pos;
66022 +       u8 *name = NULL;
66023 +       struct indx_node *node = NULL;
66024 +       u8 index_bits = ni->dir.index_bits;
66026 +       /* name is a buffer of PATH_MAX length */
66027 +       static_assert(NTFS_NAME_LEN * 4 < PATH_MAX);
66029 +       eod = i_size + sbi->record_size;
66031 +       if (pos >= eod)
66032 +               return 0;
66034 +       if (!dir_emit_dots(file, ctx))
66035 +               return 0;
66037 +       /* allocate PATH_MAX bytes */
66038 +       name = __getname();
66039 +       if (!name)
66040 +               return -ENOMEM;
66042 +       if (!ni->mi_loaded && ni->attr_list.size) {
66043 +               /*
66044 +                * directory inode is locked for read
66045 +                * load all subrecords to avoid 'write' access to 'ni' during
66046 +                * directory reading
66047 +                */
66048 +               ni_lock(ni);
66049 +               if (!ni->mi_loaded && ni->attr_list.size) {
66050 +                       err = ni_load_all_mi(ni);
66051 +                       if (!err)
66052 +                               ni->mi_loaded = true;
66053 +               }
66054 +               ni_unlock(ni);
66055 +               if (err)
66056 +                       goto out;
66057 +       }
66059 +       root = indx_get_root(&ni->dir, ni, NULL, NULL);
66060 +       if (!root) {
66061 +               err = -EINVAL;
66062 +               goto out;
66063 +       }
66065 +       if (pos >= sbi->record_size) {
66066 +               bit = (pos - sbi->record_size) >> index_bits;
66067 +       } else {
66068 +               err = ntfs_read_hdr(sbi, ni, &root->ihdr, 0, pos, name, ctx);
66069 +               if (err)
66070 +                       goto out;
66071 +               bit = 0;
66072 +       }
66074 +       if (!i_size) {
66075 +               ctx->pos = eod;
66076 +               goto out;
66077 +       }
66079 +       for (;;) {
66080 +               vbo = (u64)bit << index_bits;
66081 +               if (vbo >= i_size) {
66082 +                       ctx->pos = eod;
66083 +                       goto out;
66084 +               }
66086 +               err = indx_used_bit(&ni->dir, ni, &bit);
66087 +               if (err)
66088 +                       goto out;
66090 +               if (bit == MINUS_ONE_T) {
66091 +                       ctx->pos = eod;
66092 +                       goto out;
66093 +               }
66095 +               vbo = (u64)bit << index_bits;
66096 +               if (vbo >= i_size) {
66097 +                       ntfs_inode_err(dir, "Looks like your dir is corrupt");
66098 +                       err = -EINVAL;
66099 +                       goto out;
66100 +               }
66102 +               err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
66103 +                               &node);
66104 +               if (err)
66105 +                       goto out;
66107 +               err = ntfs_read_hdr(sbi, ni, &node->index->ihdr,
66108 +                                   vbo + sbi->record_size, pos, name, ctx);
66109 +               if (err)
66110 +                       goto out;
66112 +               bit += 1;
66113 +       }
66115 +out:
66117 +       __putname(name);
66118 +       put_indx_node(node);
66120 +       if (err == -ENOENT) {
66121 +               err = 0;
66122 +               ctx->pos = pos;
66123 +       }
66125 +       return err;
66128 +static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
66129 +                         size_t *files)
66131 +       int err = 0;
66132 +       struct ntfs_inode *ni = ntfs_i(dir);
66133 +       struct NTFS_DE *e = NULL;
66134 +       struct INDEX_ROOT *root;
66135 +       struct INDEX_HDR *hdr;
66136 +       const struct ATTR_FILE_NAME *fname;
66137 +       u32 e_size, off, end;
66138 +       u64 vbo = 0;
66139 +       size_t drs = 0, fles = 0, bit = 0;
66140 +       loff_t i_size = ni->vfs_inode.i_size;
66141 +       struct indx_node *node = NULL;
66142 +       u8 index_bits = ni->dir.index_bits;
66144 +       if (is_empty)
66145 +               *is_empty = true;
66147 +       root = indx_get_root(&ni->dir, ni, NULL, NULL);
66148 +       if (!root)
66149 +               return -EINVAL;
66151 +       hdr = &root->ihdr;
66153 +       for (;;) {
66154 +               end = le32_to_cpu(hdr->used);
66155 +               off = le32_to_cpu(hdr->de_off);
66157 +               for (; off + sizeof(struct NTFS_DE) <= end; off += e_size) {
66158 +                       e = Add2Ptr(hdr, off);
66159 +                       e_size = le16_to_cpu(e->size);
66160 +                       if (e_size < sizeof(struct NTFS_DE) ||
66161 +                           off + e_size > end)
66162 +                               break;
66164 +                       if (de_is_last(e))
66165 +                               break;
66167 +                       fname = de_get_fname(e);
66168 +                       if (!fname)
66169 +                               continue;
66171 +                       if (fname->type == FILE_NAME_DOS)
66172 +                               continue;
66174 +                       if (is_empty) {
66175 +                               *is_empty = false;
66176 +                               if (!dirs && !files)
66177 +                                       goto out;
66178 +                       }
66180 +                       if (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY)
66181 +                               drs += 1;
66182 +                       else
66183 +                               fles += 1;
66184 +               }
66186 +               if (vbo >= i_size)
66187 +                       goto out;
66189 +               err = indx_used_bit(&ni->dir, ni, &bit);
66190 +               if (err)
66191 +                       goto out;
66193 +               if (bit == MINUS_ONE_T)
66194 +                       goto out;
66196 +               vbo = (u64)bit << index_bits;
66197 +               if (vbo >= i_size)
66198 +                       goto out;
66200 +               err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
66201 +                               &node);
66202 +               if (err)
66203 +                       goto out;
66205 +               hdr = &node->index->ihdr;
66206 +               bit += 1;
66207 +               vbo = (u64)bit << ni->dir.idx2vbn_bits;
66208 +       }
66210 +out:
66211 +       put_indx_node(node);
66212 +       if (dirs)
66213 +               *dirs = drs;
66214 +       if (files)
66215 +               *files = fles;
66217 +       return err;
66220 +bool dir_is_empty(struct inode *dir)
66222 +       bool is_empty = false;
66224 +       ntfs_dir_count(dir, &is_empty, NULL, NULL);
66226 +       return is_empty;
66229 +const struct file_operations ntfs_dir_operations = {
66230 +       .llseek = generic_file_llseek,
66231 +       .read = generic_read_dir,
66232 +       .iterate_shared = ntfs_readdir,
66233 +       .fsync = generic_file_fsync,
66234 +       .open = ntfs_file_open,
66236 diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
66237 new file mode 100644
66238 index 000000000000..347baf674008
66239 --- /dev/null
66240 +++ b/fs/ntfs3/file.c
66241 @@ -0,0 +1,1130 @@
66242 +// SPDX-License-Identifier: GPL-2.0
66244 + *
66245 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
66246 + *
66247 + *  regular file handling primitives for ntfs-based filesystems
66248 + */
66249 +#include <linux/backing-dev.h>
66250 +#include <linux/buffer_head.h>
66251 +#include <linux/compat.h>
66252 +#include <linux/falloc.h>
66253 +#include <linux/fiemap.h>
66254 +#include <linux/msdos_fs.h> /* FAT_IOCTL_XXX */
66255 +#include <linux/nls.h>
66257 +#include "debug.h"
66258 +#include "ntfs.h"
66259 +#include "ntfs_fs.h"
66261 +static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
66263 +       struct fstrim_range __user *user_range;
66264 +       struct fstrim_range range;
66265 +       struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
66266 +       int err;
66268 +       if (!capable(CAP_SYS_ADMIN))
66269 +               return -EPERM;
66271 +       if (!blk_queue_discard(q))
66272 +               return -EOPNOTSUPP;
66274 +       user_range = (struct fstrim_range __user *)arg;
66275 +       if (copy_from_user(&range, user_range, sizeof(range)))
66276 +               return -EFAULT;
66278 +       range.minlen = max_t(u32, range.minlen, q->limits.discard_granularity);
66280 +       err = ntfs_trim_fs(sbi, &range);
66281 +       if (err < 0)
66282 +               return err;
66284 +       if (copy_to_user(user_range, &range, sizeof(range)))
66285 +               return -EFAULT;
66287 +       return 0;
66290 +static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
66292 +       struct inode *inode = file_inode(filp);
66293 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
66294 +       u32 __user *user_attr = (u32 __user *)arg;
66296 +       switch (cmd) {
66297 +       case FAT_IOCTL_GET_ATTRIBUTES:
66298 +               return put_user(le32_to_cpu(ntfs_i(inode)->std_fa), user_attr);
66300 +       case FAT_IOCTL_GET_VOLUME_ID:
66301 +               return put_user(sbi->volume.ser_num, user_attr);
66303 +       case FITRIM:
66304 +               return ntfs_ioctl_fitrim(sbi, arg);
66305 +       }
66306 +       return -ENOTTY; /* Inappropriate ioctl for device */
66309 +#ifdef CONFIG_COMPAT
66310 +static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
66313 +       return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
66315 +#endif
66318 + * inode_operations::getattr
66319 + */
66320 +int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
66321 +                struct kstat *stat, u32 request_mask, u32 flags)
66323 +       struct inode *inode = d_inode(path->dentry);
66324 +       struct ntfs_inode *ni = ntfs_i(inode);
66326 +       if (is_compressed(ni))
66327 +               stat->attributes |= STATX_ATTR_COMPRESSED;
66329 +       if (is_encrypted(ni))
66330 +               stat->attributes |= STATX_ATTR_ENCRYPTED;
66332 +       stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;
66334 +       generic_fillattr(mnt_userns, inode, stat);
66336 +       stat->result_mask |= STATX_BTIME;
66337 +       stat->btime = ni->i_crtime;
66339 +       return 0;
66342 +static int ntfs_extend_initialized_size(struct file *file,
66343 +                                       struct ntfs_inode *ni,
66344 +                                       const loff_t valid,
66345 +                                       const loff_t new_valid)
66347 +       struct inode *inode = &ni->vfs_inode;
66348 +       struct address_space *mapping = inode->i_mapping;
66349 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
66350 +       loff_t pos = valid;
66351 +       int err;
66353 +       if (is_resident(ni)) {
66354 +               ni->i_valid = new_valid;
66355 +               return 0;
66356 +       }
66358 +       WARN_ON(is_compressed(ni));
66359 +       WARN_ON(valid >= new_valid);
66361 +       for (;;) {
66362 +               u32 zerofrom, len;
66363 +               struct page *page;
66364 +               void *fsdata;
66365 +               u8 bits;
66366 +               CLST vcn, lcn, clen;
66368 +               if (is_sparsed(ni)) {
66369 +                       bits = sbi->cluster_bits;
66370 +                       vcn = pos >> bits;
66372 +                       err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
66373 +                                                 NULL);
66374 +                       if (err)
66375 +                               goto out;
66377 +                       if (lcn == SPARSE_LCN) {
66378 +                               loff_t vbo = (loff_t)vcn << bits;
66379 +                               loff_t to = vbo + ((loff_t)clen << bits);
66381 +                               if (to <= new_valid) {
66382 +                                       ni->i_valid = to;
66383 +                                       pos = to;
66384 +                                       goto next;
66385 +                               }
66387 +                               if (vbo < pos) {
66388 +                                       pos = vbo;
66389 +                               } else {
66390 +                                       to = (new_valid >> bits) << bits;
66391 +                                       if (pos < to) {
66392 +                                               ni->i_valid = to;
66393 +                                               pos = to;
66394 +                                               goto next;
66395 +                                       }
66396 +                               }
66397 +                       }
66398 +               }
66400 +               zerofrom = pos & (PAGE_SIZE - 1);
66401 +               len = PAGE_SIZE - zerofrom;
66403 +               if (pos + len > new_valid)
66404 +                       len = new_valid - pos;
66406 +               err = pagecache_write_begin(file, mapping, pos, len, 0, &page,
66407 +                                           &fsdata);
66408 +               if (err)
66409 +                       goto out;
66411 +               zero_user_segment(page, zerofrom, PAGE_SIZE);
66413 +               /* this function in any case puts page*/
66414 +               err = pagecache_write_end(file, mapping, pos, len, len, page,
66415 +                                         fsdata);
66416 +               if (err < 0)
66417 +                       goto out;
66418 +               pos += len;
66420 +next:
66421 +               if (pos >= new_valid)
66422 +                       break;
66424 +               balance_dirty_pages_ratelimited(mapping);
66425 +               cond_resched();
66426 +       }
66428 +       mark_inode_dirty(inode);
66430 +       return 0;
66432 +out:
66433 +       ni->i_valid = valid;
66434 +       ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
66435 +                       new_valid);
66436 +       return err;
66440 + * ntfs_sparse_cluster
66441 + *
66442 + * Helper function to zero a new allocated clusters
66443 + */
66444 +void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
66445 +                        CLST len)
66447 +       struct address_space *mapping = inode->i_mapping;
66448 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
66449 +       u64 vbo = (u64)vcn << sbi->cluster_bits;
66450 +       u64 bytes = (u64)len << sbi->cluster_bits;
66451 +       u32 blocksize = 1 << inode->i_blkbits;
66452 +       pgoff_t idx0 = page0 ? page0->index : -1;
66453 +       loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
66454 +       loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
66455 +       pgoff_t idx = vbo_clst >> PAGE_SHIFT;
66456 +       u32 from = vbo_clst & (PAGE_SIZE - 1);
66457 +       pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
66458 +       loff_t page_off;
66459 +       u32 to;
66460 +       bool partial;
66461 +       struct page *page;
66463 +       for (; idx < idx_end; idx += 1, from = 0) {
66464 +               page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);
66466 +               if (!page)
66467 +                       continue;
66469 +               page_off = (loff_t)idx << PAGE_SHIFT;
66470 +               to = (page_off + PAGE_SIZE) > end ? (end - page_off)
66471 +                                                 : PAGE_SIZE;
66472 +               partial = false;
66474 +               if ((from || PAGE_SIZE != to) &&
66475 +                   likely(!page_has_buffers(page))) {
66476 +                       create_empty_buffers(page, blocksize, 0);
66477 +                       if (!page_has_buffers(page)) {
66478 +                               ntfs_inode_err(
66479 +                                       inode,
66480 +                                       "failed to allocate page buffers.");
66481 +                               /*err = -ENOMEM;*/
66482 +                               goto unlock_page;
66483 +                       }
66484 +               }
66486 +               if (page_has_buffers(page)) {
66487 +                       struct buffer_head *head, *bh;
66488 +                       u32 bh_off = 0;
66490 +                       bh = head = page_buffers(page);
66491 +                       do {
66492 +                               u32 bh_next = bh_off + blocksize;
66494 +                               if (from <= bh_off && bh_next <= to) {
66495 +                                       set_buffer_uptodate(bh);
66496 +                                       mark_buffer_dirty(bh);
66497 +                               } else if (!buffer_uptodate(bh)) {
66498 +                                       partial = true;
66499 +                               }
66500 +                               bh_off = bh_next;
66501 +                       } while (head != (bh = bh->b_this_page));
66502 +               }
66504 +               zero_user_segment(page, from, to);
66506 +               if (!partial) {
66507 +                       if (!PageUptodate(page))
66508 +                               SetPageUptodate(page);
66509 +                       set_page_dirty(page);
66510 +               }
66512 +unlock_page:
66513 +               if (idx != idx0) {
66514 +                       unlock_page(page);
66515 +                       put_page(page);
66516 +               }
66517 +               cond_resched();
66518 +       }
66519 +       mark_inode_dirty(inode);
66523 + * file_operations::mmap
66524 + */
66525 +static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
66527 +       struct address_space *mapping = file->f_mapping;
66528 +       struct inode *inode = mapping->host;
66529 +       struct ntfs_inode *ni = ntfs_i(inode);
66530 +       u64 to, from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
66531 +       bool rw = vma->vm_flags & VM_WRITE;
66532 +       int err;
66534 +       if (is_encrypted(ni)) {
66535 +               ntfs_inode_warn(inode,
66536 +                               "mmap is not supported for encrypted files");
66537 +               err = -EOPNOTSUPP;
66538 +               goto out;
66539 +       }
66541 +       if (!rw)
66542 +               goto do_map;
66544 +       if (is_compressed(ni)) {
66545 +               ntfs_inode_warn(
66546 +                       inode,
66547 +                       "mmap(write) is not supported for compressed files");
66548 +               err = -EOPNOTSUPP;
66549 +               goto out;
66550 +       }
66552 +       to = min_t(loff_t, i_size_read(inode),
66553 +                  from + vma->vm_end - vma->vm_start);
66555 +       if (is_sparsed(ni)) {
66556 +               /* allocate clusters for rw map */
66557 +               struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
66558 +               CLST vcn, lcn, len;
66559 +               CLST end = bytes_to_cluster(sbi, to);
66560 +               bool new;
66562 +               for (vcn = from >> sbi->cluster_bits; vcn < end; vcn += len) {
66563 +                       err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new);
66564 +                       if (err)
66565 +                               goto out;
66566 +                       if (!new)
66567 +                               continue;
66568 +                       ntfs_sparse_cluster(inode, NULL, vcn, 1);
66569 +               }
66570 +       }
66572 +       if (ni->i_valid < to) {
66573 +               inode_lock(inode);
66574 +               err = ntfs_extend_initialized_size(file, ni, ni->i_valid, to);
66575 +               inode_unlock(inode);
66576 +               if (err)
66577 +                       goto out;
66578 +       }
66580 +do_map:
66581 +       err = generic_file_mmap(file, vma);
66582 +out:
66583 +       return err;
66586 +static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
66587 +                      struct file *file)
66589 +       struct ntfs_inode *ni = ntfs_i(inode);
66590 +       struct address_space *mapping = inode->i_mapping;
66591 +       loff_t end = pos + count;
66592 +       bool extend_init = file && pos > ni->i_valid;
66593 +       int err;
66595 +       if (end <= inode->i_size && !extend_init)
66596 +               return 0;
66598 +       /*mark rw ntfs as dirty. it will be cleared at umount*/
66599 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);
66601 +       if (end > inode->i_size) {
66602 +               err = ntfs_set_size(inode, end);
66603 +               if (err)
66604 +                       goto out;
66605 +               inode->i_size = end;
66606 +       }
66608 +       if (extend_init && !is_compressed(ni)) {
66609 +               err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
66610 +               if (err)
66611 +                       goto out;
66612 +       } else {
66613 +               err = 0;
66614 +       }
66616 +       inode->i_ctime = inode->i_mtime = current_time(inode);
66617 +       mark_inode_dirty(inode);
66619 +       if (IS_SYNC(inode)) {
66620 +               int err2;
66622 +               err = filemap_fdatawrite_range(mapping, pos, end - 1);
66623 +               err2 = sync_mapping_buffers(mapping);
66624 +               if (!err)
66625 +                       err = err2;
66626 +               err2 = write_inode_now(inode, 1);
66627 +               if (!err)
66628 +                       err = err2;
66629 +               if (!err)
66630 +                       err = filemap_fdatawait_range(mapping, pos, end - 1);
66631 +       }
66633 +out:
66634 +       return err;
66637 +static int ntfs_truncate(struct inode *inode, loff_t new_size)
66639 +       struct super_block *sb = inode->i_sb;
66640 +       struct ntfs_inode *ni = ntfs_i(inode);
66641 +       int err, dirty = 0;
66642 +       u64 new_valid;
66644 +       if (!S_ISREG(inode->i_mode))
66645 +               return 0;
66647 +       if (is_compressed(ni)) {
66648 +               if (ni->i_valid > new_size)
66649 +                       ni->i_valid = new_size;
66650 +       } else {
66651 +               err = block_truncate_page(inode->i_mapping, new_size,
66652 +                                         ntfs_get_block);
66653 +               if (err)
66654 +                       return err;
66655 +       }
66657 +       new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));
66659 +       ni_lock(ni);
66661 +       truncate_setsize(inode, new_size);
66663 +       down_write(&ni->file.run_lock);
66664 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
66665 +                           &new_valid, true, NULL);
66666 +       up_write(&ni->file.run_lock);
66668 +       if (new_valid < ni->i_valid)
66669 +               ni->i_valid = new_valid;
66671 +       ni_unlock(ni);
66673 +       ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
66674 +       inode->i_ctime = inode->i_mtime = current_time(inode);
66675 +       if (!IS_DIRSYNC(inode)) {
66676 +               dirty = 1;
66677 +       } else {
66678 +               err = ntfs_sync_inode(inode);
66679 +               if (err)
66680 +                       return err;
66681 +       }
66683 +       if (dirty)
66684 +               mark_inode_dirty(inode);
66686 +       /*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/
66688 +       return 0;
66692 + * Preallocate space for a file. This implements ntfs's fallocate file
66693 + * operation, which gets called from sys_fallocate system call. User
66694 + * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
66695 + * we just allocate clusters without zeroing them out. Otherwise we
66696 + * allocate and zero out clusters via an expanding truncate.
66697 + */
66698 +static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
66700 +       struct inode *inode = file->f_mapping->host;
66701 +       struct super_block *sb = inode->i_sb;
66702 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
66703 +       struct ntfs_inode *ni = ntfs_i(inode);
66704 +       loff_t end = vbo + len;
66705 +       loff_t vbo_down = round_down(vbo, PAGE_SIZE);
66706 +       loff_t i_size;
66707 +       int err;
66709 +       /* No support for dir */
66710 +       if (!S_ISREG(inode->i_mode))
66711 +               return -EOPNOTSUPP;
66713 +       /* Return error if mode is not supported */
66714 +       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
66715 +                    FALLOC_FL_COLLAPSE_RANGE))
66716 +               return -EOPNOTSUPP;
66718 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
66720 +       inode_lock(inode);
66721 +       i_size = inode->i_size;
66723 +       if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
66724 +               /* should never be here, see ntfs_file_open*/
66725 +               err = -EOPNOTSUPP;
66726 +               goto out;
66727 +       }
66729 +       if (mode & FALLOC_FL_PUNCH_HOLE) {
66730 +               if (!(mode & FALLOC_FL_KEEP_SIZE)) {
66731 +                       err = -EINVAL;
66732 +                       goto out;
66733 +               }
66735 +               if (!is_sparsed(ni) && !is_compressed(ni)) {
66736 +                       ntfs_inode_warn(
66737 +                               inode,
66738 +                               "punch_hole only for sparsed/compressed files");
66739 +                       err = -EOPNOTSUPP;
66740 +                       goto out;
66741 +               }
66743 +               err = filemap_write_and_wait_range(inode->i_mapping, vbo,
66744 +                                                  end - 1);
66745 +               if (err)
66746 +                       goto out;
66748 +               err = filemap_write_and_wait_range(inode->i_mapping, end,
66749 +                                                  LLONG_MAX);
66750 +               if (err)
66751 +                       goto out;
66753 +               truncate_pagecache(inode, vbo_down);
66755 +               ni_lock(ni);
66756 +               err = attr_punch_hole(ni, vbo, len);
66757 +               ni_unlock(ni);
66758 +       } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
66759 +               if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
66760 +                       err = -EINVAL;
66761 +                       goto out;
66762 +               }
66764 +               /*
66765 +                * Write tail of the last page before removed range since
66766 +                * it will get removed from the page cache below.
66767 +                */
66768 +               err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
66769 +                                                  vbo);
66770 +               if (err)
66771 +                       goto out;
66773 +               /*
66774 +                * Write data that will be shifted to preserve them
66775 +                * when discarding page cache below
66776 +                */
66777 +               err = filemap_write_and_wait_range(inode->i_mapping, end,
66778 +                                                  LLONG_MAX);
66779 +               if (err)
66780 +                       goto out;
66782 +               truncate_pagecache(inode, vbo_down);
66784 +               ni_lock(ni);
66785 +               err = attr_collapse_range(ni, vbo, len);
66786 +               ni_unlock(ni);
66787 +       } else {
66788 +               /*
66789 +                * normal file: allocate clusters, do not change 'valid' size
66790 +                */
66791 +               err = ntfs_set_size(inode, max(end, i_size));
66792 +               if (err)
66793 +                       goto out;
66795 +               if (is_sparsed(ni) || is_compressed(ni)) {
66796 +                       CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
66797 +                       CLST vcn = vbo >> sbi->cluster_bits;
66798 +                       CLST cend = bytes_to_cluster(sbi, end);
66799 +                       CLST lcn, clen;
66800 +                       bool new;
66802 +                       /*
66803 +                        * allocate but not zero new clusters (see below comments)
66804 +                        * this breaks security (one can read unused on-disk areas)
66805 +                        * zeroing these clusters may be too long
66806 +                        * may be we should check here for root rights?
66807 +                        */
66808 +                       for (; vcn < cend; vcn += clen) {
66809 +                               err = attr_data_get_block(ni, vcn, cend - vcn,
66810 +                                                         &lcn, &clen, &new);
66811 +                               if (err)
66812 +                                       goto out;
66813 +                               if (!new || vcn >= vcn_v)
66814 +                                       continue;
66816 +                               /*
66817 +                                * Unwritten area
66818 +                                * NTFS is not able to store several unwritten areas
66819 +                                * Activate 'ntfs_sparse_cluster' to zero new allocated clusters
66820 +                                *
66821 +                                * Dangerous in case:
66822 +                                * 1G of sparsed clusters + 1 cluster of data =>
66823 +                                * valid_size == 1G + 1 cluster
66824 +                                * fallocate(1G) will zero 1G and this can be very long
66825 +                                * xfstest 016/086 will fail without 'ntfs_sparse_cluster'
66826 +                                */
66827 +                               /*ntfs_sparse_cluster(inode, NULL, vcn,
66828 +                                *                  min(vcn_v - vcn, clen));
66829 +                                */
66830 +                       }
66831 +               }
66833 +               if (mode & FALLOC_FL_KEEP_SIZE) {
66834 +                       ni_lock(ni);
66835 +                       /*true - keep preallocated*/
66836 +                       err = attr_set_size(ni, ATTR_DATA, NULL, 0,
66837 +                                           &ni->file.run, i_size, &ni->i_valid,
66838 +                                           true, NULL);
66839 +                       ni_unlock(ni);
66840 +               }
66841 +       }
66843 +       if (!err) {
66844 +               inode->i_ctime = inode->i_mtime = current_time(inode);
66845 +               mark_inode_dirty(inode);
66846 +       }
66847 +out:
66848 +       if (err == -EFBIG)
66849 +               err = -ENOSPC;
66851 +       inode_unlock(inode);
66852 +       return err;
66856 + * inode_operations::setattr
66857 + */
66858 +int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
66859 +                 struct iattr *attr)
66861 +       struct super_block *sb = dentry->d_sb;
66862 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
66863 +       struct inode *inode = d_inode(dentry);
66864 +       struct ntfs_inode *ni = ntfs_i(inode);
66865 +       u32 ia_valid = attr->ia_valid;
66866 +       umode_t mode = inode->i_mode;
66867 +       int err;
66869 +       if (sbi->options.no_acs_rules) {
66870 +               /* "no access rules" - force any changes of time etc. */
66871 +               attr->ia_valid |= ATTR_FORCE;
66872 +               /* and disable for editing some attributes */
66873 +               attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
66874 +               ia_valid = attr->ia_valid;
66875 +       }
66877 +       err = setattr_prepare(mnt_userns, dentry, attr);
66878 +       if (err)
66879 +               goto out;
66881 +       if (ia_valid & ATTR_SIZE) {
66882 +               loff_t oldsize = inode->i_size;
66884 +               if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
66885 +                       /* should never be here, see ntfs_file_open*/
66886 +                       err = -EOPNOTSUPP;
66887 +                       goto out;
66888 +               }
66889 +               inode_dio_wait(inode);
66891 +               if (attr->ia_size < oldsize)
66892 +                       err = ntfs_truncate(inode, attr->ia_size);
66893 +               else if (attr->ia_size > oldsize)
66894 +                       err = ntfs_extend(inode, attr->ia_size, 0, NULL);
66896 +               if (err)
66897 +                       goto out;
66899 +               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
66900 +       }
66902 +       setattr_copy(mnt_userns, inode, attr);
66904 +       if (mode != inode->i_mode) {
66905 +               err = ntfs_acl_chmod(mnt_userns, inode);
66906 +               if (err)
66907 +                       goto out;
66909 +               /* linux 'w' -> windows 'ro' */
66910 +               if (0222 & inode->i_mode)
66911 +                       ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
66912 +               else
66913 +                       ni->std_fa |= FILE_ATTRIBUTE_READONLY;
66914 +       }
66916 +       mark_inode_dirty(inode);
66917 +out:
66918 +       return err;
66921 +static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
66923 +       ssize_t err;
66924 +       size_t count = iov_iter_count(iter);
66925 +       struct file *file = iocb->ki_filp;
66926 +       struct inode *inode = file->f_mapping->host;
66927 +       struct ntfs_inode *ni = ntfs_i(inode);
66929 +       if (is_encrypted(ni)) {
66930 +               ntfs_inode_warn(inode, "encrypted i/o not supported");
66931 +               return -EOPNOTSUPP;
66932 +       }
66934 +       if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
66935 +               ntfs_inode_warn(inode, "direct i/o + compressed not supported");
66936 +               return -EOPNOTSUPP;
66937 +       }
66939 +#ifndef CONFIG_NTFS3_LZX_XPRESS
66940 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
66941 +               ntfs_inode_warn(
66942 +                       inode,
66943 +                       "activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
66944 +               return -EOPNOTSUPP;
66945 +       }
66946 +#endif
66948 +       if (is_dedup(ni)) {
66949 +               ntfs_inode_warn(inode, "read deduplicated not supported");
66950 +               return -EOPNOTSUPP;
66951 +       }
66953 +       err = count ? generic_file_read_iter(iocb, iter) : 0;
66955 +       return err;
66958 +/* returns array of locked pages */
66959 +static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
66960 +                               struct page **pages, u32 pages_per_frame,
66961 +                               bool *frame_uptodate)
66963 +       gfp_t gfp_mask = mapping_gfp_mask(mapping);
66964 +       u32 npages;
66966 +       *frame_uptodate = true;
66968 +       for (npages = 0; npages < pages_per_frame; npages++, index++) {
66969 +               struct page *page;
66971 +               page = find_or_create_page(mapping, index, gfp_mask);
66972 +               if (!page) {
66973 +                       while (npages--) {
66974 +                               page = pages[npages];
66975 +                               unlock_page(page);
66976 +                               put_page(page);
66977 +                       }
66979 +                       return -ENOMEM;
66980 +               }
66982 +               if (!PageUptodate(page))
66983 +                       *frame_uptodate = false;
66985 +               pages[npages] = page;
66986 +       }
66988 +       return 0;
66991 +/*helper for ntfs_file_write_iter (compressed files)*/
66992 +static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
66994 +       int err;
66995 +       struct file *file = iocb->ki_filp;
66996 +       size_t count = iov_iter_count(from);
66997 +       loff_t pos = iocb->ki_pos;
66998 +       struct inode *inode = file_inode(file);
66999 +       loff_t i_size = inode->i_size;
67000 +       struct address_space *mapping = inode->i_mapping;
67001 +       struct ntfs_inode *ni = ntfs_i(inode);
67002 +       u64 valid = ni->i_valid;
67003 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
67004 +       struct page *page, **pages = NULL;
67005 +       size_t written = 0;
67006 +       u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
67007 +       u32 frame_size = 1u << frame_bits;
67008 +       u32 pages_per_frame = frame_size >> PAGE_SHIFT;
67009 +       u32 ip, off;
67010 +       CLST frame;
67011 +       u64 frame_vbo;
67012 +       pgoff_t index;
67013 +       bool frame_uptodate;
67015 +       if (frame_size < PAGE_SIZE) {
67016 +               /*
67017 +                * frame_size == 8K if cluster 512
67018 +                * frame_size == 64K if cluster 4096
67019 +                */
67020 +               ntfs_inode_warn(inode, "page size is bigger than frame size");
67021 +               return -EOPNOTSUPP;
67022 +       }
67024 +       pages = ntfs_malloc(pages_per_frame * sizeof(struct page *));
67025 +       if (!pages)
67026 +               return -ENOMEM;
67028 +       current->backing_dev_info = inode_to_bdi(inode);
67029 +       err = file_remove_privs(file);
67030 +       if (err)
67031 +               goto out;
67033 +       err = file_update_time(file);
67034 +       if (err)
67035 +               goto out;
67037 +       /* zero range [valid : pos) */
67038 +       while (valid < pos) {
67039 +               CLST lcn, clen;
67041 +               frame = valid >> frame_bits;
67042 +               frame_vbo = valid & ~(frame_size - 1);
67043 +               off = valid & (frame_size - 1);
67045 +               err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
67046 +                                         &clen, NULL);
67047 +               if (err)
67048 +                       goto out;
67050 +               if (lcn == SPARSE_LCN) {
67051 +                       ni->i_valid = valid =
67052 +                               frame_vbo + ((u64)clen << sbi->cluster_bits);
67053 +                       continue;
67054 +               }
67056 +               /* Load full frame */
67057 +               err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
67058 +                                          pages, pages_per_frame,
67059 +                                          &frame_uptodate);
67060 +               if (err)
67061 +                       goto out;
67063 +               if (!frame_uptodate && off) {
67064 +                       err = ni_read_frame(ni, frame_vbo, pages,
67065 +                                           pages_per_frame);
67066 +                       if (err) {
67067 +                               for (ip = 0; ip < pages_per_frame; ip++) {
67068 +                                       page = pages[ip];
67069 +                                       unlock_page(page);
67070 +                                       put_page(page);
67071 +                               }
67072 +                               goto out;
67073 +                       }
67074 +               }
67076 +               ip = off >> PAGE_SHIFT;
67077 +               off = offset_in_page(valid);
67078 +               for (; ip < pages_per_frame; ip++, off = 0) {
67079 +                       page = pages[ip];
67080 +                       zero_user_segment(page, off, PAGE_SIZE);
67081 +                       flush_dcache_page(page);
67082 +                       SetPageUptodate(page);
67083 +               }
67085 +               ni_lock(ni);
67086 +               err = ni_write_frame(ni, pages, pages_per_frame);
67087 +               ni_unlock(ni);
67089 +               for (ip = 0; ip < pages_per_frame; ip++) {
67090 +                       page = pages[ip];
67091 +                       SetPageUptodate(page);
67092 +                       unlock_page(page);
67093 +                       put_page(page);
67094 +               }
67096 +               if (err)
67097 +                       goto out;
67099 +               ni->i_valid = valid = frame_vbo + frame_size;
67100 +       }
67102 +       /* copy user data [pos : pos + count) */
67103 +       while (count) {
67104 +               size_t copied, bytes;
67106 +               off = pos & (frame_size - 1);
67107 +               bytes = frame_size - off;
67108 +               if (bytes > count)
67109 +                       bytes = count;
67111 +               frame = pos >> frame_bits;
67112 +               frame_vbo = pos & ~(frame_size - 1);
67113 +               index = frame_vbo >> PAGE_SHIFT;
67115 +               if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
67116 +                       err = -EFAULT;
67117 +                       goto out;
67118 +               }
67120 +               /* Load full frame */
67121 +               err = ntfs_get_frame_pages(mapping, index, pages,
67122 +                                          pages_per_frame, &frame_uptodate);
67123 +               if (err)
67124 +                       goto out;
67126 +               if (!frame_uptodate) {
67127 +                       loff_t to = pos + bytes;
67129 +                       if (off || (to < i_size && (to & (frame_size - 1)))) {
67130 +                               err = ni_read_frame(ni, frame_vbo, pages,
67131 +                                                   pages_per_frame);
67132 +                               if (err) {
67133 +                                       for (ip = 0; ip < pages_per_frame;
67134 +                                            ip++) {
67135 +                                               page = pages[ip];
67136 +                                               unlock_page(page);
67137 +                                               put_page(page);
67138 +                                       }
67139 +                                       goto out;
67140 +                               }
67141 +                       }
67142 +               }
67144 +               WARN_ON(!bytes);
67145 +               copied = 0;
67146 +               ip = off >> PAGE_SHIFT;
67147 +               off = offset_in_page(pos);
67149 +               /* copy user data to pages */
67150 +               for (;;) {
67151 +                       size_t cp, tail = PAGE_SIZE - off;
67153 +                       page = pages[ip];
67154 +                       cp = iov_iter_copy_from_user_atomic(page, from, off,
67155 +                                                           min(tail, bytes));
67156 +                       flush_dcache_page(page);
67157 +                       iov_iter_advance(from, cp);
67158 +                       copied += cp;
67159 +                       bytes -= cp;
67160 +                       if (!bytes || !cp)
67161 +                               break;
67163 +                       if (cp < tail) {
67164 +                               off += cp;
67165 +                       } else {
67166 +                               ip++;
67167 +                               off = 0;
67168 +                       }
67169 +               }
67171 +               ni_lock(ni);
67172 +               err = ni_write_frame(ni, pages, pages_per_frame);
67173 +               ni_unlock(ni);
67175 +               for (ip = 0; ip < pages_per_frame; ip++) {
67176 +                       page = pages[ip];
67177 +                       ClearPageDirty(page);
67178 +                       SetPageUptodate(page);
67179 +                       unlock_page(page);
67180 +                       put_page(page);
67181 +               }
67183 +               if (err)
67184 +                       goto out;
67186 +               /*
67187 +                * We can loop for a long time in here. Be nice and allow
67188 +                * us to schedule out to avoid softlocking if preempt
67189 +                * is disabled.
67190 +                */
67191 +               cond_resched();
67193 +               pos += copied;
67194 +               written += copied;
67196 +               count = iov_iter_count(from);
67197 +       }
67199 +out:
67200 +       ntfs_free(pages);
67202 +       current->backing_dev_info = NULL;
67204 +       if (err < 0)
67205 +               return err;
67207 +       iocb->ki_pos += written;
67208 +       if (iocb->ki_pos > ni->i_valid)
67209 +               ni->i_valid = iocb->ki_pos;
67211 +       return written;
67215 + * file_operations::write_iter
67216 + */
67217 +static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
67219 +       struct file *file = iocb->ki_filp;
67220 +       struct address_space *mapping = file->f_mapping;
67221 +       struct inode *inode = mapping->host;
67222 +       ssize_t ret;
67223 +       struct ntfs_inode *ni = ntfs_i(inode);
67225 +       if (is_encrypted(ni)) {
67226 +               ntfs_inode_warn(inode, "encrypted i/o not supported");
67227 +               return -EOPNOTSUPP;
67228 +       }
67230 +       if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
67231 +               ntfs_inode_warn(inode, "direct i/o + compressed not supported");
67232 +               return -EOPNOTSUPP;
67233 +       }
67235 +       if (is_dedup(ni)) {
67236 +               ntfs_inode_warn(inode, "write into deduplicated not supported");
67237 +               return -EOPNOTSUPP;
67238 +       }
67240 +       if (!inode_trylock(inode)) {
67241 +               if (iocb->ki_flags & IOCB_NOWAIT)
67242 +                       return -EAGAIN;
67243 +               inode_lock(inode);
67244 +       }
67246 +       ret = generic_write_checks(iocb, from);
67247 +       if (ret <= 0)
67248 +               goto out;
67250 +       if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
67251 +               /* should never be here, see ntfs_file_open*/
67252 +               ret = -EOPNOTSUPP;
67253 +               goto out;
67254 +       }
67256 +       ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
67257 +       if (ret)
67258 +               goto out;
67260 +       ret = is_compressed(ni) ? ntfs_compress_write(iocb, from)
67261 +                               : __generic_file_write_iter(iocb, from);
67263 +out:
67264 +       inode_unlock(inode);
67266 +       if (ret > 0)
67267 +               ret = generic_write_sync(iocb, ret);
67269 +       return ret;
67273 + * file_operations::open
67274 + */
67275 +int ntfs_file_open(struct inode *inode, struct file *file)
67277 +       struct ntfs_inode *ni = ntfs_i(inode);
67279 +       if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
67280 +                    (file->f_flags & O_DIRECT))) {
67281 +               return -EOPNOTSUPP;
67282 +       }
67284 +       /* Decompress "external compressed" file if opened for rw */
67285 +       if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
67286 +           (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
67287 +#ifdef CONFIG_NTFS3_LZX_XPRESS
67288 +               int err = ni_decompress_file(ni);
67290 +               if (err)
67291 +                       return err;
67292 +#else
67293 +               ntfs_inode_warn(
67294 +                       inode,
67295 +                       "activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
67296 +               return -EOPNOTSUPP;
67297 +#endif
67298 +       }
67300 +       return generic_file_open(inode, file);
67304 + * file_operations::release
67305 + */
67306 +static int ntfs_file_release(struct inode *inode, struct file *file)
67308 +       struct ntfs_inode *ni = ntfs_i(inode);
67309 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
67310 +       int err = 0;
67312 +       /* if we are the last writer on the inode, drop the block reservation */
67313 +       if (sbi->options.prealloc && ((file->f_mode & FMODE_WRITE) &&
67314 +                                     atomic_read(&inode->i_writecount) == 1)) {
67315 +               ni_lock(ni);
67316 +               down_write(&ni->file.run_lock);
67318 +               err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
67319 +                                   inode->i_size, &ni->i_valid, false, NULL);
67321 +               up_write(&ni->file.run_lock);
67322 +               ni_unlock(ni);
67323 +       }
67324 +       return err;
67327 +/* file_operations::fiemap */
67328 +int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
67329 +               __u64 start, __u64 len)
67331 +       int err;
67332 +       struct ntfs_inode *ni = ntfs_i(inode);
67334 +       if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
67335 +               return -EOPNOTSUPP;
67337 +       ni_lock(ni);
67339 +       err = ni_fiemap(ni, fieinfo, start, len);
67341 +       ni_unlock(ni);
67343 +       return err;
67346 +const struct inode_operations ntfs_file_inode_operations = {
67347 +       .getattr = ntfs_getattr,
67348 +       .setattr = ntfs3_setattr,
67349 +       .listxattr = ntfs_listxattr,
67350 +       .permission = ntfs_permission,
67351 +       .get_acl = ntfs_get_acl,
67352 +       .set_acl = ntfs_set_acl,
67353 +       .fiemap = ntfs_fiemap,
67356 +const struct file_operations ntfs_file_operations = {
67357 +       .llseek = generic_file_llseek,
67358 +       .read_iter = ntfs_file_read_iter,
67359 +       .write_iter = ntfs_file_write_iter,
67360 +       .unlocked_ioctl = ntfs_ioctl,
67361 +#ifdef CONFIG_COMPAT
67362 +       .compat_ioctl = ntfs_compat_ioctl,
67363 +#endif
67364 +       .splice_read = generic_file_splice_read,
67365 +       .mmap = ntfs_file_mmap,
67366 +       .open = ntfs_file_open,
67367 +       .fsync = generic_file_fsync,
67368 +       .splice_write = iter_file_splice_write,
67369 +       .fallocate = ntfs_fallocate,
67370 +       .release = ntfs_file_release,
67372 diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
67373 new file mode 100644
67374 index 000000000000..c3121bf9c62f
67375 --- /dev/null
67376 +++ b/fs/ntfs3/frecord.c
67377 @@ -0,0 +1,3071 @@
67378 +// SPDX-License-Identifier: GPL-2.0
67380 + *
67381 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
67382 + *
67383 + */
67385 +#include <linux/blkdev.h>
67386 +#include <linux/buffer_head.h>
67387 +#include <linux/fiemap.h>
67388 +#include <linux/fs.h>
67389 +#include <linux/nls.h>
67390 +#include <linux/vmalloc.h>
67392 +#include "debug.h"
67393 +#include "ntfs.h"
67394 +#include "ntfs_fs.h"
67395 +#ifdef CONFIG_NTFS3_LZX_XPRESS
67396 +#include "lib/lib.h"
67397 +#endif
67399 +static struct mft_inode *ni_ins_mi(struct ntfs_inode *ni, struct rb_root *tree,
67400 +                                  CLST ino, struct rb_node *ins)
67402 +       struct rb_node **p = &tree->rb_node;
67403 +       struct rb_node *pr = NULL;
67405 +       while (*p) {
67406 +               struct mft_inode *mi;
67408 +               pr = *p;
67409 +               mi = rb_entry(pr, struct mft_inode, node);
67410 +               if (mi->rno > ino)
67411 +                       p = &pr->rb_left;
67412 +               else if (mi->rno < ino)
67413 +                       p = &pr->rb_right;
67414 +               else
67415 +                       return mi;
67416 +       }
67418 +       if (!ins)
67419 +               return NULL;
67421 +       rb_link_node(ins, pr, p);
67422 +       rb_insert_color(ins, tree);
67423 +       return rb_entry(ins, struct mft_inode, node);
67427 + * ni_find_mi
67428 + *
67429 + * finds mft_inode by record number
67430 + */
67431 +static struct mft_inode *ni_find_mi(struct ntfs_inode *ni, CLST rno)
67433 +       return ni_ins_mi(ni, &ni->mi_tree, rno, NULL);
67437 + * ni_add_mi
67438 + *
67439 + * adds new mft_inode into ntfs_inode
67440 + */
67441 +static void ni_add_mi(struct ntfs_inode *ni, struct mft_inode *mi)
67443 +       ni_ins_mi(ni, &ni->mi_tree, mi->rno, &mi->node);
67447 + * ni_remove_mi
67448 + *
67449 + * removes mft_inode from ntfs_inode
67450 + */
67451 +void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi)
67453 +       rb_erase(&mi->node, &ni->mi_tree);
67457 + * ni_std
67458 + *
67459 + * returns pointer into std_info from primary record
67460 + */
67461 +struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
67463 +       const struct ATTRIB *attr;
67465 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
67466 +       return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO))
67467 +                   : NULL;
67471 + * ni_std5
67472 + *
67473 + * returns pointer into std_info from primary record
67474 + */
67475 +struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
67477 +       const struct ATTRIB *attr;
67479 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
67481 +       return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5))
67482 +                   : NULL;
67486 + * ni_clear
67487 + *
67488 + * clears resources allocated by ntfs_inode
67489 + */
67490 +void ni_clear(struct ntfs_inode *ni)
67492 +       struct rb_node *node;
67494 +       if (!ni->vfs_inode.i_nlink && is_rec_inuse(ni->mi.mrec))
67495 +               ni_delete_all(ni);
67497 +       al_destroy(ni);
67499 +       for (node = rb_first(&ni->mi_tree); node;) {
67500 +               struct rb_node *next = rb_next(node);
67501 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
67503 +               rb_erase(node, &ni->mi_tree);
67504 +               mi_put(mi);
67505 +               node = next;
67506 +       }
67508 +       /* bad inode always has mode == S_IFREG */
67509 +       if (ni->ni_flags & NI_FLAG_DIR)
67510 +               indx_clear(&ni->dir);
67511 +       else {
67512 +               run_close(&ni->file.run);
67513 +#ifdef CONFIG_NTFS3_LZX_XPRESS
67514 +               if (ni->file.offs_page) {
67515 +                       /* on-demand allocated page for offsets */
67516 +                       put_page(ni->file.offs_page);
67517 +                       ni->file.offs_page = NULL;
67518 +               }
67519 +#endif
67520 +       }
67522 +       mi_clear(&ni->mi);
67526 + * ni_load_mi_ex
67527 + *
67528 + * finds mft_inode by record number.
67529 + */
67530 +int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
67532 +       int err;
67533 +       struct mft_inode *r;
67535 +       r = ni_find_mi(ni, rno);
67536 +       if (r)
67537 +               goto out;
67539 +       err = mi_get(ni->mi.sbi, rno, &r);
67540 +       if (err)
67541 +               return err;
67543 +       ni_add_mi(ni, r);
67545 +out:
67546 +       if (mi)
67547 +               *mi = r;
67548 +       return 0;
67552 + * ni_load_mi
67553 + *
67554 + * load mft_inode corresponded list_entry
67555 + */
67556 +int ni_load_mi(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
67557 +              struct mft_inode **mi)
67559 +       CLST rno;
67561 +       if (!le) {
67562 +               *mi = &ni->mi;
67563 +               return 0;
67564 +       }
67566 +       rno = ino_get(&le->ref);
67567 +       if (rno == ni->mi.rno) {
67568 +               *mi = &ni->mi;
67569 +               return 0;
67570 +       }
67571 +       return ni_load_mi_ex(ni, rno, mi);
67575 + * ni_find_attr
67576 + *
67577 + * returns attribute and record this attribute belongs to
67578 + */
67579 +struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
67580 +                           struct ATTR_LIST_ENTRY **le_o, enum ATTR_TYPE type,
67581 +                           const __le16 *name, u8 name_len, const CLST *vcn,
67582 +                           struct mft_inode **mi)
67584 +       struct ATTR_LIST_ENTRY *le;
67585 +       struct mft_inode *m;
67587 +       if (!ni->attr_list.size ||
67588 +           (!name_len && (type == ATTR_LIST || type == ATTR_STD))) {
67589 +               if (le_o)
67590 +                       *le_o = NULL;
67591 +               if (mi)
67592 +                       *mi = &ni->mi;
67594 +               /* Look for required attribute in primary record */
67595 +               return mi_find_attr(&ni->mi, attr, type, name, name_len, NULL);
67596 +       }
67598 +       /* first look for list entry of required type */
67599 +       le = al_find_ex(ni, le_o ? *le_o : NULL, type, name, name_len, vcn);
67600 +       if (!le)
67601 +               return NULL;
67603 +       if (le_o)
67604 +               *le_o = le;
67606 +       /* Load record that contains this attribute */
67607 +       if (ni_load_mi(ni, le, &m))
67608 +               return NULL;
67610 +       /* Look for required attribute */
67611 +       attr = mi_find_attr(m, NULL, type, name, name_len, &le->id);
67613 +       if (!attr)
67614 +               goto out;
67616 +       if (!attr->non_res) {
67617 +               if (vcn && *vcn)
67618 +                       goto out;
67619 +       } else if (!vcn) {
67620 +               if (attr->nres.svcn)
67621 +                       goto out;
67622 +       } else if (le64_to_cpu(attr->nres.svcn) > *vcn ||
67623 +                  *vcn > le64_to_cpu(attr->nres.evcn)) {
67624 +               goto out;
67625 +       }
67627 +       if (mi)
67628 +               *mi = m;
67629 +       return attr;
67631 +out:
67632 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
67633 +       return NULL;
67637 + * ni_enum_attr_ex
67638 + *
67639 + * enumerates attributes in ntfs_inode
67640 + */
67641 +struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
67642 +                              struct ATTR_LIST_ENTRY **le,
67643 +                              struct mft_inode **mi)
67645 +       struct mft_inode *mi2;
67646 +       struct ATTR_LIST_ENTRY *le2;
67648 +       /* Do we have an attribute list? */
67649 +       if (!ni->attr_list.size) {
67650 +               *le = NULL;
67651 +               if (mi)
67652 +                       *mi = &ni->mi;
67653 +               /* Enum attributes in primary record */
67654 +               return mi_enum_attr(&ni->mi, attr);
67655 +       }
67657 +       /* get next list entry */
67658 +       le2 = *le = al_enumerate(ni, attr ? *le : NULL);
67659 +       if (!le2)
67660 +               return NULL;
67662 +       /* Load record that contains the required attribute */
67663 +       if (ni_load_mi(ni, le2, &mi2))
67664 +               return NULL;
67666 +       if (mi)
67667 +               *mi = mi2;
67669 +       /* Find attribute in loaded record */
67670 +       return rec_find_attr_le(mi2, le2);
67674 + * ni_load_attr
67675 + *
67676 + * loads attribute that contains given vcn
67677 + */
67678 +struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
67679 +                           const __le16 *name, u8 name_len, CLST vcn,
67680 +                           struct mft_inode **pmi)
67682 +       struct ATTR_LIST_ENTRY *le;
67683 +       struct ATTRIB *attr;
67684 +       struct mft_inode *mi;
67685 +       struct ATTR_LIST_ENTRY *next;
67687 +       if (!ni->attr_list.size) {
67688 +               if (pmi)
67689 +                       *pmi = &ni->mi;
67690 +               return mi_find_attr(&ni->mi, NULL, type, name, name_len, NULL);
67691 +       }
67693 +       le = al_find_ex(ni, NULL, type, name, name_len, NULL);
67694 +       if (!le)
67695 +               return NULL;
67697 +       /*
67698 +        * Unfortunately ATTR_LIST_ENTRY contains only start vcn
67699 +        * So to find the ATTRIB segment that contains 'vcn' we should
67700 +        * enumerate some entries
67701 +        */
67702 +       if (vcn) {
67703 +               for (;; le = next) {
67704 +                       next = al_find_ex(ni, le, type, name, name_len, NULL);
67705 +                       if (!next || le64_to_cpu(next->vcn) > vcn)
67706 +                               break;
67707 +               }
67708 +       }
67710 +       if (ni_load_mi(ni, le, &mi))
67711 +               return NULL;
67713 +       if (pmi)
67714 +               *pmi = mi;
67716 +       attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
67717 +       if (!attr)
67718 +               return NULL;
67720 +       if (!attr->non_res)
67721 +               return attr;
67723 +       if (le64_to_cpu(attr->nres.svcn) <= vcn &&
67724 +           vcn <= le64_to_cpu(attr->nres.evcn))
67725 +               return attr;
67727 +       return NULL;
67731 + * ni_load_all_mi
67732 + *
67733 + * loads all subrecords
67734 + */
67735 +int ni_load_all_mi(struct ntfs_inode *ni)
67737 +       int err;
67738 +       struct ATTR_LIST_ENTRY *le;
67740 +       if (!ni->attr_list.size)
67741 +               return 0;
67743 +       le = NULL;
67745 +       while ((le = al_enumerate(ni, le))) {
67746 +               CLST rno = ino_get(&le->ref);
67748 +               if (rno == ni->mi.rno)
67749 +                       continue;
67751 +               err = ni_load_mi_ex(ni, rno, NULL);
67752 +               if (err)
67753 +                       return err;
67754 +       }
67756 +       return 0;
67760 + * ni_add_subrecord
67761 + *
67762 + * allocate + format + attach a new subrecord
67763 + */
67764 +bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
67766 +       struct mft_inode *m;
67768 +       m = ntfs_zalloc(sizeof(struct mft_inode));
67769 +       if (!m)
67770 +               return false;
67772 +       if (mi_format_new(m, ni->mi.sbi, rno, 0, ni->mi.rno == MFT_REC_MFT)) {
67773 +               mi_put(m);
67774 +               return false;
67775 +       }
67777 +       mi_get_ref(&ni->mi, &m->mrec->parent_ref);
67779 +       ni_add_mi(ni, m);
67780 +       *mi = m;
67781 +       return true;
67785 + * ni_remove_attr
67786 + *
67787 + * removes all attributes for the given type/name/id
67788 + */
67789 +int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
67790 +                  const __le16 *name, size_t name_len, bool base_only,
67791 +                  const __le16 *id)
67793 +       int err;
67794 +       struct ATTRIB *attr;
67795 +       struct ATTR_LIST_ENTRY *le;
67796 +       struct mft_inode *mi;
67797 +       u32 type_in;
67798 +       int diff;
67800 +       if (base_only || type == ATTR_LIST || !ni->attr_list.size) {
67801 +               attr = mi_find_attr(&ni->mi, NULL, type, name, name_len, id);
67802 +               if (!attr)
67803 +                       return -ENOENT;
67805 +               mi_remove_attr(&ni->mi, attr);
67806 +               return 0;
67807 +       }
67809 +       type_in = le32_to_cpu(type);
67810 +       le = NULL;
67812 +       for (;;) {
67813 +               le = al_enumerate(ni, le);
67814 +               if (!le)
67815 +                       return 0;
67817 +next_le2:
67818 +               diff = le32_to_cpu(le->type) - type_in;
67819 +               if (diff < 0)
67820 +                       continue;
67822 +               if (diff > 0)
67823 +                       return 0;
67825 +               if (le->name_len != name_len)
67826 +                       continue;
67828 +               if (name_len &&
67829 +                   memcmp(le_name(le), name, name_len * sizeof(short)))
67830 +                       continue;
67832 +               if (id && le->id != *id)
67833 +                       continue;
67834 +               err = ni_load_mi(ni, le, &mi);
67835 +               if (err)
67836 +                       return err;
67838 +               al_remove_le(ni, le);
67840 +               attr = mi_find_attr(mi, NULL, type, name, name_len, id);
67841 +               if (!attr)
67842 +                       return -ENOENT;
67844 +               mi_remove_attr(mi, attr);
67846 +               if (PtrOffset(ni->attr_list.le, le) >= ni->attr_list.size)
67847 +                       return 0;
67848 +               goto next_le2;
67849 +       }
67853 + * ni_ins_new_attr
67854 + *
67855 + * inserts the attribute into record
67856 + * Returns not full constructed attribute or NULL if not possible to create
67857 + */
67858 +static struct ATTRIB *ni_ins_new_attr(struct ntfs_inode *ni,
67859 +                                     struct mft_inode *mi,
67860 +                                     struct ATTR_LIST_ENTRY *le,
67861 +                                     enum ATTR_TYPE type, const __le16 *name,
67862 +                                     u8 name_len, u32 asize, u16 name_off,
67863 +                                     CLST svcn)
67865 +       int err;
67866 +       struct ATTRIB *attr;
67867 +       bool le_added = false;
67868 +       struct MFT_REF ref;
67870 +       mi_get_ref(mi, &ref);
67872 +       if (type != ATTR_LIST && !le && ni->attr_list.size) {
67873 +               err = al_add_le(ni, type, name, name_len, svcn, cpu_to_le16(-1),
67874 +                               &ref, &le);
67875 +               if (err) {
67876 +                       /* no memory or no space */
67877 +                       return NULL;
67878 +               }
67879 +               le_added = true;
67881 +               /*
67882 +                * al_add_le -> attr_set_size (list) -> ni_expand_list
67883 +                * which moves some attributes out of primary record
67884 +                * this means that name may point into moved memory
67885 +                * reinit 'name' from le
67886 +                */
67887 +               name = le->name;
67888 +       }
67890 +       attr = mi_insert_attr(mi, type, name, name_len, asize, name_off);
67891 +       if (!attr) {
67892 +               if (le_added)
67893 +                       al_remove_le(ni, le);
67894 +               return NULL;
67895 +       }
67897 +       if (type == ATTR_LIST) {
67898 +               /*attr list is not in list entry array*/
67899 +               goto out;
67900 +       }
67902 +       if (!le)
67903 +               goto out;
67905 +       /* Update ATTRIB Id and record reference */
67906 +       le->id = attr->id;
67907 +       ni->attr_list.dirty = true;
67908 +       le->ref = ref;
67910 +out:
67911 +       return attr;
67915 + * random write access to sparsed or compressed file may result to
67916 + * not optimized packed runs.
67917 + * Here it is the place to optimize it
67918 + */
67919 +static int ni_repack(struct ntfs_inode *ni)
67921 +       int err = 0;
67922 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
67923 +       struct mft_inode *mi, *mi_p = NULL;
67924 +       struct ATTRIB *attr = NULL, *attr_p;
67925 +       struct ATTR_LIST_ENTRY *le = NULL, *le_p;
67926 +       CLST alloc = 0;
67927 +       u8 cluster_bits = sbi->cluster_bits;
67928 +       CLST svcn, evcn = 0, svcn_p, evcn_p, next_svcn;
67929 +       u32 roff, rs = sbi->record_size;
67930 +       struct runs_tree run;
67932 +       run_init(&run);
67934 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi))) {
67935 +               if (!attr->non_res)
67936 +                       continue;
67938 +               svcn = le64_to_cpu(attr->nres.svcn);
67939 +               if (svcn != le64_to_cpu(le->vcn)) {
67940 +                       err = -EINVAL;
67941 +                       break;
67942 +               }
67944 +               if (!svcn) {
67945 +                       alloc = le64_to_cpu(attr->nres.alloc_size) >>
67946 +                               cluster_bits;
67947 +                       mi_p = NULL;
67948 +               } else if (svcn != evcn + 1) {
67949 +                       err = -EINVAL;
67950 +                       break;
67951 +               }
67953 +               evcn = le64_to_cpu(attr->nres.evcn);
67955 +               if (svcn > evcn + 1) {
67956 +                       err = -EINVAL;
67957 +                       break;
67958 +               }
67960 +               if (!mi_p) {
67961 +                       /* do not try if too little free space */
67962 +                       if (le32_to_cpu(mi->mrec->used) + 8 >= rs)
67963 +                               continue;
67965 +                       /* do not try if last attribute segment */
67966 +                       if (evcn + 1 == alloc)
67967 +                               continue;
67968 +                       run_close(&run);
67969 +               }
67971 +               roff = le16_to_cpu(attr->nres.run_off);
67972 +               err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn,
67973 +                                Add2Ptr(attr, roff),
67974 +                                le32_to_cpu(attr->size) - roff);
67975 +               if (err < 0)
67976 +                       break;
67978 +               if (!mi_p) {
67979 +                       mi_p = mi;
67980 +                       attr_p = attr;
67981 +                       svcn_p = svcn;
67982 +                       evcn_p = evcn;
67983 +                       le_p = le;
67984 +                       err = 0;
67985 +                       continue;
67986 +               }
67988 +               /*
67989 +                * run contains data from two records: mi_p and mi
67990 +                * try to pack in one
67991 +                */
67992 +               err = mi_pack_runs(mi_p, attr_p, &run, evcn + 1 - svcn_p);
67993 +               if (err)
67994 +                       break;
67996 +               next_svcn = le64_to_cpu(attr_p->nres.evcn) + 1;
67998 +               if (next_svcn >= evcn + 1) {
67999 +                       /* we can remove this attribute segment */
68000 +                       al_remove_le(ni, le);
68001 +                       mi_remove_attr(mi, attr);
68002 +                       le = le_p;
68003 +                       continue;
68004 +               }
68006 +               attr->nres.svcn = le->vcn = cpu_to_le64(next_svcn);
68007 +               mi->dirty = true;
68008 +               ni->attr_list.dirty = true;
68010 +               if (evcn + 1 == alloc) {
68011 +                       err = mi_pack_runs(mi, attr, &run,
68012 +                                          evcn + 1 - next_svcn);
68013 +                       if (err)
68014 +                               break;
68015 +                       mi_p = NULL;
68016 +               } else {
68017 +                       mi_p = mi;
68018 +                       attr_p = attr;
68019 +                       svcn_p = next_svcn;
68020 +                       evcn_p = evcn;
68021 +                       le_p = le;
68022 +                       run_truncate_head(&run, next_svcn);
68023 +               }
68024 +       }
68026 +       if (err) {
68027 +               ntfs_inode_warn(&ni->vfs_inode, "repack problem");
68028 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
68030 +               /* Pack loaded but not packed runs */
68031 +               if (mi_p)
68032 +                       mi_pack_runs(mi_p, attr_p, &run, evcn_p + 1 - svcn_p);
68033 +       }
68035 +       run_close(&run);
68036 +       return err;
68040 + * ni_try_remove_attr_list
68041 + *
68042 + * Can we remove attribute list?
68043 + * Check the case when primary record contains enough space for all attributes
68044 + */
68045 +static int ni_try_remove_attr_list(struct ntfs_inode *ni)
68047 +       int err = 0;
68048 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68049 +       struct ATTRIB *attr, *attr_list, *attr_ins;
68050 +       struct ATTR_LIST_ENTRY *le;
68051 +       struct mft_inode *mi;
68052 +       u32 asize, free;
68053 +       struct MFT_REF ref;
68054 +       __le16 id;
68056 +       if (!ni->attr_list.dirty)
68057 +               return 0;
68059 +       err = ni_repack(ni);
68060 +       if (err)
68061 +               return err;
68063 +       attr_list = mi_find_attr(&ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
68064 +       if (!attr_list)
68065 +               return 0;
68067 +       asize = le32_to_cpu(attr_list->size);
68069 +       /* free space in primary record without attribute list */
68070 +       free = sbi->record_size - le32_to_cpu(ni->mi.mrec->used) + asize;
68071 +       mi_get_ref(&ni->mi, &ref);
68073 +       le = NULL;
68074 +       while ((le = al_enumerate(ni, le))) {
68075 +               if (!memcmp(&le->ref, &ref, sizeof(ref)))
68076 +                       continue;
68078 +               if (le->vcn)
68079 +                       return 0;
68081 +               mi = ni_find_mi(ni, ino_get(&le->ref));
68082 +               if (!mi)
68083 +                       return 0;
68085 +               attr = mi_find_attr(mi, NULL, le->type, le_name(le),
68086 +                                   le->name_len, &le->id);
68087 +               if (!attr)
68088 +                       return 0;
68090 +               asize = le32_to_cpu(attr->size);
68091 +               if (asize > free)
68092 +                       return 0;
68094 +               free -= asize;
68095 +       }
68097 +       /* Is seems that attribute list can be removed from primary record */
68098 +       mi_remove_attr(&ni->mi, attr_list);
68100 +       /*
68101 +        * Repeat the cycle above and move all attributes to primary record.
68102 +        * It should be success!
68103 +        */
68104 +       le = NULL;
68105 +       while ((le = al_enumerate(ni, le))) {
68106 +               if (!memcmp(&le->ref, &ref, sizeof(ref)))
68107 +                       continue;
68109 +               mi = ni_find_mi(ni, ino_get(&le->ref));
68111 +               attr = mi_find_attr(mi, NULL, le->type, le_name(le),
68112 +                                   le->name_len, &le->id);
68113 +               asize = le32_to_cpu(attr->size);
68115 +               /* insert into primary record */
68116 +               attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
68117 +                                         le->name_len, asize,
68118 +                                         le16_to_cpu(attr->name_off));
68119 +               id = attr_ins->id;
68121 +               /* copy all except id */
68122 +               memcpy(attr_ins, attr, asize);
68123 +               attr_ins->id = id;
68125 +               /* remove from original record */
68126 +               mi_remove_attr(mi, attr);
68127 +       }
68129 +       run_deallocate(sbi, &ni->attr_list.run, true);
68130 +       run_close(&ni->attr_list.run);
68131 +       ni->attr_list.size = 0;
68132 +       ntfs_free(ni->attr_list.le);
68133 +       ni->attr_list.le = NULL;
68134 +       ni->attr_list.dirty = false;
68136 +       return 0;
68140 + * ni_create_attr_list
68141 + *
68142 + * generates an attribute list for this primary record
68143 + */
68144 +int ni_create_attr_list(struct ntfs_inode *ni)
68146 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68147 +       int err;
68148 +       u32 lsize;
68149 +       struct ATTRIB *attr;
68150 +       struct ATTRIB *arr_move[7];
68151 +       struct ATTR_LIST_ENTRY *le, *le_b[7];
68152 +       struct MFT_REC *rec;
68153 +       bool is_mft;
68154 +       CLST rno = 0;
68155 +       struct mft_inode *mi;
68156 +       u32 free_b, nb, to_free, rs;
68157 +       u16 sz;
68159 +       is_mft = ni->mi.rno == MFT_REC_MFT;
68160 +       rec = ni->mi.mrec;
68161 +       rs = sbi->record_size;
68163 +       /*
68164 +        * Skip estimating exact memory requirement
68165 +        * Looks like one record_size is always enough
68166 +        */
68167 +       le = ntfs_malloc(al_aligned(rs));
68168 +       if (!le) {
68169 +               err = -ENOMEM;
68170 +               goto out;
68171 +       }
68173 +       mi_get_ref(&ni->mi, &le->ref);
68174 +       ni->attr_list.le = le;
68176 +       attr = NULL;
68177 +       nb = 0;
68178 +       free_b = 0;
68179 +       attr = NULL;
68181 +       for (; (attr = mi_enum_attr(&ni->mi, attr)); le = Add2Ptr(le, sz)) {
68182 +               sz = le_size(attr->name_len);
68183 +               le->type = attr->type;
68184 +               le->size = cpu_to_le16(sz);
68185 +               le->name_len = attr->name_len;
68186 +               le->name_off = offsetof(struct ATTR_LIST_ENTRY, name);
68187 +               le->vcn = 0;
68188 +               if (le != ni->attr_list.le)
68189 +                       le->ref = ni->attr_list.le->ref;
68190 +               le->id = attr->id;
68192 +               if (attr->name_len)
68193 +                       memcpy(le->name, attr_name(attr),
68194 +                              sizeof(short) * attr->name_len);
68195 +               else if (attr->type == ATTR_STD)
68196 +                       continue;
68197 +               else if (attr->type == ATTR_LIST)
68198 +                       continue;
68199 +               else if (is_mft && attr->type == ATTR_DATA)
68200 +                       continue;
68202 +               if (!nb || nb < ARRAY_SIZE(arr_move)) {
68203 +                       le_b[nb] = le;
68204 +                       arr_move[nb++] = attr;
68205 +                       free_b += le32_to_cpu(attr->size);
68206 +               }
68207 +       }
68209 +       lsize = PtrOffset(ni->attr_list.le, le);
68210 +       ni->attr_list.size = lsize;
68212 +       to_free = le32_to_cpu(rec->used) + lsize + SIZEOF_RESIDENT;
68213 +       if (to_free <= rs) {
68214 +               to_free = 0;
68215 +       } else {
68216 +               to_free -= rs;
68218 +               if (to_free > free_b) {
68219 +                       err = -EINVAL;
68220 +                       goto out1;
68221 +               }
68222 +       }
68224 +       /* Allocate child mft. */
68225 +       err = ntfs_look_free_mft(sbi, &rno, is_mft, ni, &mi);
68226 +       if (err)
68227 +               goto out1;
68229 +       /* Call 'mi_remove_attr' in reverse order to keep pointers 'arr_move' valid */
68230 +       while (to_free > 0) {
68231 +               struct ATTRIB *b = arr_move[--nb];
68232 +               u32 asize = le32_to_cpu(b->size);
68233 +               u16 name_off = le16_to_cpu(b->name_off);
68235 +               attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
68236 +                                     b->name_len, asize, name_off);
68237 +               WARN_ON(!attr);
68239 +               mi_get_ref(mi, &le_b[nb]->ref);
68240 +               le_b[nb]->id = attr->id;
68242 +               /* copy all except id */
68243 +               memcpy(attr, b, asize);
68244 +               attr->id = le_b[nb]->id;
68246 +               WARN_ON(!mi_remove_attr(&ni->mi, b));
68248 +               if (to_free <= asize)
68249 +                       break;
68250 +               to_free -= asize;
68251 +               WARN_ON(!nb);
68252 +       }
68254 +       attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
68255 +                             lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
68256 +       WARN_ON(!attr);
68258 +       attr->non_res = 0;
68259 +       attr->flags = 0;
68260 +       attr->res.data_size = cpu_to_le32(lsize);
68261 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
68262 +       attr->res.flags = 0;
68263 +       attr->res.res = 0;
68265 +       memcpy(resident_data_ex(attr, lsize), ni->attr_list.le, lsize);
68267 +       ni->attr_list.dirty = false;
68269 +       mark_inode_dirty(&ni->vfs_inode);
68270 +       goto out;
68272 +out1:
68273 +       ntfs_free(ni->attr_list.le);
68274 +       ni->attr_list.le = NULL;
68275 +       ni->attr_list.size = 0;
68277 +out:
68278 +       return err;
68282 + * ni_ins_attr_ext
68283 + *
68284 + * This method adds an external attribute to the ntfs_inode.
68285 + */
68286 +static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
68287 +                          enum ATTR_TYPE type, const __le16 *name, u8 name_len,
68288 +                          u32 asize, CLST svcn, u16 name_off, bool force_ext,
68289 +                          struct ATTRIB **ins_attr, struct mft_inode **ins_mi)
68291 +       struct ATTRIB *attr;
68292 +       struct mft_inode *mi;
68293 +       CLST rno;
68294 +       u64 vbo;
68295 +       struct rb_node *node;
68296 +       int err;
68297 +       bool is_mft, is_mft_data;
68298 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68300 +       is_mft = ni->mi.rno == MFT_REC_MFT;
68301 +       is_mft_data = is_mft && type == ATTR_DATA && !name_len;
68303 +       if (asize > sbi->max_bytes_per_attr) {
68304 +               err = -EINVAL;
68305 +               goto out;
68306 +       }
68308 +       /*
68309 +        * standard information and attr_list cannot be made external.
68310 +        * The Log File cannot have any external attributes
68311 +        */
68312 +       if (type == ATTR_STD || type == ATTR_LIST ||
68313 +           ni->mi.rno == MFT_REC_LOG) {
68314 +               err = -EINVAL;
68315 +               goto out;
68316 +       }
68318 +       /* Create attribute list if it is not already existed */
68319 +       if (!ni->attr_list.size) {
68320 +               err = ni_create_attr_list(ni);
68321 +               if (err)
68322 +                       goto out;
68323 +       }
68325 +       vbo = is_mft_data ? ((u64)svcn << sbi->cluster_bits) : 0;
68327 +       if (force_ext)
68328 +               goto insert_ext;
68330 +       /* Load all subrecords into memory. */
68331 +       err = ni_load_all_mi(ni);
68332 +       if (err)
68333 +               goto out;
68335 +       /* Check each of loaded subrecord */
68336 +       for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
68337 +               mi = rb_entry(node, struct mft_inode, node);
68339 +               if (is_mft_data &&
68340 +                   (mi_enum_attr(mi, NULL) ||
68341 +                    vbo <= ((u64)mi->rno << sbi->record_bits))) {
68342 +                       /* We can't accept this record 'case MFT's bootstrapping */
68343 +                       continue;
68344 +               }
68345 +               if (is_mft &&
68346 +                   mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
68347 +                       /*
68348 +                        * This child record already has a ATTR_DATA.
68349 +                        * So it can't accept any other records.
68350 +                        */
68351 +                       continue;
68352 +               }
68354 +               if ((type != ATTR_NAME || name_len) &&
68355 +                   mi_find_attr(mi, NULL, type, name, name_len, NULL)) {
68356 +                       /* Only indexed attributes can share same record */
68357 +                       continue;
68358 +               }
68360 +               /* Try to insert attribute into this subrecord */
68361 +               attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
68362 +                                      name_off, svcn);
68363 +               if (!attr)
68364 +                       continue;
68366 +               if (ins_attr)
68367 +                       *ins_attr = attr;
68368 +               return 0;
68369 +       }
68371 +insert_ext:
68372 +       /* We have to allocate a new child subrecord*/
68373 +       err = ntfs_look_free_mft(sbi, &rno, is_mft_data, ni, &mi);
68374 +       if (err)
68375 +               goto out;
68377 +       if (is_mft_data && vbo <= ((u64)rno << sbi->record_bits)) {
68378 +               err = -EINVAL;
68379 +               goto out1;
68380 +       }
68382 +       attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
68383 +                              name_off, svcn);
68384 +       if (!attr)
68385 +               goto out2;
68387 +       if (ins_attr)
68388 +               *ins_attr = attr;
68389 +       if (ins_mi)
68390 +               *ins_mi = mi;
68392 +       return 0;
68394 +out2:
68395 +       ni_remove_mi(ni, mi);
68396 +       mi_put(mi);
68397 +       err = -EINVAL;
68399 +out1:
68400 +       ntfs_mark_rec_free(sbi, rno);
68402 +out:
68403 +       return err;
68407 + * ni_insert_attr
68408 + *
68409 + * inserts an attribute into the file.
68410 + *
68411 + * If the primary record has room, it will just insert the attribute.
68412 + * If not, it may make the attribute external.
68413 + * For $MFT::Data it may make room for the attribute by
68414 + * making other attributes external.
68415 + *
68416 + * NOTE:
68417 + * The ATTR_LIST and ATTR_STD cannot be made external.
68418 + * This function does not fill new attribute full
68419 + * It only fills 'size'/'type'/'id'/'name_len' fields
68420 + */
68421 +static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
68422 +                         const __le16 *name, u8 name_len, u32 asize,
68423 +                         u16 name_off, CLST svcn, struct ATTRIB **ins_attr,
68424 +                         struct mft_inode **ins_mi)
68426 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68427 +       int err;
68428 +       struct ATTRIB *attr, *eattr;
68429 +       struct MFT_REC *rec;
68430 +       bool is_mft;
68431 +       struct ATTR_LIST_ENTRY *le;
68432 +       u32 list_reserve, max_free, free, used, t32;
68433 +       __le16 id;
68434 +       u16 t16;
68436 +       is_mft = ni->mi.rno == MFT_REC_MFT;
68437 +       rec = ni->mi.mrec;
68439 +       list_reserve = SIZEOF_NONRESIDENT + 3 * (1 + 2 * sizeof(u32));
68440 +       used = le32_to_cpu(rec->used);
68441 +       free = sbi->record_size - used;
68443 +       if (is_mft && type != ATTR_LIST) {
68444 +               /* Reserve space for the ATTRIB List. */
68445 +               if (free < list_reserve)
68446 +                       free = 0;
68447 +               else
68448 +                       free -= list_reserve;
68449 +       }
68451 +       if (asize <= free) {
68452 +               attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len,
68453 +                                      asize, name_off, svcn);
68454 +               if (attr) {
68455 +                       if (ins_attr)
68456 +                               *ins_attr = attr;
68457 +                       if (ins_mi)
68458 +                               *ins_mi = &ni->mi;
68459 +                       err = 0;
68460 +                       goto out;
68461 +               }
68462 +       }
68464 +       if (!is_mft || type != ATTR_DATA || svcn) {
68465 +               /* This ATTRIB will be external. */
68466 +               err = ni_ins_attr_ext(ni, NULL, type, name, name_len, asize,
68467 +                                     svcn, name_off, false, ins_attr, ins_mi);
68468 +               goto out;
68469 +       }
68471 +       /*
68472 +        * Here we have: "is_mft && type == ATTR_DATA && !svcn
68473 +        *
68474 +        * The first chunk of the $MFT::Data ATTRIB must be the base record.
68475 +        * Evict as many other attributes as possible.
68476 +        */
68477 +       max_free = free;
68479 +       /* Estimate the result of moving all possible attributes away.*/
68480 +       attr = NULL;
68482 +       while ((attr = mi_enum_attr(&ni->mi, attr))) {
68483 +               if (attr->type == ATTR_STD)
68484 +                       continue;
68485 +               if (attr->type == ATTR_LIST)
68486 +                       continue;
68487 +               max_free += le32_to_cpu(attr->size);
68488 +       }
68490 +       if (max_free < asize + list_reserve) {
68491 +               /* Impossible to insert this attribute into primary record */
68492 +               err = -EINVAL;
68493 +               goto out;
68494 +       }
68496 +       /* Start real attribute moving */
68497 +       attr = NULL;
68499 +       for (;;) {
68500 +               attr = mi_enum_attr(&ni->mi, attr);
68501 +               if (!attr) {
68502 +                       /* We should never be here 'cause we have already check this case */
68503 +                       err = -EINVAL;
68504 +                       goto out;
68505 +               }
68507 +               /* Skip attributes that MUST be primary record */
68508 +               if (attr->type == ATTR_STD || attr->type == ATTR_LIST)
68509 +                       continue;
68511 +               le = NULL;
68512 +               if (ni->attr_list.size) {
68513 +                       le = al_find_le(ni, NULL, attr);
68514 +                       if (!le) {
68515 +                               /* Really this is a serious bug */
68516 +                               err = -EINVAL;
68517 +                               goto out;
68518 +                       }
68519 +               }
68521 +               t32 = le32_to_cpu(attr->size);
68522 +               t16 = le16_to_cpu(attr->name_off);
68523 +               err = ni_ins_attr_ext(ni, le, attr->type, Add2Ptr(attr, t16),
68524 +                                     attr->name_len, t32, attr_svcn(attr), t16,
68525 +                                     false, &eattr, NULL);
68526 +               if (err)
68527 +                       return err;
68529 +               id = eattr->id;
68530 +               memcpy(eattr, attr, t32);
68531 +               eattr->id = id;
68533 +               /* remove attrib from primary record */
68534 +               mi_remove_attr(&ni->mi, attr);
68536 +               /* attr now points to next attribute */
68537 +               if (attr->type == ATTR_END)
68538 +                       goto out;
68539 +       }
68540 +       while (asize + list_reserve > sbi->record_size - le32_to_cpu(rec->used))
68541 +               ;
68543 +       attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len, asize,
68544 +                              name_off, svcn);
68545 +       if (!attr) {
68546 +               err = -EINVAL;
68547 +               goto out;
68548 +       }
68550 +       if (ins_attr)
68551 +               *ins_attr = attr;
68552 +       if (ins_mi)
68553 +               *ins_mi = &ni->mi;
68555 +out:
68556 +       return err;
68560 + * ni_expand_mft_list
68561 + *
68562 + * This method splits ATTR_DATA of $MFT
68563 + */
68564 +static int ni_expand_mft_list(struct ntfs_inode *ni)
68566 +       int err = 0;
68567 +       struct runs_tree *run = &ni->file.run;
68568 +       u32 asize, run_size, done = 0;
68569 +       struct ATTRIB *attr;
68570 +       struct rb_node *node;
68571 +       CLST mft_min, mft_new, svcn, evcn, plen;
68572 +       struct mft_inode *mi, *mi_min, *mi_new;
68573 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68575 +       /* Find the nearest Mft */
68576 +       mft_min = 0;
68577 +       mft_new = 0;
68578 +       mi_min = NULL;
68580 +       for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
68581 +               mi = rb_entry(node, struct mft_inode, node);
68583 +               attr = mi_enum_attr(mi, NULL);
68585 +               if (!attr) {
68586 +                       mft_min = mi->rno;
68587 +                       mi_min = mi;
68588 +                       break;
68589 +               }
68590 +       }
68592 +       if (ntfs_look_free_mft(sbi, &mft_new, true, ni, &mi_new)) {
68593 +               mft_new = 0;
68594 +               // really this is not critical
68595 +       } else if (mft_min > mft_new) {
68596 +               mft_min = mft_new;
68597 +               mi_min = mi_new;
68598 +       } else {
68599 +               ntfs_mark_rec_free(sbi, mft_new);
68600 +               mft_new = 0;
68601 +               ni_remove_mi(ni, mi_new);
68602 +       }
68604 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
68605 +       if (!attr) {
68606 +               err = -EINVAL;
68607 +               goto out;
68608 +       }
68610 +       asize = le32_to_cpu(attr->size);
68612 +       evcn = le64_to_cpu(attr->nres.evcn);
68613 +       svcn = bytes_to_cluster(sbi, (u64)(mft_min + 1) << sbi->record_bits);
68614 +       if (evcn + 1 >= svcn) {
68615 +               err = -EINVAL;
68616 +               goto out;
68617 +       }
68619 +       /*
68620 +        * split primary attribute [0 evcn] in two parts [0 svcn) + [svcn evcn]
68621 +        *
68622 +        * Update first part of ATTR_DATA in 'primary MFT
68623 +        */
68624 +       err = run_pack(run, 0, svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
68625 +                      asize - SIZEOF_NONRESIDENT, &plen);
68626 +       if (err < 0)
68627 +               goto out;
68629 +       run_size = QuadAlign(err);
68630 +       err = 0;
68632 +       if (plen < svcn) {
68633 +               err = -EINVAL;
68634 +               goto out;
68635 +       }
68637 +       attr->nres.evcn = cpu_to_le64(svcn - 1);
68638 +       attr->size = cpu_to_le32(run_size + SIZEOF_NONRESIDENT);
68639 +       /* 'done' - how many bytes of primary MFT becomes free */
68640 +       done = asize - run_size - SIZEOF_NONRESIDENT;
68641 +       le32_sub_cpu(&ni->mi.mrec->used, done);
68643 +       /* Estimate the size of second part: run_buf=NULL */
68644 +       err = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size,
68645 +                      &plen);
68646 +       if (err < 0)
68647 +               goto out;
68649 +       run_size = QuadAlign(err);
68650 +       err = 0;
68652 +       if (plen < evcn + 1 - svcn) {
68653 +               err = -EINVAL;
68654 +               goto out;
68655 +       }
68657 +       /*
68658 +        * This function may implicitly call expand attr_list
68659 +        * Insert second part of ATTR_DATA in 'mi_min'
68660 +        */
68661 +       attr = ni_ins_new_attr(ni, mi_min, NULL, ATTR_DATA, NULL, 0,
68662 +                              SIZEOF_NONRESIDENT + run_size,
68663 +                              SIZEOF_NONRESIDENT, svcn);
68664 +       if (!attr) {
68665 +               err = -EINVAL;
68666 +               goto out;
68667 +       }
68669 +       attr->non_res = 1;
68670 +       attr->name_off = SIZEOF_NONRESIDENT_LE;
68671 +       attr->flags = 0;
68673 +       run_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
68674 +                run_size, &plen);
68676 +       attr->nres.svcn = cpu_to_le64(svcn);
68677 +       attr->nres.evcn = cpu_to_le64(evcn);
68678 +       attr->nres.run_off = cpu_to_le16(SIZEOF_NONRESIDENT);
68680 +out:
68681 +       if (mft_new) {
68682 +               ntfs_mark_rec_free(sbi, mft_new);
68683 +               ni_remove_mi(ni, mi_new);
68684 +       }
68686 +       return !err && !done ? -EOPNOTSUPP : err;
68690 + * ni_expand_list
68691 + *
68692 + * This method moves all possible attributes out of primary record
68693 + */
68694 +int ni_expand_list(struct ntfs_inode *ni)
68696 +       int err = 0;
68697 +       u32 asize, done = 0;
68698 +       struct ATTRIB *attr, *ins_attr;
68699 +       struct ATTR_LIST_ENTRY *le;
68700 +       bool is_mft = ni->mi.rno == MFT_REC_MFT;
68701 +       struct MFT_REF ref;
68703 +       mi_get_ref(&ni->mi, &ref);
68704 +       le = NULL;
68706 +       while ((le = al_enumerate(ni, le))) {
68707 +               if (le->type == ATTR_STD)
68708 +                       continue;
68710 +               if (memcmp(&ref, &le->ref, sizeof(struct MFT_REF)))
68711 +                       continue;
68713 +               if (is_mft && le->type == ATTR_DATA)
68714 +                       continue;
68716 +               /* Find attribute in primary record */
68717 +               attr = rec_find_attr_le(&ni->mi, le);
68718 +               if (!attr) {
68719 +                       err = -EINVAL;
68720 +                       goto out;
68721 +               }
68723 +               asize = le32_to_cpu(attr->size);
68725 +               /* Always insert into new record to avoid collisions (deep recursive) */
68726 +               err = ni_ins_attr_ext(ni, le, attr->type, attr_name(attr),
68727 +                                     attr->name_len, asize, attr_svcn(attr),
68728 +                                     le16_to_cpu(attr->name_off), true,
68729 +                                     &ins_attr, NULL);
68731 +               if (err)
68732 +                       goto out;
68734 +               memcpy(ins_attr, attr, asize);
68735 +               ins_attr->id = le->id;
68736 +               mi_remove_attr(&ni->mi, attr);
68738 +               done += asize;
68739 +               goto out;
68740 +       }
68742 +       if (!is_mft) {
68743 +               err = -EFBIG; /* attr list is too big(?) */
68744 +               goto out;
68745 +       }
68747 +       /* split mft data as much as possible */
68748 +       err = ni_expand_mft_list(ni);
68749 +       if (err)
68750 +               goto out;
68752 +out:
68753 +       return !err && !done ? -EOPNOTSUPP : err;
68757 + * ni_insert_nonresident
68758 + *
68759 + * inserts new nonresident attribute
68760 + */
68761 +int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
68762 +                         const __le16 *name, u8 name_len,
68763 +                         const struct runs_tree *run, CLST svcn, CLST len,
68764 +                         __le16 flags, struct ATTRIB **new_attr,
68765 +                         struct mft_inode **mi)
68767 +       int err;
68768 +       CLST plen;
68769 +       struct ATTRIB *attr;
68770 +       bool is_ext =
68771 +               (flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) && !svcn;
68772 +       u32 name_size = QuadAlign(name_len * sizeof(short));
68773 +       u32 name_off = is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT;
68774 +       u32 run_off = name_off + name_size;
68775 +       u32 run_size, asize;
68776 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68778 +       err = run_pack(run, svcn, len, NULL, sbi->max_bytes_per_attr - run_off,
68779 +                      &plen);
68780 +       if (err < 0)
68781 +               goto out;
68783 +       run_size = QuadAlign(err);
68785 +       if (plen < len) {
68786 +               err = -EINVAL;
68787 +               goto out;
68788 +       }
68790 +       asize = run_off + run_size;
68792 +       if (asize > sbi->max_bytes_per_attr) {
68793 +               err = -EINVAL;
68794 +               goto out;
68795 +       }
68797 +       err = ni_insert_attr(ni, type, name, name_len, asize, name_off, svcn,
68798 +                            &attr, mi);
68800 +       if (err)
68801 +               goto out;
68803 +       attr->non_res = 1;
68804 +       attr->name_off = cpu_to_le16(name_off);
68805 +       attr->flags = flags;
68807 +       run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size, &plen);
68809 +       attr->nres.svcn = cpu_to_le64(svcn);
68810 +       attr->nres.evcn = cpu_to_le64((u64)svcn + len - 1);
68812 +       err = 0;
68813 +       if (new_attr)
68814 +               *new_attr = attr;
68816 +       *(__le64 *)&attr->nres.run_off = cpu_to_le64(run_off);
68818 +       attr->nres.alloc_size =
68819 +               svcn ? 0 : cpu_to_le64((u64)len << ni->mi.sbi->cluster_bits);
68820 +       attr->nres.data_size = attr->nres.alloc_size;
68821 +       attr->nres.valid_size = attr->nres.alloc_size;
68823 +       if (is_ext) {
68824 +               if (flags & ATTR_FLAG_COMPRESSED)
68825 +                       attr->nres.c_unit = COMPRESSION_UNIT;
68826 +               attr->nres.total_size = attr->nres.alloc_size;
68827 +       }
68829 +out:
68830 +       return err;
68834 + * ni_insert_resident
68835 + *
68836 + * inserts new resident attribute
68837 + */
68838 +int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
68839 +                      enum ATTR_TYPE type, const __le16 *name, u8 name_len,
68840 +                      struct ATTRIB **new_attr, struct mft_inode **mi)
68842 +       int err;
68843 +       u32 name_size = QuadAlign(name_len * sizeof(short));
68844 +       u32 asize = SIZEOF_RESIDENT + name_size + QuadAlign(data_size);
68845 +       struct ATTRIB *attr;
68847 +       err = ni_insert_attr(ni, type, name, name_len, asize, SIZEOF_RESIDENT,
68848 +                            0, &attr, mi);
68849 +       if (err)
68850 +               return err;
68852 +       attr->non_res = 0;
68853 +       attr->flags = 0;
68855 +       attr->res.data_size = cpu_to_le32(data_size);
68856 +       attr->res.data_off = cpu_to_le16(SIZEOF_RESIDENT + name_size);
68857 +       if (type == ATTR_NAME)
68858 +               attr->res.flags = RESIDENT_FLAG_INDEXED;
68859 +       attr->res.res = 0;
68861 +       if (new_attr)
68862 +               *new_attr = attr;
68864 +       return 0;
68868 + * ni_remove_attr_le
68869 + *
68870 + * removes attribute from record
68871 + */
68872 +int ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
68873 +                     struct ATTR_LIST_ENTRY *le)
68875 +       int err;
68876 +       struct mft_inode *mi;
68878 +       err = ni_load_mi(ni, le, &mi);
68879 +       if (err)
68880 +               return err;
68882 +       mi_remove_attr(mi, attr);
68884 +       if (le)
68885 +               al_remove_le(ni, le);
68887 +       return 0;
68891 + * ni_delete_all
68892 + *
68893 + * removes all attributes and frees allocates space
68894 + * ntfs_evict_inode->ntfs_clear_inode->ni_delete_all (if no links)
68895 + */
68896 +int ni_delete_all(struct ntfs_inode *ni)
68898 +       int err;
68899 +       struct ATTR_LIST_ENTRY *le = NULL;
68900 +       struct ATTRIB *attr = NULL;
68901 +       struct rb_node *node;
68902 +       u16 roff;
68903 +       u32 asize;
68904 +       CLST svcn, evcn;
68905 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68906 +       bool nt3 = is_ntfs3(sbi);
68907 +       struct MFT_REF ref;
68909 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
68910 +               if (!nt3 || attr->name_len) {
68911 +                       ;
68912 +               } else if (attr->type == ATTR_REPARSE) {
68913 +                       mi_get_ref(&ni->mi, &ref);
68914 +                       ntfs_remove_reparse(sbi, 0, &ref);
68915 +               } else if (attr->type == ATTR_ID && !attr->non_res &&
68916 +                          le32_to_cpu(attr->res.data_size) >=
68917 +                                  sizeof(struct GUID)) {
68918 +                       ntfs_objid_remove(sbi, resident_data(attr));
68919 +               }
68921 +               if (!attr->non_res)
68922 +                       continue;
68924 +               svcn = le64_to_cpu(attr->nres.svcn);
68925 +               evcn = le64_to_cpu(attr->nres.evcn);
68927 +               if (evcn + 1 <= svcn)
68928 +                       continue;
68930 +               asize = le32_to_cpu(attr->size);
68931 +               roff = le16_to_cpu(attr->nres.run_off);
68933 +               /*run==1 means unpack and deallocate*/
68934 +               run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
68935 +                             Add2Ptr(attr, roff), asize - roff);
68936 +       }
68938 +       if (ni->attr_list.size) {
68939 +               run_deallocate(ni->mi.sbi, &ni->attr_list.run, true);
68940 +               al_destroy(ni);
68941 +       }
68943 +       /* Free all subrecords */
68944 +       for (node = rb_first(&ni->mi_tree); node;) {
68945 +               struct rb_node *next = rb_next(node);
68946 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
68948 +               clear_rec_inuse(mi->mrec);
68949 +               mi->dirty = true;
68950 +               mi_write(mi, 0);
68952 +               ntfs_mark_rec_free(sbi, mi->rno);
68953 +               ni_remove_mi(ni, mi);
68954 +               mi_put(mi);
68955 +               node = next;
68956 +       }
68958 +       // Free base record
68959 +       clear_rec_inuse(ni->mi.mrec);
68960 +       ni->mi.dirty = true;
68961 +       err = mi_write(&ni->mi, 0);
68963 +       ntfs_mark_rec_free(sbi, ni->mi.rno);
68965 +       return err;
68969 + * ni_fname_name
68970 + *
68971 + * returns file name attribute by its value
68972 + */
68973 +struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
68974 +                                    const struct cpu_str *uni,
68975 +                                    const struct MFT_REF *home_dir,
68976 +                                    struct ATTR_LIST_ENTRY **le)
68978 +       struct ATTRIB *attr = NULL;
68979 +       struct ATTR_FILE_NAME *fname;
68981 +       *le = NULL;
68983 +       /* Enumerate all names */
68984 +next:
68985 +       attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL, NULL);
68986 +       if (!attr)
68987 +               return NULL;
68989 +       fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
68990 +       if (!fname)
68991 +               goto next;
68993 +       if (home_dir && memcmp(home_dir, &fname->home, sizeof(*home_dir)))
68994 +               goto next;
68996 +       if (!uni)
68997 +               goto next;
68999 +       if (uni->len != fname->name_len)
69000 +               goto next;
69002 +       if (ntfs_cmp_names_cpu(uni, (struct le_str *)&fname->name_len, NULL,
69003 +                              false))
69004 +               goto next;
69006 +       return fname;
69010 + * ni_fname_type
69011 + *
69012 + * returns file name attribute with given type
69013 + */
69014 +struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
69015 +                                    struct ATTR_LIST_ENTRY **le)
69017 +       struct ATTRIB *attr = NULL;
69018 +       struct ATTR_FILE_NAME *fname;
69020 +       *le = NULL;
69022 +       /* Enumerate all names */
69023 +       for (;;) {
69024 +               attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL,
69025 +                                   NULL);
69026 +               if (!attr)
69027 +                       return NULL;
69029 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
69030 +               if (fname && name_type == fname->type)
69031 +                       return fname;
69032 +       }
69036 + * Process compressed/sparsed in special way
69037 + * NOTE: you need to set ni->std_fa = new_fa
69038 + * after this function to keep internal structures in consistency
69039 + */
69040 +int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa)
69042 +       struct ATTRIB *attr;
69043 +       struct mft_inode *mi;
69044 +       __le16 new_aflags;
69045 +       u32 new_asize;
69047 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
69048 +       if (!attr)
69049 +               return -EINVAL;
69051 +       new_aflags = attr->flags;
69053 +       if (new_fa & FILE_ATTRIBUTE_SPARSE_FILE)
69054 +               new_aflags |= ATTR_FLAG_SPARSED;
69055 +       else
69056 +               new_aflags &= ~ATTR_FLAG_SPARSED;
69058 +       if (new_fa & FILE_ATTRIBUTE_COMPRESSED)
69059 +               new_aflags |= ATTR_FLAG_COMPRESSED;
69060 +       else
69061 +               new_aflags &= ~ATTR_FLAG_COMPRESSED;
69063 +       if (new_aflags == attr->flags)
69064 +               return 0;
69066 +       if ((new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ==
69067 +           (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) {
69068 +               ntfs_inode_warn(&ni->vfs_inode,
69069 +                               "file can't be sparsed and compressed");
69070 +               return -EOPNOTSUPP;
69071 +       }
69073 +       if (!attr->non_res)
69074 +               goto out;
69076 +       if (attr->nres.data_size) {
69077 +               ntfs_inode_warn(
69078 +                       &ni->vfs_inode,
69079 +                       "one can change sparsed/compressed only for empty files");
69080 +               return -EOPNOTSUPP;
69081 +       }
69083 +       /* resize nonresident empty attribute in-place only*/
69084 +       new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED))
69085 +                           ? (SIZEOF_NONRESIDENT_EX + 8)
69086 +                           : (SIZEOF_NONRESIDENT + 8);
69088 +       if (!mi_resize_attr(mi, attr, new_asize - le32_to_cpu(attr->size)))
69089 +               return -EOPNOTSUPP;
69091 +       if (new_aflags & ATTR_FLAG_SPARSED) {
69092 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
69093 +               /* windows uses 16 clusters per frame but supports one cluster per frame too*/
69094 +               attr->nres.c_unit = 0;
69095 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
69096 +       } else if (new_aflags & ATTR_FLAG_COMPRESSED) {
69097 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
69098 +               /* the only allowed: 16 clusters per frame */
69099 +               attr->nres.c_unit = NTFS_LZNT_CUNIT;
69100 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops_cmpr;
69101 +       } else {
69102 +               attr->name_off = SIZEOF_NONRESIDENT_LE;
69103 +               /* normal files */
69104 +               attr->nres.c_unit = 0;
69105 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
69106 +       }
69107 +       attr->nres.run_off = attr->name_off;
69108 +out:
69109 +       attr->flags = new_aflags;
69110 +       mi->dirty = true;
69112 +       return 0;
69116 + * ni_parse_reparse
69117 + *
69118 + * buffer is at least 24 bytes
69119 + */
69120 +enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
69121 +                                  void *buffer)
69123 +       const struct REPARSE_DATA_BUFFER *rp = NULL;
69124 +       u8 bits;
69125 +       u16 len;
69126 +       typeof(rp->CompressReparseBuffer) *cmpr;
69128 +       static_assert(sizeof(struct REPARSE_DATA_BUFFER) <= 24);
69130 +       /* Try to estimate reparse point */
69131 +       if (!attr->non_res) {
69132 +               rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
69133 +       } else if (le64_to_cpu(attr->nres.data_size) >=
69134 +                  sizeof(struct REPARSE_DATA_BUFFER)) {
69135 +               struct runs_tree run;
69137 +               run_init(&run);
69139 +               if (!attr_load_runs_vcn(ni, ATTR_REPARSE, NULL, 0, &run, 0) &&
69140 +                   !ntfs_read_run_nb(ni->mi.sbi, &run, 0, buffer,
69141 +                                     sizeof(struct REPARSE_DATA_BUFFER),
69142 +                                     NULL)) {
69143 +                       rp = buffer;
69144 +               }
69146 +               run_close(&run);
69147 +       }
69149 +       if (!rp)
69150 +               return REPARSE_NONE;
69152 +       len = le16_to_cpu(rp->ReparseDataLength);
69153 +       switch (rp->ReparseTag) {
69154 +       case (IO_REPARSE_TAG_MICROSOFT | IO_REPARSE_TAG_SYMBOLIC_LINK):
69155 +               break; /* Symbolic link */
69156 +       case IO_REPARSE_TAG_MOUNT_POINT:
69157 +               break; /* Mount points and junctions */
69158 +       case IO_REPARSE_TAG_SYMLINK:
69159 +               break;
69160 +       case IO_REPARSE_TAG_COMPRESS:
69161 +               /*
69162 +                * WOF - Windows Overlay Filter - used to compress files with lzx/xpress
69163 +                * Unlike native NTFS file compression, the Windows Overlay Filter supports
69164 +                * only read operations. This means that it doesn’t need to sector-align each
69165 +                * compressed chunk, so the compressed data can be packed more tightly together.
69166 +                * If you open the file for writing, the Windows Overlay Filter just decompresses
69167 +                * the entire file, turning it back into a plain file.
69168 +                *
69169 +                * ntfs3 driver decompresses the entire file only on write or change size requests
69170 +                */
69172 +               cmpr = &rp->CompressReparseBuffer;
69173 +               if (len < sizeof(*cmpr) ||
69174 +                   cmpr->WofVersion != WOF_CURRENT_VERSION ||
69175 +                   cmpr->WofProvider != WOF_PROVIDER_SYSTEM ||
69176 +                   cmpr->ProviderVer != WOF_PROVIDER_CURRENT_VERSION) {
69177 +                       return REPARSE_NONE;
69178 +               }
69180 +               switch (cmpr->CompressionFormat) {
69181 +               case WOF_COMPRESSION_XPRESS4K:
69182 +                       bits = 0xc; // 4k
69183 +                       break;
69184 +               case WOF_COMPRESSION_XPRESS8K:
69185 +                       bits = 0xd; // 8k
69186 +                       break;
69187 +               case WOF_COMPRESSION_XPRESS16K:
69188 +                       bits = 0xe; // 16k
69189 +                       break;
69190 +               case WOF_COMPRESSION_LZX32K:
69191 +                       bits = 0xf; // 32k
69192 +                       break;
69193 +               default:
69194 +                       bits = 0x10; // 64k
69195 +                       break;
69196 +               }
69197 +               ni_set_ext_compress_bits(ni, bits);
69198 +               return REPARSE_COMPRESSED;
69200 +       case IO_REPARSE_TAG_DEDUP:
69201 +               ni->ni_flags |= NI_FLAG_DEDUPLICATED;
69202 +               return REPARSE_DEDUPLICATED;
69204 +       default:
69205 +               if (rp->ReparseTag & IO_REPARSE_TAG_NAME_SURROGATE)
69206 +                       break;
69208 +               return REPARSE_NONE;
69209 +       }
69211 +       /* Looks like normal symlink */
69212 +       return REPARSE_LINK;
69216 + * helper for file_fiemap
69217 + * assumed ni_lock
69218 + * TODO: less aggressive locks
69219 + */
69220 +int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
69221 +             __u64 vbo, __u64 len)
69223 +       int err = 0;
69224 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
69225 +       u8 cluster_bits = sbi->cluster_bits;
69226 +       struct runs_tree *run;
69227 +       struct rw_semaphore *run_lock;
69228 +       struct ATTRIB *attr;
69229 +       CLST vcn = vbo >> cluster_bits;
69230 +       CLST lcn, clen;
69231 +       u64 valid = ni->i_valid;
69232 +       u64 lbo, bytes;
69233 +       u64 end, alloc_size;
69234 +       size_t idx = -1;
69235 +       u32 flags;
69236 +       bool ok;
69238 +       if (S_ISDIR(ni->vfs_inode.i_mode)) {
69239 +               run = &ni->dir.alloc_run;
69240 +               attr = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, I30_NAME,
69241 +                                   ARRAY_SIZE(I30_NAME), NULL, NULL);
69242 +               run_lock = &ni->dir.run_lock;
69243 +       } else {
69244 +               run = &ni->file.run;
69245 +               attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
69246 +                                   NULL);
69247 +               if (!attr) {
69248 +                       err = -EINVAL;
69249 +                       goto out;
69250 +               }
69251 +               if (is_attr_compressed(attr)) {
69252 +                       /*unfortunately cp -r incorrectly treats compressed clusters*/
69253 +                       err = -EOPNOTSUPP;
69254 +                       ntfs_inode_warn(
69255 +                               &ni->vfs_inode,
69256 +                               "fiemap is not supported for compressed file (cp -r)");
69257 +                       goto out;
69258 +               }
69259 +               run_lock = &ni->file.run_lock;
69260 +       }
69262 +       if (!attr || !attr->non_res) {
69263 +               err = fiemap_fill_next_extent(
69264 +                       fieinfo, 0, 0,
69265 +                       attr ? le32_to_cpu(attr->res.data_size) : 0,
69266 +                       FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST |
69267 +                               FIEMAP_EXTENT_MERGED);
69268 +               goto out;
69269 +       }
69271 +       end = vbo + len;
69272 +       alloc_size = le64_to_cpu(attr->nres.alloc_size);
69273 +       if (end > alloc_size)
69274 +               end = alloc_size;
69276 +       down_read(run_lock);
69278 +       while (vbo < end) {
69279 +               if (idx == -1) {
69280 +                       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
69281 +               } else {
69282 +                       CLST vcn_next = vcn;
69284 +                       ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) &&
69285 +                            vcn == vcn_next;
69286 +                       if (!ok)
69287 +                               vcn = vcn_next;
69288 +               }
69290 +               if (!ok) {
69291 +                       up_read(run_lock);
69292 +                       down_write(run_lock);
69294 +                       err = attr_load_runs_vcn(ni, attr->type,
69295 +                                                attr_name(attr),
69296 +                                                attr->name_len, run, vcn);
69298 +                       up_write(run_lock);
69299 +                       down_read(run_lock);
69301 +                       if (err)
69302 +                               break;
69304 +                       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
69306 +                       if (!ok) {
69307 +                               err = -EINVAL;
69308 +                               break;
69309 +                       }
69310 +               }
69312 +               if (!clen) {
69313 +                       err = -EINVAL; // ?
69314 +                       break;
69315 +               }
69317 +               if (lcn == SPARSE_LCN) {
69318 +                       vcn += clen;
69319 +                       vbo = (u64)vcn << cluster_bits;
69320 +                       continue;
69321 +               }
69323 +               flags = FIEMAP_EXTENT_MERGED;
69324 +               if (S_ISDIR(ni->vfs_inode.i_mode)) {
69325 +                       ;
69326 +               } else if (is_attr_compressed(attr)) {
69327 +                       CLST clst_data;
69329 +                       err = attr_is_frame_compressed(
69330 +                               ni, attr, vcn >> attr->nres.c_unit, &clst_data);
69331 +                       if (err)
69332 +                               break;
69333 +                       if (clst_data < NTFS_LZNT_CLUSTERS)
69334 +                               flags |= FIEMAP_EXTENT_ENCODED;
69335 +               } else if (is_attr_encrypted(attr)) {
69336 +                       flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
69337 +               }
69339 +               vbo = (u64)vcn << cluster_bits;
69340 +               bytes = (u64)clen << cluster_bits;
69341 +               lbo = (u64)lcn << cluster_bits;
69343 +               vcn += clen;
69345 +               if (vbo + bytes >= end) {
69346 +                       bytes = end - vbo;
69347 +                       flags |= FIEMAP_EXTENT_LAST;
69348 +               }
69350 +               if (vbo + bytes <= valid) {
69351 +                       ;
69352 +               } else if (vbo >= valid) {
69353 +                       flags |= FIEMAP_EXTENT_UNWRITTEN;
69354 +               } else {
69355 +                       /* vbo < valid && valid < vbo + bytes */
69356 +                       u64 dlen = valid - vbo;
69358 +                       err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
69359 +                                                     flags);
69360 +                       if (err < 0)
69361 +                               break;
69362 +                       if (err == 1) {
69363 +                               err = 0;
69364 +                               break;
69365 +                       }
69367 +                       vbo = valid;
69368 +                       bytes -= dlen;
69369 +                       if (!bytes)
69370 +                               continue;
69372 +                       lbo += dlen;
69373 +                       flags |= FIEMAP_EXTENT_UNWRITTEN;
69374 +               }
69376 +               err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
69377 +               if (err < 0)
69378 +                       break;
69379 +               if (err == 1) {
69380 +                       err = 0;
69381 +                       break;
69382 +               }
69384 +               vbo += bytes;
69385 +       }
69387 +       up_read(run_lock);
69389 +out:
69390 +       return err;
69394 + * When decompressing, we typically obtain more than one page per reference.
69395 + * We inject the additional pages into the page cache.
69396 + */
69397 +int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
69399 +       int err;
69400 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
69401 +       struct address_space *mapping = page->mapping;
69402 +       pgoff_t index = page->index;
69403 +       u64 frame_vbo, vbo = (u64)index << PAGE_SHIFT;
69404 +       struct page **pages = NULL; /*array of at most 16 pages. stack?*/
69405 +       u8 frame_bits;
69406 +       CLST frame;
69407 +       u32 i, idx, frame_size, pages_per_frame;
69408 +       gfp_t gfp_mask;
69409 +       struct page *pg;
69411 +       if (vbo >= ni->vfs_inode.i_size) {
69412 +               SetPageUptodate(page);
69413 +               err = 0;
69414 +               goto out;
69415 +       }
69417 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
69418 +               /* xpress or lzx */
69419 +               frame_bits = ni_ext_compress_bits(ni);
69420 +       } else {
69421 +               /* lznt compression*/
69422 +               frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
69423 +       }
69424 +       frame_size = 1u << frame_bits;
69425 +       frame = vbo >> frame_bits;
69426 +       frame_vbo = (u64)frame << frame_bits;
69427 +       idx = (vbo - frame_vbo) >> PAGE_SHIFT;
69429 +       pages_per_frame = frame_size >> PAGE_SHIFT;
69430 +       pages = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
69431 +       if (!pages) {
69432 +               err = -ENOMEM;
69433 +               goto out;
69434 +       }
69436 +       pages[idx] = page;
69437 +       index = frame_vbo >> PAGE_SHIFT;
69438 +       gfp_mask = mapping_gfp_mask(mapping);
69440 +       for (i = 0; i < pages_per_frame; i++, index++) {
69441 +               if (i == idx)
69442 +                       continue;
69444 +               pg = find_or_create_page(mapping, index, gfp_mask);
69445 +               if (!pg) {
69446 +                       err = -ENOMEM;
69447 +                       goto out1;
69448 +               }
69449 +               pages[i] = pg;
69450 +       }
69452 +       err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame);
69454 +out1:
69455 +       if (err)
69456 +               SetPageError(page);
69458 +       for (i = 0; i < pages_per_frame; i++) {
69459 +               pg = pages[i];
69460 +               if (i == idx)
69461 +                       continue;
69462 +               unlock_page(pg);
69463 +               put_page(pg);
69464 +       }
69466 +out:
69467 +       /* At this point, err contains 0 or -EIO depending on the "critical" page */
69468 +       ntfs_free(pages);
69469 +       unlock_page(page);
69471 +       return err;
69474 +#ifdef CONFIG_NTFS3_LZX_XPRESS
69476 + * decompress lzx/xpress compressed file
69477 + * remove ATTR_DATA::WofCompressedData
69478 + * remove ATTR_REPARSE
69479 + */
69480 +int ni_decompress_file(struct ntfs_inode *ni)
69482 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
69483 +       struct inode *inode = &ni->vfs_inode;
69484 +       loff_t i_size = inode->i_size;
69485 +       struct address_space *mapping = inode->i_mapping;
69486 +       gfp_t gfp_mask = mapping_gfp_mask(mapping);
69487 +       struct page **pages = NULL;
69488 +       struct ATTR_LIST_ENTRY *le;
69489 +       struct ATTRIB *attr;
69490 +       CLST vcn, cend, lcn, clen, end;
69491 +       pgoff_t index;
69492 +       u64 vbo;
69493 +       u8 frame_bits;
69494 +       u32 i, frame_size, pages_per_frame, bytes;
69495 +       struct mft_inode *mi;
69496 +       int err;
69498 +       /* clusters for decompressed data*/
69499 +       cend = bytes_to_cluster(sbi, i_size);
69501 +       if (!i_size)
69502 +               goto remove_wof;
69504 +       /* check in advance */
69505 +       if (cend > wnd_zeroes(&sbi->used.bitmap)) {
69506 +               err = -ENOSPC;
69507 +               goto out;
69508 +       }
69510 +       frame_bits = ni_ext_compress_bits(ni);
69511 +       frame_size = 1u << frame_bits;
69512 +       pages_per_frame = frame_size >> PAGE_SHIFT;
69513 +       pages = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
69514 +       if (!pages) {
69515 +               err = -ENOMEM;
69516 +               goto out;
69517 +       }
69519 +       /*
69520 +        * Step 1: decompress data and copy to new allocated clusters
69521 +        */
69522 +       index = 0;
69523 +       for (vbo = 0; vbo < i_size; vbo += bytes) {
69524 +               u32 nr_pages;
69525 +               bool new;
69527 +               if (vbo + frame_size > i_size) {
69528 +                       bytes = i_size - vbo;
69529 +                       nr_pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
69530 +               } else {
69531 +                       nr_pages = pages_per_frame;
69532 +                       bytes = frame_size;
69533 +               }
69535 +               end = bytes_to_cluster(sbi, vbo + bytes);
69537 +               for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
69538 +                       err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
69539 +                                                 &clen, &new);
69540 +                       if (err)
69541 +                               goto out;
69542 +               }
69544 +               for (i = 0; i < pages_per_frame; i++, index++) {
69545 +                       struct page *pg;
69547 +                       pg = find_or_create_page(mapping, index, gfp_mask);
69548 +                       if (!pg) {
69549 +                               while (i--) {
69550 +                                       unlock_page(pages[i]);
69551 +                                       put_page(pages[i]);
69552 +                               }
69553 +                               err = -ENOMEM;
69554 +                               goto out;
69555 +                       }
69556 +                       pages[i] = pg;
69557 +               }
69559 +               err = ni_read_frame(ni, vbo, pages, pages_per_frame);
69561 +               if (!err) {
69562 +                       down_read(&ni->file.run_lock);
69563 +                       err = ntfs_bio_pages(sbi, &ni->file.run, pages,
69564 +                                            nr_pages, vbo, bytes,
69565 +                                            REQ_OP_WRITE);
69566 +                       up_read(&ni->file.run_lock);
69567 +               }
69569 +               for (i = 0; i < pages_per_frame; i++) {
69570 +                       unlock_page(pages[i]);
69571 +                       put_page(pages[i]);
69572 +               }
69574 +               if (err)
69575 +                       goto out;
69577 +               cond_resched();
69578 +       }
69580 +remove_wof:
69581 +       /*
69582 +        * Step 2: deallocate attributes ATTR_DATA::WofCompressedData and ATTR_REPARSE
69583 +        */
69584 +       attr = NULL;
69585 +       le = NULL;
69586 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
69587 +               CLST svcn, evcn;
69588 +               u32 asize, roff;
69590 +               if (attr->type == ATTR_REPARSE) {
69591 +                       struct MFT_REF ref;
69593 +                       mi_get_ref(&ni->mi, &ref);
69594 +                       ntfs_remove_reparse(sbi, 0, &ref);
69595 +               }
69597 +               if (!attr->non_res)
69598 +                       continue;
69600 +               if (attr->type != ATTR_REPARSE &&
69601 +                   (attr->type != ATTR_DATA ||
69602 +                    attr->name_len != ARRAY_SIZE(WOF_NAME) ||
69603 +                    memcmp(attr_name(attr), WOF_NAME, sizeof(WOF_NAME))))
69604 +                       continue;
69606 +               svcn = le64_to_cpu(attr->nres.svcn);
69607 +               evcn = le64_to_cpu(attr->nres.evcn);
69609 +               if (evcn + 1 <= svcn)
69610 +                       continue;
69612 +               asize = le32_to_cpu(attr->size);
69613 +               roff = le16_to_cpu(attr->nres.run_off);
69615 +               /*run==1 means unpack and deallocate*/
69616 +               run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
69617 +                             Add2Ptr(attr, roff), asize - roff);
69618 +       }
69620 +       /*
69621 +        * Step 3: remove attribute ATTR_DATA::WofCompressedData
69622 +        */
69623 +       err = ni_remove_attr(ni, ATTR_DATA, WOF_NAME, ARRAY_SIZE(WOF_NAME),
69624 +                            false, NULL);
69625 +       if (err)
69626 +               goto out;
69628 +       /*
69629 +        * Step 4: remove ATTR_REPARSE
69630 +        */
69631 +       err = ni_remove_attr(ni, ATTR_REPARSE, NULL, 0, false, NULL);
69632 +       if (err)
69633 +               goto out;
69635 +       /*
69636 +        * Step 5: remove sparse flag from data attribute
69637 +        */
69638 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
69639 +       if (!attr) {
69640 +               err = -EINVAL;
69641 +               goto out;
69642 +       }
69644 +       if (attr->non_res && is_attr_sparsed(attr)) {
69645 +               /* sparsed attribute header is 8 bytes bigger than normal*/
69646 +               struct MFT_REC *rec = mi->mrec;
69647 +               u32 used = le32_to_cpu(rec->used);
69648 +               u32 asize = le32_to_cpu(attr->size);
69649 +               u16 roff = le16_to_cpu(attr->nres.run_off);
69650 +               char *rbuf = Add2Ptr(attr, roff);
69652 +               memmove(rbuf - 8, rbuf, used - PtrOffset(rec, rbuf));
69653 +               attr->size = cpu_to_le32(asize - 8);
69654 +               attr->flags &= ~ATTR_FLAG_SPARSED;
69655 +               attr->nres.run_off = cpu_to_le16(roff - 8);
69656 +               attr->nres.c_unit = 0;
69657 +               rec->used = cpu_to_le32(used - 8);
69658 +               mi->dirty = true;
69659 +               ni->std_fa &= ~(FILE_ATTRIBUTE_SPARSE_FILE |
69660 +                               FILE_ATTRIBUTE_REPARSE_POINT);
69662 +               mark_inode_dirty(inode);
69663 +       }
69665 +       /* clear cached flag */
69666 +       ni->ni_flags &= ~NI_FLAG_COMPRESSED_MASK;
69667 +       if (ni->file.offs_page) {
69668 +               put_page(ni->file.offs_page);
69669 +               ni->file.offs_page = NULL;
69670 +       }
69671 +       mapping->a_ops = &ntfs_aops;
69673 +out:
69674 +       ntfs_free(pages);
69675 +       if (err) {
69676 +               make_bad_inode(inode);
69677 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
69678 +       }
69680 +       return err;
69683 +/* external compression lzx/xpress */
69684 +static int decompress_lzx_xpress(struct ntfs_sb_info *sbi, const char *cmpr,
69685 +                                size_t cmpr_size, void *unc, size_t unc_size,
69686 +                                u32 frame_size)
69688 +       int err;
69689 +       void *ctx;
69691 +       if (cmpr_size == unc_size) {
69692 +               /* frame not compressed */
69693 +               memcpy(unc, cmpr, unc_size);
69694 +               return 0;
69695 +       }
69697 +       err = 0;
69698 +       if (frame_size == 0x8000) {
69699 +               mutex_lock(&sbi->compress.mtx_lzx);
69700 +               /* LZX: frame compressed */
69701 +               ctx = sbi->compress.lzx;
69702 +               if (!ctx) {
69703 +                       /* Lazy initialize lzx decompress context */
69704 +                       ctx = lzx_allocate_decompressor();
69705 +                       if (!ctx) {
69706 +                               err = -ENOMEM;
69707 +                               goto out1;
69708 +                       }
69710 +                       sbi->compress.lzx = ctx;
69711 +               }
69713 +               if (lzx_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
69714 +                       /* treat all errors as "invalid argument" */
69715 +                       err = -EINVAL;
69716 +               }
69717 +out1:
69718 +               mutex_unlock(&sbi->compress.mtx_lzx);
69719 +       } else {
69720 +               /* XPRESS: frame compressed */
69721 +               mutex_lock(&sbi->compress.mtx_xpress);
69722 +               ctx = sbi->compress.xpress;
69723 +               if (!ctx) {
69724 +                       /* Lazy initialize xpress decompress context */
69725 +                       ctx = xpress_allocate_decompressor();
69726 +                       if (!ctx) {
69727 +                               err = -ENOMEM;
69728 +                               goto out2;
69729 +                       }
69731 +                       sbi->compress.xpress = ctx;
69732 +               }
69734 +               if (xpress_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
69735 +                       /* treat all errors as "invalid argument" */
69736 +                       err = -EINVAL;
69737 +               }
69738 +out2:
69739 +               mutex_unlock(&sbi->compress.mtx_xpress);
69740 +       }
69741 +       return err;
69743 +#endif
69746 + * ni_read_frame
69747 + *
69748 + * pages - array of locked pages
69749 + */
69750 +int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
69751 +                 u32 pages_per_frame)
69753 +       int err;
69754 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
69755 +       u8 cluster_bits = sbi->cluster_bits;
69756 +       char *frame_ondisk = NULL;
69757 +       char *frame_mem = NULL;
69758 +       struct page **pages_disk = NULL;
69759 +       struct ATTR_LIST_ENTRY *le = NULL;
69760 +       struct runs_tree *run = &ni->file.run;
69761 +       u64 valid_size = ni->i_valid;
69762 +       u64 vbo_disk;
69763 +       size_t unc_size;
69764 +       u32 frame_size, i, npages_disk, ondisk_size;
69765 +       struct page *pg;
69766 +       struct ATTRIB *attr;
69767 +       CLST frame, clst_data;
69769 +       /*
69770 +        * To simplify decompress algorithm do vmap for source and target pages
69771 +        */
69772 +       for (i = 0; i < pages_per_frame; i++)
69773 +               kmap(pages[i]);
69775 +       frame_size = pages_per_frame << PAGE_SHIFT;
69776 +       frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL);
69777 +       if (!frame_mem) {
69778 +               err = -ENOMEM;
69779 +               goto out;
69780 +       }
69782 +       attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, NULL);
69783 +       if (!attr) {
69784 +               err = -ENOENT;
69785 +               goto out1;
69786 +       }
69788 +       if (!attr->non_res) {
69789 +               u32 data_size = le32_to_cpu(attr->res.data_size);
69791 +               memset(frame_mem, 0, frame_size);
69792 +               if (frame_vbo < data_size) {
69793 +                       ondisk_size = data_size - frame_vbo;
69794 +                       memcpy(frame_mem, resident_data(attr) + frame_vbo,
69795 +                              min(ondisk_size, frame_size));
69796 +               }
69797 +               err = 0;
69798 +               goto out1;
69799 +       }
69801 +       if (frame_vbo >= valid_size) {
69802 +               memset(frame_mem, 0, frame_size);
69803 +               err = 0;
69804 +               goto out1;
69805 +       }
69807 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
69808 +#ifndef CONFIG_NTFS3_LZX_XPRESS
69809 +               err = -EOPNOTSUPP;
69810 +               goto out1;
69811 +#else
69812 +               u32 frame_bits = ni_ext_compress_bits(ni);
69813 +               u64 frame64 = frame_vbo >> frame_bits;
69814 +               u64 frames, vbo_data;
69816 +               if (frame_size != (1u << frame_bits)) {
69817 +                       err = -EINVAL;
69818 +                       goto out1;
69819 +               }
69820 +               switch (frame_size) {
69821 +               case 0x1000:
69822 +               case 0x2000:
69823 +               case 0x4000:
69824 +               case 0x8000:
69825 +                       break;
69826 +               default:
69827 +                       /* unknown compression */
69828 +                       err = -EOPNOTSUPP;
69829 +                       goto out1;
69830 +               }
69832 +               attr = ni_find_attr(ni, attr, &le, ATTR_DATA, WOF_NAME,
69833 +                                   ARRAY_SIZE(WOF_NAME), NULL, NULL);
69834 +               if (!attr) {
69835 +                       ntfs_inode_err(
69836 +                               &ni->vfs_inode,
69837 +                               "external compressed file should contains data attribute \"WofCompressedData\"");
69838 +                       err = -EINVAL;
69839 +                       goto out1;
69840 +               }
69842 +               if (!attr->non_res) {
69843 +                       run = NULL;
69844 +               } else {
69845 +                       run = run_alloc();
69846 +                       if (!run) {
69847 +                               err = -ENOMEM;
69848 +                               goto out1;
69849 +                       }
69850 +               }
69852 +               frames = (ni->vfs_inode.i_size - 1) >> frame_bits;
69854 +               err = attr_wof_frame_info(ni, attr, run, frame64, frames,
69855 +                                         frame_bits, &ondisk_size, &vbo_data);
69856 +               if (err)
69857 +                       goto out2;
69859 +               if (frame64 == frames) {
69860 +                       unc_size = 1 + ((ni->vfs_inode.i_size - 1) &
69861 +                                       (frame_size - 1));
69862 +                       ondisk_size = attr_size(attr) - vbo_data;
69863 +               } else {
69864 +                       unc_size = frame_size;
69865 +               }
69867 +               if (ondisk_size > frame_size) {
69868 +                       err = -EINVAL;
69869 +                       goto out2;
69870 +               }
69872 +               if (!attr->non_res) {
69873 +                       if (vbo_data + ondisk_size >
69874 +                           le32_to_cpu(attr->res.data_size)) {
69875 +                               err = -EINVAL;
69876 +                               goto out1;
69877 +                       }
69879 +                       err = decompress_lzx_xpress(
69880 +                               sbi, Add2Ptr(resident_data(attr), vbo_data),
69881 +                               ondisk_size, frame_mem, unc_size, frame_size);
69882 +                       goto out1;
69883 +               }
69884 +               vbo_disk = vbo_data;
69885 +               /* load all runs to read [vbo_disk-vbo_to) */
69886 +               err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
69887 +                                          ARRAY_SIZE(WOF_NAME), run, vbo_disk,
69888 +                                          vbo_data + ondisk_size);
69889 +               if (err)
69890 +                       goto out2;
69891 +               npages_disk = (ondisk_size + (vbo_disk & (PAGE_SIZE - 1)) +
69892 +                              PAGE_SIZE - 1) >>
69893 +                             PAGE_SHIFT;
69894 +#endif
69895 +       } else if (is_attr_compressed(attr)) {
69896 +               /* lznt compression*/
69897 +               if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
69898 +                       err = -EOPNOTSUPP;
69899 +                       goto out1;
69900 +               }
69902 +               if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
69903 +                       err = -EOPNOTSUPP;
69904 +                       goto out1;
69905 +               }
69907 +               down_write(&ni->file.run_lock);
69908 +               run_truncate_around(run, le64_to_cpu(attr->nres.svcn));
69909 +               frame = frame_vbo >> (cluster_bits + NTFS_LZNT_CUNIT);
69910 +               err = attr_is_frame_compressed(ni, attr, frame, &clst_data);
69911 +               up_write(&ni->file.run_lock);
69912 +               if (err)
69913 +                       goto out1;
69915 +               if (!clst_data) {
69916 +                       memset(frame_mem, 0, frame_size);
69917 +                       goto out1;
69918 +               }
69920 +               frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
69921 +               ondisk_size = clst_data << cluster_bits;
69923 +               if (clst_data >= NTFS_LZNT_CLUSTERS) {
69924 +                       /* frame is not compressed */
69925 +                       down_read(&ni->file.run_lock);
69926 +                       err = ntfs_bio_pages(sbi, run, pages, pages_per_frame,
69927 +                                            frame_vbo, ondisk_size,
69928 +                                            REQ_OP_READ);
69929 +                       up_read(&ni->file.run_lock);
69930 +                       goto out1;
69931 +               }
69932 +               vbo_disk = frame_vbo;
69933 +               npages_disk = (ondisk_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
69934 +       } else {
69935 +               __builtin_unreachable();
69936 +               err = -EINVAL;
69937 +               goto out1;
69938 +       }
69940 +       pages_disk = ntfs_zalloc(npages_disk * sizeof(struct page *));
69941 +       if (!pages_disk) {
69942 +               err = -ENOMEM;
69943 +               goto out2;
69944 +       }
69946 +       for (i = 0; i < npages_disk; i++) {
69947 +               pg = alloc_page(GFP_KERNEL);
69948 +               if (!pg) {
69949 +                       err = -ENOMEM;
69950 +                       goto out3;
69951 +               }
69952 +               pages_disk[i] = pg;
69953 +               lock_page(pg);
69954 +               kmap(pg);
69955 +       }
69957 +       /* read 'ondisk_size' bytes from disk */
69958 +       down_read(&ni->file.run_lock);
69959 +       err = ntfs_bio_pages(sbi, run, pages_disk, npages_disk, vbo_disk,
69960 +                            ondisk_size, REQ_OP_READ);
69961 +       up_read(&ni->file.run_lock);
69962 +       if (err)
69963 +               goto out3;
69965 +       /*
69966 +        * To simplify decompress algorithm do vmap for source and target pages
69967 +        */
69968 +       frame_ondisk = vmap(pages_disk, npages_disk, VM_MAP, PAGE_KERNEL_RO);
69969 +       if (!frame_ondisk) {
69970 +               err = -ENOMEM;
69971 +               goto out3;
69972 +       }
69974 +       /* decompress: frame_ondisk -> frame_mem */
69975 +#ifdef CONFIG_NTFS3_LZX_XPRESS
69976 +       if (run != &ni->file.run) {
69977 +               /* LZX or XPRESS */
69978 +               err = decompress_lzx_xpress(
69979 +                       sbi, frame_ondisk + (vbo_disk & (PAGE_SIZE - 1)),
69980 +                       ondisk_size, frame_mem, unc_size, frame_size);
69981 +       } else
69982 +#endif
69983 +       {
69984 +               /* LZNT - native ntfs compression */
69985 +               unc_size = decompress_lznt(frame_ondisk, ondisk_size, frame_mem,
69986 +                                          frame_size);
69987 +               if ((ssize_t)unc_size < 0)
69988 +                       err = unc_size;
69989 +               else if (!unc_size || unc_size > frame_size)
69990 +                       err = -EINVAL;
69991 +       }
69992 +       if (!err && valid_size < frame_vbo + frame_size) {
69993 +               size_t ok = valid_size - frame_vbo;
69995 +               memset(frame_mem + ok, 0, frame_size - ok);
69996 +       }
69998 +       vunmap(frame_ondisk);
70000 +out3:
70001 +       for (i = 0; i < npages_disk; i++) {
70002 +               pg = pages_disk[i];
70003 +               if (pg) {
70004 +                       kunmap(pg);
70005 +                       unlock_page(pg);
70006 +                       put_page(pg);
70007 +               }
70008 +       }
70009 +       ntfs_free(pages_disk);
70011 +out2:
70012 +#ifdef CONFIG_NTFS3_LZX_XPRESS
70013 +       if (run != &ni->file.run)
70014 +               run_free(run);
70015 +#endif
70016 +out1:
70017 +       vunmap(frame_mem);
70018 +out:
70019 +       for (i = 0; i < pages_per_frame; i++) {
70020 +               pg = pages[i];
70021 +               kunmap(pg);
70022 +               ClearPageError(pg);
70023 +               SetPageUptodate(pg);
70024 +       }
70026 +       return err;
70030 + * ni_write_frame
70031 + *
70032 + * pages - array of locked pages
70033 + */
70034 +int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
70035 +                  u32 pages_per_frame)
70037 +       int err;
70038 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
70039 +       u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
70040 +       u32 frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
70041 +       u64 frame_vbo = (u64)pages[0]->index << PAGE_SHIFT;
70042 +       CLST frame = frame_vbo >> frame_bits;
70043 +       char *frame_ondisk = NULL;
70044 +       struct page **pages_disk = NULL;
70045 +       struct ATTR_LIST_ENTRY *le = NULL;
70046 +       char *frame_mem;
70047 +       struct ATTRIB *attr;
70048 +       struct mft_inode *mi;
70049 +       u32 i;
70050 +       struct page *pg;
70051 +       size_t compr_size, ondisk_size;
70052 +       struct lznt *lznt;
70054 +       attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
70055 +       if (!attr) {
70056 +               err = -ENOENT;
70057 +               goto out;
70058 +       }
70060 +       if (WARN_ON(!is_attr_compressed(attr))) {
70061 +               err = -EINVAL;
70062 +               goto out;
70063 +       }
70065 +       if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
70066 +               err = -EOPNOTSUPP;
70067 +               goto out;
70068 +       }
70070 +       if (!attr->non_res) {
70071 +               down_write(&ni->file.run_lock);
70072 +               err = attr_make_nonresident(ni, attr, le, mi,
70073 +                                           le32_to_cpu(attr->res.data_size),
70074 +                                           &ni->file.run, &attr, pages[0]);
70075 +               up_write(&ni->file.run_lock);
70076 +               if (err)
70077 +                       goto out;
70078 +       }
70080 +       if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
70081 +               err = -EOPNOTSUPP;
70082 +               goto out;
70083 +       }
70085 +       pages_disk = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
70086 +       if (!pages_disk) {
70087 +               err = -ENOMEM;
70088 +               goto out;
70089 +       }
70091 +       for (i = 0; i < pages_per_frame; i++) {
70092 +               pg = alloc_page(GFP_KERNEL);
70093 +               if (!pg) {
70094 +                       err = -ENOMEM;
70095 +                       goto out1;
70096 +               }
70097 +               pages_disk[i] = pg;
70098 +               lock_page(pg);
70099 +               kmap(pg);
70100 +       }
70102 +       /*
70103 +        * To simplify compress algorithm do vmap for source and target pages
70104 +        */
70105 +       frame_ondisk = vmap(pages_disk, pages_per_frame, VM_MAP, PAGE_KERNEL);
70106 +       if (!frame_ondisk) {
70107 +               err = -ENOMEM;
70108 +               goto out1;
70109 +       }
70111 +       for (i = 0; i < pages_per_frame; i++)
70112 +               kmap(pages[i]);
70114 +       /* map in-memory frame for read-only */
70115 +       frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO);
70116 +       if (!frame_mem) {
70117 +               err = -ENOMEM;
70118 +               goto out2;
70119 +       }
70121 +       mutex_lock(&sbi->compress.mtx_lznt);
70122 +       lznt = NULL;
70123 +       if (!sbi->compress.lznt) {
70124 +               /*
70125 +                * lznt implements two levels of compression:
70126 +                * 0 - standard compression
70127 +                * 1 - best compression, requires a lot of cpu
70128 +                * use mount option?
70129 +                */
70130 +               lznt = get_lznt_ctx(0);
70131 +               if (!lznt) {
70132 +                       mutex_unlock(&sbi->compress.mtx_lznt);
70133 +                       err = -ENOMEM;
70134 +                       goto out3;
70135 +               }
70137 +               sbi->compress.lznt = lznt;
70138 +               lznt = NULL;
70139 +       }
70141 +       /* compress: frame_mem -> frame_ondisk */
70142 +       compr_size = compress_lznt(frame_mem, frame_size, frame_ondisk,
70143 +                                  frame_size, sbi->compress.lznt);
70144 +       mutex_unlock(&sbi->compress.mtx_lznt);
70145 +       ntfs_free(lznt);
70147 +       if (compr_size + sbi->cluster_size > frame_size) {
70148 +               /* frame is not compressed */
70149 +               compr_size = frame_size;
70150 +               ondisk_size = frame_size;
70151 +       } else if (compr_size) {
70152 +               /* frame is compressed */
70153 +               ondisk_size = ntfs_up_cluster(sbi, compr_size);
70154 +               memset(frame_ondisk + compr_size, 0, ondisk_size - compr_size);
70155 +       } else {
70156 +               /* frame is sparsed */
70157 +               ondisk_size = 0;
70158 +       }
70160 +       down_write(&ni->file.run_lock);
70161 +       run_truncate_around(&ni->file.run, le64_to_cpu(attr->nres.svcn));
70162 +       err = attr_allocate_frame(ni, frame, compr_size, ni->i_valid);
70163 +       up_write(&ni->file.run_lock);
70164 +       if (err)
70165 +               goto out2;
70167 +       if (!ondisk_size)
70168 +               goto out2;
70170 +       down_read(&ni->file.run_lock);
70171 +       err = ntfs_bio_pages(sbi, &ni->file.run,
70172 +                            ondisk_size < frame_size ? pages_disk : pages,
70173 +                            pages_per_frame, frame_vbo, ondisk_size,
70174 +                            REQ_OP_WRITE);
70175 +       up_read(&ni->file.run_lock);
70177 +out3:
70178 +       vunmap(frame_mem);
70180 +out2:
70181 +       for (i = 0; i < pages_per_frame; i++)
70182 +               kunmap(pages[i]);
70184 +       vunmap(frame_ondisk);
70185 +out1:
70186 +       for (i = 0; i < pages_per_frame; i++) {
70187 +               pg = pages_disk[i];
70188 +               if (pg) {
70189 +                       kunmap(pg);
70190 +                       unlock_page(pg);
70191 +                       put_page(pg);
70192 +               }
70193 +       }
70194 +       ntfs_free(pages_disk);
70195 +out:
70196 +       return err;
70200 + * update duplicate info of ATTR_FILE_NAME in MFT and in parent directories
70201 + */
70202 +static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
70203 +                            int sync)
70205 +       struct ATTRIB *attr;
70206 +       struct mft_inode *mi;
70207 +       struct ATTR_LIST_ENTRY *le = NULL;
70208 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
70209 +       struct super_block *sb = sbi->sb;
70210 +       bool re_dirty = false;
70211 +       bool active = sb->s_flags & SB_ACTIVE;
70212 +       bool upd_parent = ni->ni_flags & NI_FLAG_UPDATE_PARENT;
70214 +       if (ni->mi.mrec->flags & RECORD_FLAG_DIR) {
70215 +               dup->fa |= FILE_ATTRIBUTE_DIRECTORY;
70216 +               attr = NULL;
70217 +               dup->alloc_size = 0;
70218 +               dup->data_size = 0;
70219 +       } else {
70220 +               dup->fa &= ~FILE_ATTRIBUTE_DIRECTORY;
70222 +               attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL,
70223 +                                   &mi);
70224 +               if (!attr) {
70225 +                       dup->alloc_size = dup->data_size = 0;
70226 +               } else if (!attr->non_res) {
70227 +                       u32 data_size = le32_to_cpu(attr->res.data_size);
70229 +                       dup->alloc_size = cpu_to_le64(QuadAlign(data_size));
70230 +                       dup->data_size = cpu_to_le64(data_size);
70231 +               } else {
70232 +                       u64 new_valid = ni->i_valid;
70233 +                       u64 data_size = le64_to_cpu(attr->nres.data_size);
70234 +                       __le64 valid_le;
70236 +                       dup->alloc_size = is_attr_ext(attr)
70237 +                                                 ? attr->nres.total_size
70238 +                                                 : attr->nres.alloc_size;
70239 +                       dup->data_size = attr->nres.data_size;
70241 +                       if (new_valid > data_size)
70242 +                               new_valid = data_size;
70244 +                       valid_le = cpu_to_le64(new_valid);
70245 +                       if (valid_le != attr->nres.valid_size) {
70246 +                               attr->nres.valid_size = valid_le;
70247 +                               mi->dirty = true;
70248 +                       }
70249 +               }
70250 +       }
70252 +       /* TODO: fill reparse info */
70253 +       dup->reparse = 0;
70254 +       dup->ea_size = 0;
70256 +       if (ni->ni_flags & NI_FLAG_EA) {
70257 +               attr = ni_find_attr(ni, attr, &le, ATTR_EA_INFO, NULL, 0, NULL,
70258 +                                   NULL);
70259 +               if (attr) {
70260 +                       const struct EA_INFO *info;
70262 +                       info = resident_data_ex(attr, sizeof(struct EA_INFO));
70263 +                       dup->ea_size = info->size_pack;
70264 +               }
70265 +       }
70267 +       attr = NULL;
70268 +       le = NULL;
70270 +       while ((attr = ni_find_attr(ni, attr, &le, ATTR_NAME, NULL, 0, NULL,
70271 +                                   &mi))) {
70272 +               struct inode *dir;
70273 +               struct ATTR_FILE_NAME *fname;
70275 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
70276 +               if (!fname)
70277 +                       continue;
70279 +               if (memcmp(&fname->dup, dup, sizeof(fname->dup))) {
70280 +                       memcpy(&fname->dup, dup, sizeof(fname->dup));
70281 +                       mi->dirty = true;
70282 +               } else if (!upd_parent) {
70283 +                       continue;
70284 +               }
70286 +               if (!active)
70287 +                       continue; /*avoid __wait_on_freeing_inode(inode); */
70289 +               /*ntfs_iget5 may sleep*/
70290 +               dir = ntfs_iget5(sb, &fname->home, NULL);
70291 +               if (IS_ERR(dir)) {
70292 +                       ntfs_inode_warn(
70293 +                               &ni->vfs_inode,
70294 +                               "failed to open parent directory r=%lx to update",
70295 +                               (long)ino_get(&fname->home));
70296 +                       continue;
70297 +               }
70299 +               if (!is_bad_inode(dir)) {
70300 +                       struct ntfs_inode *dir_ni = ntfs_i(dir);
70302 +                       if (!ni_trylock(dir_ni)) {
70303 +                               re_dirty = true;
70304 +                       } else {
70305 +                               indx_update_dup(dir_ni, sbi, fname, dup, sync);
70306 +                               ni_unlock(dir_ni);
70307 +                       }
70308 +               }
70309 +               iput(dir);
70310 +       }
70312 +       return re_dirty;
70316 + * ni_write_inode
70317 + *
70318 + * write mft base record and all subrecords to disk
70319 + */
70320 +int ni_write_inode(struct inode *inode, int sync, const char *hint)
70322 +       int err = 0, err2;
70323 +       struct ntfs_inode *ni = ntfs_i(inode);
70324 +       struct super_block *sb = inode->i_sb;
70325 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
70326 +       bool re_dirty = false;
70327 +       struct ATTR_STD_INFO *std;
70328 +       struct rb_node *node, *next;
70329 +       struct NTFS_DUP_INFO dup;
70331 +       if (is_bad_inode(inode) || sb_rdonly(sb))
70332 +               return 0;
70334 +       if (!ni_trylock(ni)) {
70335 +               /* 'ni' is under modification, skip for now */
70336 +               mark_inode_dirty_sync(inode);
70337 +               return 0;
70338 +       }
70340 +       if (is_rec_inuse(ni->mi.mrec) &&
70341 +           !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) {
70342 +               bool modified = false;
70344 +               /* update times in standard attribute */
70345 +               std = ni_std(ni);
70346 +               if (!std) {
70347 +                       err = -EINVAL;
70348 +                       goto out;
70349 +               }
70351 +               /* Update the access times if they have changed. */
70352 +               dup.m_time = kernel2nt(&inode->i_mtime);
70353 +               if (std->m_time != dup.m_time) {
70354 +                       std->m_time = dup.m_time;
70355 +                       modified = true;
70356 +               }
70358 +               dup.c_time = kernel2nt(&inode->i_ctime);
70359 +               if (std->c_time != dup.c_time) {
70360 +                       std->c_time = dup.c_time;
70361 +                       modified = true;
70362 +               }
70364 +               dup.a_time = kernel2nt(&inode->i_atime);
70365 +               if (std->a_time != dup.a_time) {
70366 +                       std->a_time = dup.a_time;
70367 +                       modified = true;
70368 +               }
70370 +               dup.fa = ni->std_fa;
70371 +               if (std->fa != dup.fa) {
70372 +                       std->fa = dup.fa;
70373 +                       modified = true;
70374 +               }
70376 +               if (modified)
70377 +                       ni->mi.dirty = true;
70379 +               if (!ntfs_is_meta_file(sbi, inode->i_ino) &&
70380 +                   (modified || (ni->ni_flags & NI_FLAG_UPDATE_PARENT))) {
70381 +                       dup.cr_time = std->cr_time;
70382 +                       /* Not critical if this function fail */
70383 +                       re_dirty = ni_update_parent(ni, &dup, sync);
70385 +                       if (re_dirty)
70386 +                               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
70387 +                       else
70388 +                               ni->ni_flags &= ~NI_FLAG_UPDATE_PARENT;
70389 +               }
70391 +               /* update attribute list */
70392 +               if (ni->attr_list.size && ni->attr_list.dirty) {
70393 +                       if (inode->i_ino != MFT_REC_MFT || sync) {
70394 +                               err = ni_try_remove_attr_list(ni);
70395 +                               if (err)
70396 +                                       goto out;
70397 +                       }
70399 +                       err = al_update(ni);
70400 +                       if (err)
70401 +                               goto out;
70402 +               }
70403 +       }
70405 +       for (node = rb_first(&ni->mi_tree); node; node = next) {
70406 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
70407 +               bool is_empty;
70409 +               next = rb_next(node);
70411 +               if (!mi->dirty)
70412 +                       continue;
70414 +               is_empty = !mi_enum_attr(mi, NULL);
70416 +               if (is_empty)
70417 +                       clear_rec_inuse(mi->mrec);
70419 +               err2 = mi_write(mi, sync);
70420 +               if (!err && err2)
70421 +                       err = err2;
70423 +               if (is_empty) {
70424 +                       ntfs_mark_rec_free(sbi, mi->rno);
70425 +                       rb_erase(node, &ni->mi_tree);
70426 +                       mi_put(mi);
70427 +               }
70428 +       }
70430 +       if (ni->mi.dirty) {
70431 +               err2 = mi_write(&ni->mi, sync);
70432 +               if (!err && err2)
70433 +                       err = err2;
70434 +       }
70435 +out:
70436 +       ni_unlock(ni);
70438 +       if (err) {
70439 +               ntfs_err(sb, "%s r=%lx failed, %d.", hint, inode->i_ino, err);
70440 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
70441 +               return err;
70442 +       }
70444 +       if (re_dirty && (sb->s_flags & SB_ACTIVE))
70445 +               mark_inode_dirty_sync(inode);
70447 +       return 0;
70449 diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
70450 new file mode 100644
70451 index 000000000000..53da12252408
70452 --- /dev/null
70453 +++ b/fs/ntfs3/fslog.c
70454 @@ -0,0 +1,5181 @@
70455 +// SPDX-License-Identifier: GPL-2.0
70457 + *
70458 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
70459 + *
70460 + */
70462 +#include <linux/blkdev.h>
70463 +#include <linux/buffer_head.h>
70464 +#include <linux/fs.h>
70465 +#include <linux/hash.h>
70466 +#include <linux/nls.h>
70467 +#include <linux/random.h>
70468 +#include <linux/ratelimit.h>
70469 +#include <linux/slab.h>
70471 +#include "debug.h"
70472 +#include "ntfs.h"
70473 +#include "ntfs_fs.h"
70476 + * LOG FILE structs
70477 + */
70479 +// clang-format off
70481 +#define MaxLogFileSize     0x100000000ull
70482 +#define DefaultLogPageSize 4096
70483 +#define MinLogRecordPages  0x30
70485 +struct RESTART_HDR {
70486 +       struct NTFS_RECORD_HEADER rhdr; // 'RSTR'
70487 +       __le32 sys_page_size; // 0x10: Page size of the system which initialized the log
70488 +       __le32 page_size;     // 0x14: Log page size used for this log file
70489 +       __le16 ra_off;        // 0x18:
70490 +       __le16 minor_ver;     // 0x1A:
70491 +       __le16 major_ver;     // 0x1C:
70492 +       __le16 fixups[];
70495 +#define LFS_NO_CLIENT 0xffff
70496 +#define LFS_NO_CLIENT_LE cpu_to_le16(0xffff)
70498 +struct CLIENT_REC {
70499 +       __le64 oldest_lsn;
70500 +       __le64 restart_lsn; // 0x08:
70501 +       __le16 prev_client; // 0x10:
70502 +       __le16 next_client; // 0x12:
70503 +       __le16 seq_num;     // 0x14:
70504 +       u8 align[6];        // 0x16
70505 +       __le32 name_bytes;  // 0x1C: in bytes
70506 +       __le16 name[32];    // 0x20: name of client
70509 +static_assert(sizeof(struct CLIENT_REC) == 0x60);
70511 +/* Two copies of these will exist at the beginning of the log file */
70512 +struct RESTART_AREA {
70513 +       __le64 current_lsn;    // 0x00: Current logical end of log file
70514 +       __le16 log_clients;    // 0x08: Maximum number of clients
70515 +       __le16 client_idx[2];  // 0x0A: free/use index into the client record arrays
70516 +       __le16 flags;          // 0x0E: See RESTART_SINGLE_PAGE_IO
70517 +       __le32 seq_num_bits;   // 0x10: the number of bits in sequence number.
70518 +       __le16 ra_len;         // 0x14:
70519 +       __le16 client_off;     // 0x16:
70520 +       __le64 l_size;         // 0x18: Usable log file size.
70521 +       __le32 last_lsn_data_len; // 0x20:
70522 +       __le16 rec_hdr_len;    // 0x24: log page data offset
70523 +       __le16 data_off;       // 0x26: log page data length
70524 +       __le32 open_log_count; // 0x28:
70525 +       __le32 align[5];       // 0x2C:
70526 +       struct CLIENT_REC clients[]; // 0x40:
70529 +struct LOG_REC_HDR {
70530 +       __le16 redo_op;      // 0x00:  NTFS_LOG_OPERATION
70531 +       __le16 undo_op;      // 0x02:  NTFS_LOG_OPERATION
70532 +       __le16 redo_off;     // 0x04:  Offset to Redo record
70533 +       __le16 redo_len;     // 0x06:  Redo length
70534 +       __le16 undo_off;     // 0x08:  Offset to Undo record
70535 +       __le16 undo_len;     // 0x0A:  Undo length
70536 +       __le16 target_attr;  // 0x0C:
70537 +       __le16 lcns_follow;  // 0x0E:
70538 +       __le16 record_off;   // 0x10:
70539 +       __le16 attr_off;     // 0x12:
70540 +       __le16 cluster_off;  // 0x14:
70541 +       __le16 reserved;     // 0x16:
70542 +       __le64 target_vcn;   // 0x18:
70543 +       __le64 page_lcns[];  // 0x20:
70546 +static_assert(sizeof(struct LOG_REC_HDR) == 0x20);
70548 +#define RESTART_ENTRY_ALLOCATED    0xFFFFFFFF
70549 +#define RESTART_ENTRY_ALLOCATED_LE cpu_to_le32(0xFFFFFFFF)
70551 +struct RESTART_TABLE {
70552 +       __le16 size;       // 0x00:  In bytes
70553 +       __le16 used;       // 0x02: entries
70554 +       __le16 total;      // 0x04: entries
70555 +       __le16 res[3];     // 0x06:
70556 +       __le32 free_goal;  // 0x0C:
70557 +       __le32 first_free; // 0x10
70558 +       __le32 last_free;  // 0x14
70562 +static_assert(sizeof(struct RESTART_TABLE) == 0x18);
70564 +struct ATTR_NAME_ENTRY {
70565 +       __le16 off; // offset in the Open attribute Table
70566 +       __le16 name_bytes;
70567 +       __le16 name[];
70570 +struct OPEN_ATTR_ENRTY {
70571 +       __le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
70572 +       __le32 bytes_per_index; // 0x04:
70573 +       enum ATTR_TYPE type;    // 0x08:
70574 +       u8 is_dirty_pages;      // 0x0C:
70575 +       u8 is_attr_name;        // 0x0B: Faked field to manage 'ptr'
70576 +       u8 name_len;            // 0x0C: Faked field to manage 'ptr'
70577 +       u8 res;
70578 +       struct MFT_REF ref; // 0x10: File Reference of file containing attribute
70579 +       __le64 open_record_lsn; // 0x18:
70580 +       void *ptr;              // 0x20:
70583 +/* 32 bit version of 'struct OPEN_ATTR_ENRTY' */
70584 +struct OPEN_ATTR_ENRTY_32 {
70585 +       __le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
70586 +       __le32 ptr;             // 0x04:
70587 +       struct MFT_REF ref;     // 0x08:
70588 +       __le64 open_record_lsn; // 0x10:
70589 +       u8 is_dirty_pages;      // 0x18:
70590 +       u8 is_attr_name;        // 0x19
70591 +       u8 res1[2];
70592 +       enum ATTR_TYPE type;    // 0x1C:
70593 +       u8 name_len;            // 0x20:  in wchar
70594 +       u8 res2[3];
70595 +       __le32 AttributeName;   // 0x24:
70596 +       __le32 bytes_per_index; // 0x28:
70599 +#define SIZEOF_OPENATTRIBUTEENTRY0 0x2c
70600 +// static_assert( 0x2C == sizeof(struct OPEN_ATTR_ENRTY_32) );
70601 +static_assert(sizeof(struct OPEN_ATTR_ENRTY) < SIZEOF_OPENATTRIBUTEENTRY0);
70604 + * One entry exists in the Dirty Pages Table for each page which is dirty at the
70605 + * time the Restart Area is written
70606 + */
70607 +struct DIR_PAGE_ENTRY {
70608 +       __le32 next;         // 0x00:  RESTART_ENTRY_ALLOCATED if allocated
70609 +       __le32 target_attr;  // 0x04:  Index into the Open attribute Table
70610 +       __le32 transfer_len; // 0x08:
70611 +       __le32 lcns_follow;  // 0x0C:
70612 +       __le64 vcn;          // 0x10:  Vcn of dirty page
70613 +       __le64 oldest_lsn;   // 0x18:
70614 +       __le64 page_lcns[];  // 0x20:
70617 +static_assert(sizeof(struct DIR_PAGE_ENTRY) == 0x20);
70619 +/* 32 bit version of 'struct DIR_PAGE_ENTRY' */
70620 +struct DIR_PAGE_ENTRY_32 {
70621 +       __le32 next;         // 0x00:  RESTART_ENTRY_ALLOCATED if allocated
70622 +       __le32 target_attr;  // 0x04:  Index into the Open attribute Table
70623 +       __le32 transfer_len; // 0x08:
70624 +       __le32 lcns_follow;  // 0x0C:
70625 +       __le32 reserved;     // 0x10:
70626 +       __le32 vcn_low;      // 0x14:  Vcn of dirty page
70627 +       __le32 vcn_hi;       // 0x18:  Vcn of dirty page
70628 +       __le32 oldest_lsn_low; // 0x1C:
70629 +       __le32 oldest_lsn_hi; // 0x1C:
70630 +       __le32 page_lcns_low; // 0x24:
70631 +       __le32 page_lcns_hi; // 0x24:
70634 +static_assert(offsetof(struct DIR_PAGE_ENTRY_32, vcn_low) == 0x14);
70635 +static_assert(sizeof(struct DIR_PAGE_ENTRY_32) == 0x2c);
70637 +enum transact_state {
70638 +       TransactionUninitialized = 0,
70639 +       TransactionActive,
70640 +       TransactionPrepared,
70641 +       TransactionCommitted
70644 +struct TRANSACTION_ENTRY {
70645 +       __le32 next;          // 0x00: RESTART_ENTRY_ALLOCATED if allocated
70646 +       u8 transact_state;    // 0x04:
70647 +       u8 reserved[3];       // 0x05:
70648 +       __le64 first_lsn;     // 0x08:
70649 +       __le64 prev_lsn;      // 0x10:
70650 +       __le64 undo_next_lsn; // 0x18:
70651 +       __le32 undo_records;  // 0x20: Number of undo log records pending abort
70652 +       __le32 undo_len;      // 0x24: Total undo size
70655 +static_assert(sizeof(struct TRANSACTION_ENTRY) == 0x28);
70657 +struct NTFS_RESTART {
70658 +       __le32 major_ver;             // 0x00:
70659 +       __le32 minor_ver;             // 0x04:
70660 +       __le64 check_point_start;     // 0x08:
70661 +       __le64 open_attr_table_lsn;   // 0x10:
70662 +       __le64 attr_names_lsn;        // 0x18:
70663 +       __le64 dirty_pages_table_lsn; // 0x20:
70664 +       __le64 transact_table_lsn;    // 0x28:
70665 +       __le32 open_attr_len;         // 0x30: In bytes
70666 +       __le32 attr_names_len;        // 0x34: In bytes
70667 +       __le32 dirty_pages_len;       // 0x38: In bytes
70668 +       __le32 transact_table_len;    // 0x3C: In bytes
70671 +static_assert(sizeof(struct NTFS_RESTART) == 0x40);
70673 +struct NEW_ATTRIBUTE_SIZES {
70674 +       __le64 alloc_size;
70675 +       __le64 valid_size;
70676 +       __le64 data_size;
70677 +       __le64 total_size;
70680 +struct BITMAP_RANGE {
70681 +       __le32 bitmap_off;
70682 +       __le32 bits;
70685 +struct LCN_RANGE {
70686 +       __le64 lcn;
70687 +       __le64 len;
70690 +/* The following type defines the different log record types */
70691 +#define LfsClientRecord  cpu_to_le32(1)
70692 +#define LfsClientRestart cpu_to_le32(2)
70694 +/* This is used to uniquely identify a client for a particular log file */
70695 +struct CLIENT_ID {
70696 +       __le16 seq_num;
70697 +       __le16 client_idx;
70700 +/* This is the header that begins every Log Record in the log file */
70701 +struct LFS_RECORD_HDR {
70702 +       __le64 this_lsn;    // 0x00:
70703 +       __le64 client_prev_lsn;  // 0x08:
70704 +       __le64 client_undo_next_lsn; // 0x10:
70705 +       __le32 client_data_len;  // 0x18:
70706 +       struct CLIENT_ID client; // 0x1C: Owner of this log record
70707 +       __le32 record_type; // 0x20: LfsClientRecord or LfsClientRestart
70708 +       __le32 transact_id; // 0x24:
70709 +       __le16 flags;       // 0x28:    LOG_RECORD_MULTI_PAGE
70710 +       u8 align[6];        // 0x2A:
70713 +#define LOG_RECORD_MULTI_PAGE cpu_to_le16(1)
70715 +static_assert(sizeof(struct LFS_RECORD_HDR) == 0x30);
70717 +struct LFS_RECORD {
70718 +       __le16 next_record_off; // 0x00: Offset of the free space in the page
70719 +       u8 align[6];         // 0x02:
70720 +       __le64 last_end_lsn; // 0x08: lsn for the last log record which ends on the page
70723 +static_assert(sizeof(struct LFS_RECORD) == 0x10);
70725 +struct RECORD_PAGE_HDR {
70726 +       struct NTFS_RECORD_HEADER rhdr; // 'RCRD'
70727 +       __le32 rflags;     // 0x10:  See LOG_PAGE_LOG_RECORD_END
70728 +       __le16 page_count; // 0x14:
70729 +       __le16 page_pos;   // 0x16:
70730 +       struct LFS_RECORD record_hdr; // 0x18
70731 +       __le16 fixups[10]; // 0x28
70732 +       __le32 file_off;   // 0x3c: used when major version >= 2
70735 +// clang-format on
70737 +// Page contains the end of a log record
70738 +#define LOG_PAGE_LOG_RECORD_END cpu_to_le32(0x00000001)
70740 +static inline bool is_log_record_end(const struct RECORD_PAGE_HDR *hdr)
70742 +       return hdr->rflags & LOG_PAGE_LOG_RECORD_END;
70745 +static_assert(offsetof(struct RECORD_PAGE_HDR, file_off) == 0x3c);
70748 + * END of NTFS LOG structures
70749 + */
70751 +/* Define some tuning parameters to keep the restart tables a reasonable size */
70752 +#define INITIAL_NUMBER_TRANSACTIONS 5
70754 +enum NTFS_LOG_OPERATION {
70756 +       Noop = 0x00,
70757 +       CompensationLogRecord = 0x01,
70758 +       InitializeFileRecordSegment = 0x02,
70759 +       DeallocateFileRecordSegment = 0x03,
70760 +       WriteEndOfFileRecordSegment = 0x04,
70761 +       CreateAttribute = 0x05,
70762 +       DeleteAttribute = 0x06,
70763 +       UpdateResidentValue = 0x07,
70764 +       UpdateNonresidentValue = 0x08,
70765 +       UpdateMappingPairs = 0x09,
70766 +       DeleteDirtyClusters = 0x0A,
70767 +       SetNewAttributeSizes = 0x0B,
70768 +       AddIndexEntryRoot = 0x0C,
70769 +       DeleteIndexEntryRoot = 0x0D,
70770 +       AddIndexEntryAllocation = 0x0E,
70771 +       DeleteIndexEntryAllocation = 0x0F,
70772 +       WriteEndOfIndexBuffer = 0x10,
70773 +       SetIndexEntryVcnRoot = 0x11,
70774 +       SetIndexEntryVcnAllocation = 0x12,
70775 +       UpdateFileNameRoot = 0x13,
70776 +       UpdateFileNameAllocation = 0x14,
70777 +       SetBitsInNonresidentBitMap = 0x15,
70778 +       ClearBitsInNonresidentBitMap = 0x16,
70779 +       HotFix = 0x17,
70780 +       EndTopLevelAction = 0x18,
70781 +       PrepareTransaction = 0x19,
70782 +       CommitTransaction = 0x1A,
70783 +       ForgetTransaction = 0x1B,
70784 +       OpenNonresidentAttribute = 0x1C,
70785 +       OpenAttributeTableDump = 0x1D,
70786 +       AttributeNamesDump = 0x1E,
70787 +       DirtyPageTableDump = 0x1F,
70788 +       TransactionTableDump = 0x20,
70789 +       UpdateRecordDataRoot = 0x21,
70790 +       UpdateRecordDataAllocation = 0x22,
70792 +       UpdateRelativeDataInIndex =
70793 +               0x23, // NtOfsRestartUpdateRelativeDataInIndex
70794 +       UpdateRelativeDataInIndex2 = 0x24,
70795 +       ZeroEndOfFileRecord = 0x25,
70799 + * Array for log records which require a target attribute
70800 + * A true indicates that the corresponding restart operation requires a target attribute
70801 + */
70802 +static const u8 AttributeRequired[] = {
70803 +       0xFC, 0xFB, 0xFF, 0x10, 0x06,
70806 +static inline bool is_target_required(u16 op)
70808 +       bool ret = op <= UpdateRecordDataAllocation &&
70809 +                  (AttributeRequired[op >> 3] >> (op & 7) & 1);
70810 +       return ret;
70813 +static inline bool can_skip_action(enum NTFS_LOG_OPERATION op)
70815 +       switch (op) {
70816 +       case Noop:
70817 +       case DeleteDirtyClusters:
70818 +       case HotFix:
70819 +       case EndTopLevelAction:
70820 +       case PrepareTransaction:
70821 +       case CommitTransaction:
70822 +       case ForgetTransaction:
70823 +       case CompensationLogRecord:
70824 +       case OpenNonresidentAttribute:
70825 +       case OpenAttributeTableDump:
70826 +       case AttributeNamesDump:
70827 +       case DirtyPageTableDump:
70828 +       case TransactionTableDump:
70829 +               return true;
70830 +       default:
70831 +               return false;
70832 +       }
70835 +enum { lcb_ctx_undo_next, lcb_ctx_prev, lcb_ctx_next };
70837 +/* bytes per restart table */
70838 +static inline u32 bytes_per_rt(const struct RESTART_TABLE *rt)
70840 +       return le16_to_cpu(rt->used) * le16_to_cpu(rt->size) +
70841 +              sizeof(struct RESTART_TABLE);
70844 +/* log record length */
70845 +static inline u32 lrh_length(const struct LOG_REC_HDR *lr)
70847 +       u16 t16 = le16_to_cpu(lr->lcns_follow);
70849 +       return struct_size(lr, page_lcns, max_t(u16, 1, t16));
70852 +struct lcb {
70853 +       struct LFS_RECORD_HDR *lrh; // Log record header of the current lsn
70854 +       struct LOG_REC_HDR *log_rec;
70855 +       u32 ctx_mode; // lcb_ctx_undo_next/lcb_ctx_prev/lcb_ctx_next
70856 +       struct CLIENT_ID client;
70857 +       bool alloc; // if true the we should deallocate 'log_rec'
70860 +static void lcb_put(struct lcb *lcb)
70862 +       if (lcb->alloc)
70863 +               ntfs_free(lcb->log_rec);
70864 +       ntfs_free(lcb->lrh);
70865 +       ntfs_free(lcb);
70869 + * oldest_client_lsn
70870 + *
70871 + * find the oldest lsn from active clients.
70872 + */
70873 +static inline void oldest_client_lsn(const struct CLIENT_REC *ca,
70874 +                                    __le16 next_client, u64 *oldest_lsn)
70876 +       while (next_client != LFS_NO_CLIENT_LE) {
70877 +               const struct CLIENT_REC *cr = ca + le16_to_cpu(next_client);
70878 +               u64 lsn = le64_to_cpu(cr->oldest_lsn);
70880 +               /* ignore this block if it's oldest lsn is 0 */
70881 +               if (lsn && lsn < *oldest_lsn)
70882 +                       *oldest_lsn = lsn;
70884 +               next_client = cr->next_client;
70885 +       }
70888 +static inline bool is_rst_page_hdr_valid(u32 file_off,
70889 +                                        const struct RESTART_HDR *rhdr)
70891 +       u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
70892 +       u32 page_size = le32_to_cpu(rhdr->page_size);
70893 +       u32 end_usa;
70894 +       u16 ro;
70896 +       if (sys_page < SECTOR_SIZE || page_size < SECTOR_SIZE ||
70897 +           sys_page & (sys_page - 1) || page_size & (page_size - 1)) {
70898 +               return false;
70899 +       }
70901 +       /* Check that if the file offset isn't 0, it is the system page size */
70902 +       if (file_off && file_off != sys_page)
70903 +               return false;
70905 +       /* Check support version 1.1+ */
70906 +       if (le16_to_cpu(rhdr->major_ver) <= 1 && !rhdr->minor_ver)
70907 +               return false;
70909 +       if (le16_to_cpu(rhdr->major_ver) > 2)
70910 +               return false;
70912 +       ro = le16_to_cpu(rhdr->ra_off);
70913 +       if (!IsQuadAligned(ro) || ro > sys_page)
70914 +               return false;
70916 +       end_usa = ((sys_page >> SECTOR_SHIFT) + 1) * sizeof(short);
70917 +       end_usa += le16_to_cpu(rhdr->rhdr.fix_off);
70919 +       if (ro < end_usa)
70920 +               return false;
70922 +       return true;
70925 +static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
70927 +       const struct RESTART_AREA *ra;
70928 +       u16 cl, fl, ul;
70929 +       u32 off, l_size, file_dat_bits, file_size_round;
70930 +       u16 ro = le16_to_cpu(rhdr->ra_off);
70931 +       u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
70933 +       if (ro + offsetof(struct RESTART_AREA, l_size) >
70934 +           SECTOR_SIZE - sizeof(short))
70935 +               return false;
70937 +       ra = Add2Ptr(rhdr, ro);
70938 +       cl = le16_to_cpu(ra->log_clients);
70940 +       if (cl > 1)
70941 +               return false;
70943 +       off = le16_to_cpu(ra->client_off);
70945 +       if (!IsQuadAligned(off) || ro + off > SECTOR_SIZE - sizeof(short))
70946 +               return false;
70948 +       off += cl * sizeof(struct CLIENT_REC);
70950 +       if (off > sys_page)
70951 +               return false;
70953 +       /*
70954 +        * Check the restart length field and whether the entire
70955 +        * restart area is contained that length
70956 +        */
70957 +       if (le16_to_cpu(rhdr->ra_off) + le16_to_cpu(ra->ra_len) > sys_page ||
70958 +           off > le16_to_cpu(ra->ra_len)) {
70959 +               return false;
70960 +       }
70962 +       /*
70963 +        * As a final check make sure that the use list and the free list
70964 +        * are either empty or point to a valid client
70965 +        */
70966 +       fl = le16_to_cpu(ra->client_idx[0]);
70967 +       ul = le16_to_cpu(ra->client_idx[1]);
70968 +       if ((fl != LFS_NO_CLIENT && fl >= cl) ||
70969 +           (ul != LFS_NO_CLIENT && ul >= cl))
70970 +               return false;
70972 +       /* Make sure the sequence number bits match the log file size */
70973 +       l_size = le64_to_cpu(ra->l_size);
70975 +       file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
70976 +       file_size_round = 1u << (file_dat_bits + 3);
70977 +       if (file_size_round != l_size &&
70978 +           (file_size_round < l_size || (file_size_round / 2) > l_size)) {
70979 +               return false;
70980 +       }
70982 +       /* The log page data offset and record header length must be quad-aligned */
70983 +       if (!IsQuadAligned(le16_to_cpu(ra->data_off)) ||
70984 +           !IsQuadAligned(le16_to_cpu(ra->rec_hdr_len)))
70985 +               return false;
70987 +       return true;
70990 +static inline bool is_client_area_valid(const struct RESTART_HDR *rhdr,
70991 +                                       bool usa_error)
70993 +       u16 ro = le16_to_cpu(rhdr->ra_off);
70994 +       const struct RESTART_AREA *ra = Add2Ptr(rhdr, ro);
70995 +       u16 ra_len = le16_to_cpu(ra->ra_len);
70996 +       const struct CLIENT_REC *ca;
70997 +       u32 i;
70999 +       if (usa_error && ra_len + ro > SECTOR_SIZE - sizeof(short))
71000 +               return false;
71002 +       /* Find the start of the client array */
71003 +       ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
71005 +       /*
71006 +        * Start with the free list
71007 +        * Check that all the clients are valid and that there isn't a cycle
71008 +        * Do the in-use list on the second pass
71009 +        */
71010 +       for (i = 0; i < 2; i++) {
71011 +               u16 client_idx = le16_to_cpu(ra->client_idx[i]);
71012 +               bool first_client = true;
71013 +               u16 clients = le16_to_cpu(ra->log_clients);
71015 +               while (client_idx != LFS_NO_CLIENT) {
71016 +                       const struct CLIENT_REC *cr;
71018 +                       if (!clients ||
71019 +                           client_idx >= le16_to_cpu(ra->log_clients))
71020 +                               return false;
71022 +                       clients -= 1;
71023 +                       cr = ca + client_idx;
71025 +                       client_idx = le16_to_cpu(cr->next_client);
71027 +                       if (first_client) {
71028 +                               first_client = false;
71029 +                               if (cr->prev_client != LFS_NO_CLIENT_LE)
71030 +                                       return false;
71031 +                       }
71032 +               }
71033 +       }
71035 +       return true;
71039 + * remove_client
71040 + *
71041 + * remove a client record from a client record list an restart area
71042 + */
71043 +static inline void remove_client(struct CLIENT_REC *ca,
71044 +                                const struct CLIENT_REC *cr, __le16 *head)
71046 +       if (cr->prev_client == LFS_NO_CLIENT_LE)
71047 +               *head = cr->next_client;
71048 +       else
71049 +               ca[le16_to_cpu(cr->prev_client)].next_client = cr->next_client;
71051 +       if (cr->next_client != LFS_NO_CLIENT_LE)
71052 +               ca[le16_to_cpu(cr->next_client)].prev_client = cr->prev_client;
71056 + * add_client
71057 + *
71058 + * add a client record to the start of a list
71059 + */
71060 +static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head)
71062 +       struct CLIENT_REC *cr = ca + index;
71064 +       cr->prev_client = LFS_NO_CLIENT_LE;
71065 +       cr->next_client = *head;
71067 +       if (*head != LFS_NO_CLIENT_LE)
71068 +               ca[le16_to_cpu(*head)].prev_client = cpu_to_le16(index);
71070 +       *head = cpu_to_le16(index);
71074 + * enum_rstbl
71075 + *
71076 + */
71077 +static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c)
71079 +       __le32 *e;
71080 +       u32 bprt;
71081 +       u16 rsize = t ? le16_to_cpu(t->size) : 0;
71083 +       if (!c) {
71084 +               if (!t || !t->total)
71085 +                       return NULL;
71086 +               e = Add2Ptr(t, sizeof(struct RESTART_TABLE));
71087 +       } else {
71088 +               e = Add2Ptr(c, rsize);
71089 +       }
71091 +       /* Loop until we hit the first one allocated, or the end of the list */
71092 +       for (bprt = bytes_per_rt(t); PtrOffset(t, e) < bprt;
71093 +            e = Add2Ptr(e, rsize)) {
71094 +               if (*e == RESTART_ENTRY_ALLOCATED_LE)
71095 +                       return e;
71096 +       }
71097 +       return NULL;
71101 + * find_dp
71102 + *
71103 + * searches for a 'vcn' in Dirty Page Table,
71104 + */
71105 +static inline struct DIR_PAGE_ENTRY *find_dp(struct RESTART_TABLE *dptbl,
71106 +                                            u32 target_attr, u64 vcn)
71108 +       __le32 ta = cpu_to_le32(target_attr);
71109 +       struct DIR_PAGE_ENTRY *dp = NULL;
71111 +       while ((dp = enum_rstbl(dptbl, dp))) {
71112 +               u64 dp_vcn = le64_to_cpu(dp->vcn);
71114 +               if (dp->target_attr == ta && vcn >= dp_vcn &&
71115 +                   vcn < dp_vcn + le32_to_cpu(dp->lcns_follow)) {
71116 +                       return dp;
71117 +               }
71118 +       }
71119 +       return NULL;
71122 +static inline u32 norm_file_page(u32 page_size, u32 *l_size, bool use_default)
71124 +       if (use_default)
71125 +               page_size = DefaultLogPageSize;
71127 +       /* Round the file size down to a system page boundary */
71128 +       *l_size &= ~(page_size - 1);
71130 +       /* File should contain at least 2 restart pages and MinLogRecordPages pages */
71131 +       if (*l_size < (MinLogRecordPages + 2) * page_size)
71132 +               return 0;
71134 +       return page_size;
71137 +static bool check_log_rec(const struct LOG_REC_HDR *lr, u32 bytes, u32 tr,
71138 +                         u32 bytes_per_attr_entry)
71140 +       u16 t16;
71142 +       if (bytes < sizeof(struct LOG_REC_HDR))
71143 +               return false;
71144 +       if (!tr)
71145 +               return false;
71147 +       if ((tr - sizeof(struct RESTART_TABLE)) %
71148 +           sizeof(struct TRANSACTION_ENTRY))
71149 +               return false;
71151 +       if (le16_to_cpu(lr->redo_off) & 7)
71152 +               return false;
71154 +       if (le16_to_cpu(lr->undo_off) & 7)
71155 +               return false;
71157 +       if (lr->target_attr)
71158 +               goto check_lcns;
71160 +       if (is_target_required(le16_to_cpu(lr->redo_op)))
71161 +               return false;
71163 +       if (is_target_required(le16_to_cpu(lr->undo_op)))
71164 +               return false;
71166 +check_lcns:
71167 +       if (!lr->lcns_follow)
71168 +               goto check_length;
71170 +       t16 = le16_to_cpu(lr->target_attr);
71171 +       if ((t16 - sizeof(struct RESTART_TABLE)) % bytes_per_attr_entry)
71172 +               return false;
71174 +check_length:
71175 +       if (bytes < lrh_length(lr))
71176 +               return false;
71178 +       return true;
71181 +static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
71183 +       u32 ts;
71184 +       u32 i, off;
71185 +       u16 rsize = le16_to_cpu(rt->size);
71186 +       u16 ne = le16_to_cpu(rt->used);
71187 +       u32 ff = le32_to_cpu(rt->first_free);
71188 +       u32 lf = le32_to_cpu(rt->last_free);
71190 +       ts = rsize * ne + sizeof(struct RESTART_TABLE);
71192 +       if (!rsize || rsize > bytes ||
71193 +           rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
71194 +           le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
71195 +           (ff && ff < sizeof(struct RESTART_TABLE)) ||
71196 +           (lf && lf < sizeof(struct RESTART_TABLE))) {
71197 +               return false;
71198 +       }
71200 +       /* Verify each entry is either allocated or points
71201 +        * to a valid offset the table
71202 +        */
71203 +       for (i = 0; i < ne; i++) {
71204 +               off = le32_to_cpu(*(__le32 *)Add2Ptr(
71205 +                       rt, i * rsize + sizeof(struct RESTART_TABLE)));
71207 +               if (off != RESTART_ENTRY_ALLOCATED && off &&
71208 +                   (off < sizeof(struct RESTART_TABLE) ||
71209 +                    ((off - sizeof(struct RESTART_TABLE)) % rsize))) {
71210 +                       return false;
71211 +               }
71212 +       }
71214 +       /* Walk through the list headed by the first entry to make
71215 +        * sure none of the entries are currently being used
71216 +        */
71217 +       for (off = ff; off;) {
71218 +               if (off == RESTART_ENTRY_ALLOCATED)
71219 +                       return false;
71221 +               off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
71222 +       }
71224 +       return true;
71228 + * free_rsttbl_idx
71229 + *
71230 + * frees a previously allocated index a Restart Table.
71231 + */
71232 +static inline void free_rsttbl_idx(struct RESTART_TABLE *rt, u32 off)
71234 +       __le32 *e;
71235 +       u32 lf = le32_to_cpu(rt->last_free);
71236 +       __le32 off_le = cpu_to_le32(off);
71238 +       e = Add2Ptr(rt, off);
71240 +       if (off < le32_to_cpu(rt->free_goal)) {
71241 +               *e = rt->first_free;
71242 +               rt->first_free = off_le;
71243 +               if (!lf)
71244 +                       rt->last_free = off_le;
71245 +       } else {
71246 +               if (lf)
71247 +                       *(__le32 *)Add2Ptr(rt, lf) = off_le;
71248 +               else
71249 +                       rt->first_free = off_le;
71251 +               rt->last_free = off_le;
71252 +               *e = 0;
71253 +       }
71255 +       le16_sub_cpu(&rt->total, 1);
71258 +static inline struct RESTART_TABLE *init_rsttbl(u16 esize, u16 used)
71260 +       __le32 *e, *last_free;
71261 +       u32 off;
71262 +       u32 bytes = esize * used + sizeof(struct RESTART_TABLE);
71263 +       u32 lf = sizeof(struct RESTART_TABLE) + (used - 1) * esize;
71264 +       struct RESTART_TABLE *t = ntfs_zalloc(bytes);
71266 +       t->size = cpu_to_le16(esize);
71267 +       t->used = cpu_to_le16(used);
71268 +       t->free_goal = cpu_to_le32(~0u);
71269 +       t->first_free = cpu_to_le32(sizeof(struct RESTART_TABLE));
71270 +       t->last_free = cpu_to_le32(lf);
71272 +       e = (__le32 *)(t + 1);
71273 +       last_free = Add2Ptr(t, lf);
71275 +       for (off = sizeof(struct RESTART_TABLE) + esize; e < last_free;
71276 +            e = Add2Ptr(e, esize), off += esize) {
71277 +               *e = cpu_to_le32(off);
71278 +       }
71279 +       return t;
71282 +static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
71283 +                                                 u32 add, u32 free_goal)
71285 +       u16 esize = le16_to_cpu(tbl->size);
71286 +       __le32 osize = cpu_to_le32(bytes_per_rt(tbl));
71287 +       u32 used = le16_to_cpu(tbl->used);
71288 +       struct RESTART_TABLE *rt = init_rsttbl(esize, used + add);
71290 +       memcpy(rt + 1, tbl + 1, esize * used);
71292 +       rt->free_goal = free_goal == ~0u
71293 +                               ? cpu_to_le32(~0u)
71294 +                               : cpu_to_le32(sizeof(struct RESTART_TABLE) +
71295 +                                             free_goal * esize);
71297 +       if (tbl->first_free) {
71298 +               rt->first_free = tbl->first_free;
71299 +               *(__le32 *)Add2Ptr(rt, le32_to_cpu(tbl->last_free)) = osize;
71300 +       } else {
71301 +               rt->first_free = osize;
71302 +       }
71304 +       rt->total = tbl->total;
71306 +       ntfs_free(tbl);
71307 +       return rt;
71311 + * alloc_rsttbl_idx
71312 + *
71313 + * allocates an index from within a previously initialized Restart Table
71314 + */
71315 +static inline void *alloc_rsttbl_idx(struct RESTART_TABLE **tbl)
71317 +       u32 off;
71318 +       __le32 *e;
71319 +       struct RESTART_TABLE *t = *tbl;
71321 +       if (!t->first_free)
71322 +               *tbl = t = extend_rsttbl(t, 16, ~0u);
71324 +       off = le32_to_cpu(t->first_free);
71326 +       /* Dequeue this entry and zero it. */
71327 +       e = Add2Ptr(t, off);
71329 +       t->first_free = *e;
71331 +       memset(e, 0, le16_to_cpu(t->size));
71333 +       *e = RESTART_ENTRY_ALLOCATED_LE;
71335 +       /* If list is going empty, then we fix the last_free as well. */
71336 +       if (!t->first_free)
71337 +               t->last_free = 0;
71339 +       le16_add_cpu(&t->total, 1);
71341 +       return Add2Ptr(t, off);
71345 + * alloc_rsttbl_from_idx
71346 + *
71347 + * allocates a specific index from within a previously initialized Restart Table
71348 + */
71349 +static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
71351 +       u32 off;
71352 +       __le32 *e;
71353 +       struct RESTART_TABLE *rt = *tbl;
71354 +       u32 bytes = bytes_per_rt(rt);
71355 +       u16 esize = le16_to_cpu(rt->size);
71357 +       /* If the entry is not the table, we will have to extend the table */
71358 +       if (vbo >= bytes) {
71359 +               /*
71360 +                * extend the size by computing the number of entries between
71361 +                * the existing size and the desired index and adding
71362 +                * 1 to that
71363 +                */
71364 +               u32 bytes2idx = vbo - bytes;
71366 +               /* There should always be an integral number of entries being added */
71367 +               /* Now extend the table */
71368 +               *tbl = rt = extend_rsttbl(rt, bytes2idx / esize + 1, bytes);
71369 +               if (!rt)
71370 +                       return NULL;
71371 +       }
71373 +       /* see if the entry is already allocated, and just return if it is. */
71374 +       e = Add2Ptr(rt, vbo);
71376 +       if (*e == RESTART_ENTRY_ALLOCATED_LE)
71377 +               return e;
71379 +       /*
71380 +        * Walk through the table, looking for the entry we're
71381 +        * interested and the previous entry
71382 +        */
71383 +       off = le32_to_cpu(rt->first_free);
71384 +       e = Add2Ptr(rt, off);
71386 +       if (off == vbo) {
71387 +               /* this is a match */
71388 +               rt->first_free = *e;
71389 +               goto skip_looking;
71390 +       }
71392 +       /*
71393 +        * need to walk through the list looking for the predecessor of our entry
71394 +        */
71395 +       for (;;) {
71396 +               /* Remember the entry just found */
71397 +               u32 last_off = off;
71398 +               __le32 *last_e = e;
71400 +               /* should never run of entries. */
71402 +               /* Lookup up the next entry the list */
71403 +               off = le32_to_cpu(*last_e);
71404 +               e = Add2Ptr(rt, off);
71406 +               /* If this is our match we are done */
71407 +               if (off == vbo) {
71408 +                       *last_e = *e;
71410 +                       /* If this was the last entry, we update that the table as well */
71411 +                       if (le32_to_cpu(rt->last_free) == off)
71412 +                               rt->last_free = cpu_to_le32(last_off);
71413 +                       break;
71414 +               }
71415 +       }
71417 +skip_looking:
71418 +       /* If the list is now empty, we fix the last_free as well */
71419 +       if (!rt->first_free)
71420 +               rt->last_free = 0;
71422 +       /* Zero this entry */
71423 +       memset(e, 0, esize);
71424 +       *e = RESTART_ENTRY_ALLOCATED_LE;
71426 +       le16_add_cpu(&rt->total, 1);
71428 +       return e;
71431 +#define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)
71433 +#define NTFSLOG_WRAPPED 0x00000001
71434 +#define NTFSLOG_MULTIPLE_PAGE_IO 0x00000002
71435 +#define NTFSLOG_NO_LAST_LSN 0x00000004
71436 +#define NTFSLOG_REUSE_TAIL 0x00000010
71437 +#define NTFSLOG_NO_OLDEST_LSN 0x00000020
71440 + * Helper struct to work with NTFS LogFile
71441 + */
71442 +struct ntfs_log {
71443 +       struct ntfs_inode *ni;
71445 +       u32 l_size;
71446 +       u32 sys_page_size;
71447 +       u32 sys_page_mask;
71448 +       u32 page_size;
71449 +       u32 page_mask; // page_size - 1
71450 +       u8 page_bits;
71451 +       struct RECORD_PAGE_HDR *one_page_buf;
71453 +       struct RESTART_TABLE *open_attr_tbl;
71454 +       u32 transaction_id;
71455 +       u32 clst_per_page;
71457 +       u32 first_page;
71458 +       u32 next_page;
71459 +       u32 ra_off;
71460 +       u32 data_off;
71461 +       u32 restart_size;
71462 +       u32 data_size;
71463 +       u16 record_header_len;
71464 +       u64 seq_num;
71465 +       u32 seq_num_bits;
71466 +       u32 file_data_bits;
71467 +       u32 seq_num_mask; /* (1 << file_data_bits) - 1 */
71469 +       struct RESTART_AREA *ra; /* in-memory image of the next restart area */
71470 +       u32 ra_size; /* the usable size of the restart area */
71472 +       /*
71473 +        * If true, then the in-memory restart area is to be written
71474 +        * to the first position on the disk
71475 +        */
71476 +       bool init_ra;
71477 +       bool set_dirty; /* true if we need to set dirty flag */
71479 +       u64 oldest_lsn;
71481 +       u32 oldest_lsn_off;
71482 +       u64 last_lsn;
71484 +       u32 total_avail;
71485 +       u32 total_avail_pages;
71486 +       u32 total_undo_commit;
71487 +       u32 max_current_avail;
71488 +       u32 current_avail;
71489 +       u32 reserved;
71491 +       short major_ver;
71492 +       short minor_ver;
71494 +       u32 l_flags; /* See NTFSLOG_XXX */
71495 +       u32 current_openlog_count; /* On-disk value for open_log_count */
71497 +       struct CLIENT_ID client_id;
71498 +       u32 client_undo_commit;
71501 +static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
71503 +       u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3);
71505 +       return vbo;
71508 +/* compute the offset in the log file of the next log page */
71509 +static inline u32 next_page_off(struct ntfs_log *log, u32 off)
71511 +       off = (off & ~log->sys_page_mask) + log->page_size;
71512 +       return off >= log->l_size ? log->first_page : off;
71515 +static inline u32 lsn_to_page_off(struct ntfs_log *log, u64 lsn)
71517 +       return (((u32)lsn) << 3) & log->page_mask;
71520 +static inline u64 vbo_to_lsn(struct ntfs_log *log, u32 off, u64 Seq)
71522 +       return (off >> 3) + (Seq << log->file_data_bits);
71525 +static inline bool is_lsn_in_file(struct ntfs_log *log, u64 lsn)
71527 +       return lsn >= log->oldest_lsn &&
71528 +              lsn <= le64_to_cpu(log->ra->current_lsn);
71531 +static inline u32 hdr_file_off(struct ntfs_log *log,
71532 +                              struct RECORD_PAGE_HDR *hdr)
71534 +       if (log->major_ver < 2)
71535 +               return le64_to_cpu(hdr->rhdr.lsn);
71537 +       return le32_to_cpu(hdr->file_off);
71540 +static inline u64 base_lsn(struct ntfs_log *log,
71541 +                          const struct RECORD_PAGE_HDR *hdr, u64 lsn)
71543 +       u64 h_lsn = le64_to_cpu(hdr->rhdr.lsn);
71544 +       u64 ret = (((h_lsn >> log->file_data_bits) +
71545 +                   (lsn < (lsn_to_vbo(log, h_lsn) & ~log->page_mask) ? 1 : 0))
71546 +                  << log->file_data_bits) +
71547 +                 ((((is_log_record_end(hdr) &&
71548 +                     h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn))
71549 +                            ? le16_to_cpu(hdr->record_hdr.next_record_off)
71550 +                            : log->page_size) +
71551 +                   lsn) >>
71552 +                  3);
71554 +       return ret;
71557 +static inline bool verify_client_lsn(struct ntfs_log *log,
71558 +                                    const struct CLIENT_REC *client, u64 lsn)
71560 +       return lsn >= le64_to_cpu(client->oldest_lsn) &&
71561 +              lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
71564 +struct restart_info {
71565 +       u64 last_lsn;
71566 +       struct RESTART_HDR *r_page;
71567 +       u32 vbo;
71568 +       bool chkdsk_was_run;
71569 +       bool valid_page;
71570 +       bool initialized;
71571 +       bool restart;
71574 +static int read_log_page(struct ntfs_log *log, u32 vbo,
71575 +                        struct RECORD_PAGE_HDR **buffer, bool *usa_error)
71577 +       int err = 0;
71578 +       u32 page_idx = vbo >> log->page_bits;
71579 +       u32 page_off = vbo & log->page_mask;
71580 +       u32 bytes = log->page_size - page_off;
71581 +       void *to_free = NULL;
71582 +       u32 page_vbo = page_idx << log->page_bits;
71583 +       struct RECORD_PAGE_HDR *page_buf;
71584 +       struct ntfs_inode *ni = log->ni;
71585 +       bool bBAAD;
71587 +       if (vbo >= log->l_size)
71588 +               return -EINVAL;
71590 +       if (!*buffer) {
71591 +               to_free = ntfs_malloc(bytes);
71592 +               if (!to_free)
71593 +                       return -ENOMEM;
71594 +               *buffer = to_free;
71595 +       }
71597 +       page_buf = page_off ? log->one_page_buf : *buffer;
71599 +       err = ntfs_read_run_nb(ni->mi.sbi, &ni->file.run, page_vbo, page_buf,
71600 +                              log->page_size, NULL);
71601 +       if (err)
71602 +               goto out;
71604 +       if (page_buf->rhdr.sign != NTFS_FFFF_SIGNATURE)
71605 +               ntfs_fix_post_read(&page_buf->rhdr, PAGE_SIZE, false);
71607 +       if (page_buf != *buffer)
71608 +               memcpy(*buffer, Add2Ptr(page_buf, page_off), bytes);
71610 +       bBAAD = page_buf->rhdr.sign == NTFS_BAAD_SIGNATURE;
71612 +       if (usa_error)
71613 +               *usa_error = bBAAD;
71614 +       /* Check that the update sequence array for this page is valid */
71615 +       /* If we don't allow errors, raise an error status */
71616 +       else if (bBAAD)
71617 +               err = -EINVAL;
71619 +out:
71620 +       if (err && to_free) {
71621 +               ntfs_free(to_free);
71622 +               *buffer = NULL;
71623 +       }
71625 +       return err;
71629 + * log_read_rst
71630 + *
71631 + * it walks through 512 blocks of the file looking for a valid restart page header
71632 + * It will stop the first time we find a valid page header
71633 + */
71634 +static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
71635 +                       struct restart_info *info)
71637 +       u32 skip, vbo;
71638 +       struct RESTART_HDR *r_page = ntfs_malloc(DefaultLogPageSize);
71640 +       if (!r_page)
71641 +               return -ENOMEM;
71643 +       memset(info, 0, sizeof(struct restart_info));
71645 +       /* Determine which restart area we are looking for */
71646 +       if (first) {
71647 +               vbo = 0;
71648 +               skip = 512;
71649 +       } else {
71650 +               vbo = 512;
71651 +               skip = 0;
71652 +       }
71654 +       /* loop continuously until we succeed */
71655 +       for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
71656 +               bool usa_error;
71657 +               u32 sys_page_size;
71658 +               bool brst, bchk;
71659 +               struct RESTART_AREA *ra;
71661 +               /* Read a page header at the current offset */
71662 +               if (read_log_page(log, vbo, (struct RECORD_PAGE_HDR **)&r_page,
71663 +                                 &usa_error)) {
71664 +                       /* ignore any errors */
71665 +                       continue;
71666 +               }
71668 +               /* exit if the signature is a log record page */
71669 +               if (r_page->rhdr.sign == NTFS_RCRD_SIGNATURE) {
71670 +                       info->initialized = true;
71671 +                       break;
71672 +               }
71674 +               brst = r_page->rhdr.sign == NTFS_RSTR_SIGNATURE;
71675 +               bchk = r_page->rhdr.sign == NTFS_CHKD_SIGNATURE;
71677 +               if (!bchk && !brst) {
71678 +                       if (r_page->rhdr.sign != NTFS_FFFF_SIGNATURE) {
71679 +                               /*
71680 +                                * Remember if the signature does not
71681 +                                * indicate uninitialized file
71682 +                                */
71683 +                               info->initialized = true;
71684 +                       }
71685 +                       continue;
71686 +               }
71688 +               ra = NULL;
71689 +               info->valid_page = false;
71690 +               info->initialized = true;
71691 +               info->vbo = vbo;
71693 +               /* Let's check the restart area if this is a valid page */
71694 +               if (!is_rst_page_hdr_valid(vbo, r_page))
71695 +                       goto check_result;
71696 +               ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
71698 +               if (!is_rst_area_valid(r_page))
71699 +                       goto check_result;
71701 +               /*
71702 +                * We have a valid restart page header and restart area.
71703 +                * If chkdsk was run or we have no clients then we have
71704 +                * no more checking to do
71705 +                */
71706 +               if (bchk || ra->client_idx[1] == LFS_NO_CLIENT_LE) {
71707 +                       info->valid_page = true;
71708 +                       goto check_result;
71709 +               }
71711 +               /* Read the entire restart area */
71712 +               sys_page_size = le32_to_cpu(r_page->sys_page_size);
71713 +               if (DefaultLogPageSize != sys_page_size) {
71714 +                       ntfs_free(r_page);
71715 +                       r_page = ntfs_zalloc(sys_page_size);
71716 +                       if (!r_page)
71717 +                               return -ENOMEM;
71719 +                       if (read_log_page(log, vbo,
71720 +                                         (struct RECORD_PAGE_HDR **)&r_page,
71721 +                                         &usa_error)) {
71722 +                               /* ignore any errors */
71723 +                               ntfs_free(r_page);
71724 +                               r_page = NULL;
71725 +                               continue;
71726 +                       }
71727 +               }
71729 +               if (is_client_area_valid(r_page, usa_error)) {
71730 +                       info->valid_page = true;
71731 +                       ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
71732 +               }
71734 +check_result:
71735 +               /* If chkdsk was run then update the caller's values and return */
71736 +               if (r_page->rhdr.sign == NTFS_CHKD_SIGNATURE) {
71737 +                       info->chkdsk_was_run = true;
71738 +                       info->last_lsn = le64_to_cpu(r_page->rhdr.lsn);
71739 +                       info->restart = true;
71740 +                       info->r_page = r_page;
71741 +                       return 0;
71742 +               }
71744 +               /* If we have a valid page then copy the values we need from it */
71745 +               if (info->valid_page) {
71746 +                       info->last_lsn = le64_to_cpu(ra->current_lsn);
71747 +                       info->restart = true;
71748 +                       info->r_page = r_page;
71749 +                       return 0;
71750 +               }
71751 +       }
71753 +       ntfs_free(r_page);
71755 +       return 0;
71759 + * log_init_pg_hdr
71760 + *
71761 + * init "log' from restart page header
71762 + */
71763 +static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
71764 +                           u32 page_size, u16 major_ver, u16 minor_ver)
71766 +       log->sys_page_size = sys_page_size;
71767 +       log->sys_page_mask = sys_page_size - 1;
71768 +       log->page_size = page_size;
71769 +       log->page_mask = page_size - 1;
71770 +       log->page_bits = blksize_bits(page_size);
71772 +       log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits;
71773 +       if (!log->clst_per_page)
71774 +               log->clst_per_page = 1;
71776 +       log->first_page = major_ver >= 2
71777 +                                 ? 0x22 * page_size
71778 +                                 : ((sys_page_size << 1) + (page_size << 1));
71779 +       log->major_ver = major_ver;
71780 +       log->minor_ver = minor_ver;
71784 + * log_create
71785 + *
71786 + * init "log" in cases when we don't have a restart area to use
71787 + */
71788 +static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn,
71789 +                      u32 open_log_count, bool wrapped, bool use_multi_page)
71791 +       log->l_size = l_size;
71792 +       /* All file offsets must be quadword aligned */
71793 +       log->file_data_bits = blksize_bits(l_size) - 3;
71794 +       log->seq_num_mask = (8 << log->file_data_bits) - 1;
71795 +       log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits;
71796 +       log->seq_num = (last_lsn >> log->file_data_bits) + 2;
71797 +       log->next_page = log->first_page;
71798 +       log->oldest_lsn = log->seq_num << log->file_data_bits;
71799 +       log->oldest_lsn_off = 0;
71800 +       log->last_lsn = log->oldest_lsn;
71802 +       log->l_flags |= NTFSLOG_NO_LAST_LSN | NTFSLOG_NO_OLDEST_LSN;
71804 +       /* Set the correct flags for the I/O and indicate if we have wrapped */
71805 +       if (wrapped)
71806 +               log->l_flags |= NTFSLOG_WRAPPED;
71808 +       if (use_multi_page)
71809 +               log->l_flags |= NTFSLOG_MULTIPLE_PAGE_IO;
71811 +       /* Compute the log page values */
71812 +       log->data_off = QuadAlign(
71813 +               offsetof(struct RECORD_PAGE_HDR, fixups) +
71814 +               sizeof(short) * ((log->page_size >> SECTOR_SHIFT) + 1));
71815 +       log->data_size = log->page_size - log->data_off;
71816 +       log->record_header_len = sizeof(struct LFS_RECORD_HDR);
71818 +       /* Remember the different page sizes for reservation */
71819 +       log->reserved = log->data_size - log->record_header_len;
71821 +       /* Compute the restart page values. */
71822 +       log->ra_off = QuadAlign(
71823 +               offsetof(struct RESTART_HDR, fixups) +
71824 +               sizeof(short) * ((log->sys_page_size >> SECTOR_SHIFT) + 1));
71825 +       log->restart_size = log->sys_page_size - log->ra_off;
71826 +       log->ra_size = struct_size(log->ra, clients, 1);
71827 +       log->current_openlog_count = open_log_count;
71829 +       /*
71830 +        * The total available log file space is the number of
71831 +        * log file pages times the space available on each page
71832 +        */
71833 +       log->total_avail_pages = log->l_size - log->first_page;
71834 +       log->total_avail = log->total_avail_pages >> log->page_bits;
71836 +       /*
71837 +        * We assume that we can't use the end of the page less than
71838 +        * the file record size
71839 +        * Then we won't need to reserve more than the caller asks for
71840 +        */
71841 +       log->max_current_avail = log->total_avail * log->reserved;
71842 +       log->total_avail = log->total_avail * log->data_size;
71843 +       log->current_avail = log->max_current_avail;
71847 + * log_create_ra
71848 + *
71849 + * This routine is called to fill a restart area from the values stored in 'log'
71850 + */
71851 +static struct RESTART_AREA *log_create_ra(struct ntfs_log *log)
71853 +       struct CLIENT_REC *cr;
71854 +       struct RESTART_AREA *ra = ntfs_zalloc(log->restart_size);
71856 +       if (!ra)
71857 +               return NULL;
71859 +       ra->current_lsn = cpu_to_le64(log->last_lsn);
71860 +       ra->log_clients = cpu_to_le16(1);
71861 +       ra->client_idx[1] = LFS_NO_CLIENT_LE;
71862 +       if (log->l_flags & NTFSLOG_MULTIPLE_PAGE_IO)
71863 +               ra->flags = RESTART_SINGLE_PAGE_IO;
71864 +       ra->seq_num_bits = cpu_to_le32(log->seq_num_bits);
71865 +       ra->ra_len = cpu_to_le16(log->ra_size);
71866 +       ra->client_off = cpu_to_le16(offsetof(struct RESTART_AREA, clients));
71867 +       ra->l_size = cpu_to_le64(log->l_size);
71868 +       ra->rec_hdr_len = cpu_to_le16(log->record_header_len);
71869 +       ra->data_off = cpu_to_le16(log->data_off);
71870 +       ra->open_log_count = cpu_to_le32(log->current_openlog_count + 1);
71872 +       cr = ra->clients;
71874 +       cr->prev_client = LFS_NO_CLIENT_LE;
71875 +       cr->next_client = LFS_NO_CLIENT_LE;
71877 +       return ra;
71880 +static u32 final_log_off(struct ntfs_log *log, u64 lsn, u32 data_len)
71882 +       u32 base_vbo = lsn << 3;
71883 +       u32 final_log_off = (base_vbo & log->seq_num_mask) & ~log->page_mask;
71884 +       u32 page_off = base_vbo & log->page_mask;
71885 +       u32 tail = log->page_size - page_off;
71887 +       page_off -= 1;
71889 +       /* Add the length of the header */
71890 +       data_len += log->record_header_len;
71892 +       /*
71893 +        * If this lsn is contained this log page we are done
71894 +        * Otherwise we need to walk through several log pages
71895 +        */
71896 +       if (data_len > tail) {
71897 +               data_len -= tail;
71898 +               tail = log->data_size;
71899 +               page_off = log->data_off - 1;
71901 +               for (;;) {
71902 +                       final_log_off = next_page_off(log, final_log_off);
71904 +                       /* We are done if the remaining bytes fit on this page */
71905 +                       if (data_len <= tail)
71906 +                               break;
71907 +                       data_len -= tail;
71908 +               }
71909 +       }
71911 +       /*
71912 +        * We add the remaining bytes to our starting position on this page
71913 +        * and then add that value to the file offset of this log page
71914 +        */
71915 +       return final_log_off + data_len + page_off;
71918 +static int next_log_lsn(struct ntfs_log *log, const struct LFS_RECORD_HDR *rh,
71919 +                       u64 *lsn)
71921 +       int err;
71922 +       u64 this_lsn = le64_to_cpu(rh->this_lsn);
71923 +       u32 vbo = lsn_to_vbo(log, this_lsn);
71924 +       u32 end =
71925 +               final_log_off(log, this_lsn, le32_to_cpu(rh->client_data_len));
71926 +       u32 hdr_off = end & ~log->sys_page_mask;
71927 +       u64 seq = this_lsn >> log->file_data_bits;
71928 +       struct RECORD_PAGE_HDR *page = NULL;
71930 +       /* Remember if we wrapped */
71931 +       if (end <= vbo)
71932 +               seq += 1;
71934 +       /* log page header for this page */
71935 +       err = read_log_page(log, hdr_off, &page, NULL);
71936 +       if (err)
71937 +               return err;
71939 +       /*
71940 +        * If the lsn we were given was not the last lsn on this page,
71941 +        * then the starting offset for the next lsn is on a quad word
71942 +        * boundary following the last file offset for the current lsn
71943 +        * Otherwise the file offset is the start of the data on the next page
71944 +        */
71945 +       if (this_lsn == le64_to_cpu(page->rhdr.lsn)) {
71946 +               /* If we wrapped, we need to increment the sequence number */
71947 +               hdr_off = next_page_off(log, hdr_off);
71948 +               if (hdr_off == log->first_page)
71949 +                       seq += 1;
71951 +               vbo = hdr_off + log->data_off;
71952 +       } else {
71953 +               vbo = QuadAlign(end);
71954 +       }
71956 +       /* Compute the lsn based on the file offset and the sequence count */
71957 +       *lsn = vbo_to_lsn(log, vbo, seq);
71959 +       /*
71960 +        * If this lsn is within the legal range for the file, we return true
71961 +        * Otherwise false indicates that there are no more lsn's
71962 +        */
71963 +       if (!is_lsn_in_file(log, *lsn))
71964 +               *lsn = 0;
71966 +       ntfs_free(page);
71968 +       return 0;
71972 + * current_log_avail
71973 + *
71974 + * calculate the number of bytes available for log records
71975 + */
71976 +static u32 current_log_avail(struct ntfs_log *log)
71978 +       u32 oldest_off, next_free_off, free_bytes;
71980 +       if (log->l_flags & NTFSLOG_NO_LAST_LSN) {
71981 +               /* The entire file is available */
71982 +               return log->max_current_avail;
71983 +       }
71985 +       /*
71986 +        * If there is a last lsn the restart area then we know that we will
71987 +        * have to compute the free range
71988 +        * If there is no oldest lsn then start at the first page of the file
71989 +        */
71990 +       oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN)
71991 +                            ? log->first_page
71992 +                            : (log->oldest_lsn_off & ~log->sys_page_mask);
71994 +       /*
71995 +        * We will use the next log page offset to compute the next free page\
71996 +        * If we are going to reuse this page go to the next page
71997 +        * If we are at the first page then use the end of the file
71998 +        */
71999 +       next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL)
72000 +                               ? log->next_page + log->page_size
72001 +                       : log->next_page == log->first_page ? log->l_size
72002 +                                                           : log->next_page;
72004 +       /* If the two offsets are the same then there is no available space */
72005 +       if (oldest_off == next_free_off)
72006 +               return 0;
72007 +       /*
72008 +        * If the free offset follows the oldest offset then subtract
72009 +        * this range from the total available pages
72010 +        */
72011 +       free_bytes =
72012 +               oldest_off < next_free_off
72013 +                       ? log->total_avail_pages - (next_free_off - oldest_off)
72014 +                       : oldest_off - next_free_off;
72016 +       free_bytes >>= log->page_bits;
72017 +       return free_bytes * log->reserved;
72020 +static bool check_subseq_log_page(struct ntfs_log *log,
72021 +                                 const struct RECORD_PAGE_HDR *rp, u32 vbo,
72022 +                                 u64 seq)
72024 +       u64 lsn_seq;
72025 +       const struct NTFS_RECORD_HEADER *rhdr = &rp->rhdr;
72026 +       u64 lsn = le64_to_cpu(rhdr->lsn);
72028 +       if (rhdr->sign == NTFS_FFFF_SIGNATURE || !rhdr->sign)
72029 +               return false;
72031 +       /*
72032 +        * If the last lsn on the page occurs was written after the page
72033 +        * that caused the original error then we have a fatal error
72034 +        */
72035 +       lsn_seq = lsn >> log->file_data_bits;
72037 +       /*
72038 +        * If the sequence number for the lsn the page is equal or greater
72039 +        * than lsn we expect, then this is a subsequent write
72040 +        */
72041 +       return lsn_seq >= seq ||
72042 +              (lsn_seq == seq - 1 && log->first_page == vbo &&
72043 +               vbo != (lsn_to_vbo(log, lsn) & ~log->page_mask));
72047 + * last_log_lsn
72048 + *
72049 + * This routine walks through the log pages for a file, searching for the
72050 + * last log page written to the file
72051 + */
72052 +static int last_log_lsn(struct ntfs_log *log)
72054 +       int err;
72055 +       bool usa_error = false;
72056 +       bool replace_page = false;
72057 +       bool reuse_page = log->l_flags & NTFSLOG_REUSE_TAIL;
72058 +       bool wrapped_file, wrapped;
72060 +       u32 page_cnt = 1, page_pos = 1;
72061 +       u32 page_off = 0, page_off1 = 0, saved_off = 0;
72062 +       u32 final_off, second_off, final_off_prev = 0, second_off_prev = 0;
72063 +       u32 first_file_off = 0, second_file_off = 0;
72064 +       u32 part_io_count = 0;
72065 +       u32 tails = 0;
72066 +       u32 this_off, curpage_off, nextpage_off, remain_pages;
72068 +       u64 expected_seq, seq_base = 0, lsn_base = 0;
72069 +       u64 best_lsn, best_lsn1, best_lsn2;
72070 +       u64 lsn_cur, lsn1, lsn2;
72071 +       u64 last_ok_lsn = reuse_page ? log->last_lsn : 0;
72073 +       u16 cur_pos, best_page_pos;
72075 +       struct RECORD_PAGE_HDR *page = NULL;
72076 +       struct RECORD_PAGE_HDR *tst_page = NULL;
72077 +       struct RECORD_PAGE_HDR *first_tail = NULL;
72078 +       struct RECORD_PAGE_HDR *second_tail = NULL;
72079 +       struct RECORD_PAGE_HDR *tail_page = NULL;
72080 +       struct RECORD_PAGE_HDR *second_tail_prev = NULL;
72081 +       struct RECORD_PAGE_HDR *first_tail_prev = NULL;
72082 +       struct RECORD_PAGE_HDR *page_bufs = NULL;
72083 +       struct RECORD_PAGE_HDR *best_page;
72085 +       if (log->major_ver >= 2) {
72086 +               final_off = 0x02 * log->page_size;
72087 +               second_off = 0x12 * log->page_size;
72089 +               // 0x10 == 0x12 - 0x2
72090 +               page_bufs = ntfs_malloc(log->page_size * 0x10);
72091 +               if (!page_bufs)
72092 +                       return -ENOMEM;
72093 +       } else {
72094 +               second_off = log->first_page - log->page_size;
72095 +               final_off = second_off - log->page_size;
72096 +       }
72098 +next_tail:
72099 +       /* Read second tail page (at pos 3/0x12000) */
72100 +       if (read_log_page(log, second_off, &second_tail, &usa_error) ||
72101 +           usa_error || second_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
72102 +               ntfs_free(second_tail);
72103 +               second_tail = NULL;
72104 +               second_file_off = 0;
72105 +               lsn2 = 0;
72106 +       } else {
72107 +               second_file_off = hdr_file_off(log, second_tail);
72108 +               lsn2 = le64_to_cpu(second_tail->record_hdr.last_end_lsn);
72109 +       }
72111 +       /* Read first tail page (at pos 2/0x2000 ) */
72112 +       if (read_log_page(log, final_off, &first_tail, &usa_error) ||
72113 +           usa_error || first_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
72114 +               ntfs_free(first_tail);
72115 +               first_tail = NULL;
72116 +               first_file_off = 0;
72117 +               lsn1 = 0;
72118 +       } else {
72119 +               first_file_off = hdr_file_off(log, first_tail);
72120 +               lsn1 = le64_to_cpu(first_tail->record_hdr.last_end_lsn);
72121 +       }
72123 +       if (log->major_ver < 2) {
72124 +               int best_page;
72126 +               first_tail_prev = first_tail;
72127 +               final_off_prev = first_file_off;
72128 +               second_tail_prev = second_tail;
72129 +               second_off_prev = second_file_off;
72130 +               tails = 1;
72132 +               if (!first_tail && !second_tail)
72133 +                       goto tail_read;
72135 +               if (first_tail && second_tail)
72136 +                       best_page = lsn1 < lsn2 ? 1 : 0;
72137 +               else if (first_tail)
72138 +                       best_page = 0;
72139 +               else
72140 +                       best_page = 1;
72142 +               page_off = best_page ? second_file_off : first_file_off;
72143 +               seq_base = (best_page ? lsn2 : lsn1) >> log->file_data_bits;
72144 +               goto tail_read;
72145 +       }
72147 +       best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0;
72148 +       best_lsn2 =
72149 +               second_tail ? base_lsn(log, second_tail, second_file_off) : 0;
72151 +       if (first_tail && second_tail) {
72152 +               if (best_lsn1 > best_lsn2) {
72153 +                       best_lsn = best_lsn1;
72154 +                       best_page = first_tail;
72155 +                       this_off = first_file_off;
72156 +               } else {
72157 +                       best_lsn = best_lsn2;
72158 +                       best_page = second_tail;
72159 +                       this_off = second_file_off;
72160 +               }
72161 +       } else if (first_tail) {
72162 +               best_lsn = best_lsn1;
72163 +               best_page = first_tail;
72164 +               this_off = first_file_off;
72165 +       } else if (second_tail) {
72166 +               best_lsn = best_lsn2;
72167 +               best_page = second_tail;
72168 +               this_off = second_file_off;
72169 +       } else {
72170 +               goto tail_read;
72171 +       }
72173 +       best_page_pos = le16_to_cpu(best_page->page_pos);
72175 +       if (!tails) {
72176 +               if (best_page_pos == page_pos) {
72177 +                       seq_base = best_lsn >> log->file_data_bits;
72178 +                       saved_off = page_off = le32_to_cpu(best_page->file_off);
72179 +                       lsn_base = best_lsn;
72181 +                       memmove(page_bufs, best_page, log->page_size);
72183 +                       page_cnt = le16_to_cpu(best_page->page_count);
72184 +                       if (page_cnt > 1)
72185 +                               page_pos += 1;
72187 +                       tails = 1;
72188 +               }
72189 +       } else if (seq_base == (best_lsn >> log->file_data_bits) &&
72190 +                  saved_off + log->page_size == this_off &&
72191 +                  lsn_base < best_lsn &&
72192 +                  (page_pos != page_cnt || best_page_pos == page_pos ||
72193 +                   best_page_pos == 1) &&
72194 +                  (page_pos >= page_cnt || best_page_pos == page_pos)) {
72195 +               u16 bppc = le16_to_cpu(best_page->page_count);
72197 +               saved_off += log->page_size;
72198 +               lsn_base = best_lsn;
72200 +               memmove(Add2Ptr(page_bufs, tails * log->page_size), best_page,
72201 +                       log->page_size);
72203 +               tails += 1;
72205 +               if (best_page_pos != bppc) {
72206 +                       page_cnt = bppc;
72207 +                       page_pos = best_page_pos;
72209 +                       if (page_cnt > 1)
72210 +                               page_pos += 1;
72211 +               } else {
72212 +                       page_pos = page_cnt = 1;
72213 +               }
72214 +       } else {
72215 +               ntfs_free(first_tail);
72216 +               ntfs_free(second_tail);
72217 +               goto tail_read;
72218 +       }
72220 +       ntfs_free(first_tail_prev);
72221 +       first_tail_prev = first_tail;
72222 +       final_off_prev = first_file_off;
72223 +       first_tail = NULL;
72225 +       ntfs_free(second_tail_prev);
72226 +       second_tail_prev = second_tail;
72227 +       second_off_prev = second_file_off;
72228 +       second_tail = NULL;
72230 +       final_off += log->page_size;
72231 +       second_off += log->page_size;
72233 +       if (tails < 0x10)
72234 +               goto next_tail;
72235 +tail_read:
72236 +       first_tail = first_tail_prev;
72237 +       final_off = final_off_prev;
72239 +       second_tail = second_tail_prev;
72240 +       second_off = second_off_prev;
72242 +       page_cnt = page_pos = 1;
72244 +       curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off)
72245 +                                              : log->next_page;
72247 +       wrapped_file =
72248 +               curpage_off == log->first_page &&
72249 +               !(log->l_flags & (NTFSLOG_NO_LAST_LSN | NTFSLOG_REUSE_TAIL));
72251 +       expected_seq = wrapped_file ? (log->seq_num + 1) : log->seq_num;
72253 +       nextpage_off = curpage_off;
72255 +next_page:
72256 +       tail_page = NULL;
72257 +       /* Read the next log page */
72258 +       err = read_log_page(log, curpage_off, &page, &usa_error);
72260 +       /* Compute the next log page offset the file */
72261 +       nextpage_off = next_page_off(log, curpage_off);
72262 +       wrapped = nextpage_off == log->first_page;
72264 +       if (tails > 1) {
72265 +               struct RECORD_PAGE_HDR *cur_page =
72266 +                       Add2Ptr(page_bufs, curpage_off - page_off);
72268 +               if (curpage_off == saved_off) {
72269 +                       tail_page = cur_page;
72270 +                       goto use_tail_page;
72271 +               }
72273 +               if (page_off > curpage_off || curpage_off >= saved_off)
72274 +                       goto use_tail_page;
72276 +               if (page_off1)
72277 +                       goto use_cur_page;
72279 +               if (!err && !usa_error &&
72280 +                   page->rhdr.sign == NTFS_RCRD_SIGNATURE &&
72281 +                   cur_page->rhdr.lsn == page->rhdr.lsn &&
72282 +                   cur_page->record_hdr.next_record_off ==
72283 +                           page->record_hdr.next_record_off &&
72284 +                   ((page_pos == page_cnt &&
72285 +                     le16_to_cpu(page->page_pos) == 1) ||
72286 +                    (page_pos != page_cnt &&
72287 +                     le16_to_cpu(page->page_pos) == page_pos + 1 &&
72288 +                     le16_to_cpu(page->page_count) == page_cnt))) {
72289 +                       cur_page = NULL;
72290 +                       goto use_tail_page;
72291 +               }
72293 +               page_off1 = page_off;
72295 +use_cur_page:
72297 +               lsn_cur = le64_to_cpu(cur_page->rhdr.lsn);
72299 +               if (last_ok_lsn !=
72300 +                           le64_to_cpu(cur_page->record_hdr.last_end_lsn) &&
72301 +                   ((lsn_cur >> log->file_data_bits) +
72302 +                    ((curpage_off <
72303 +                      (lsn_to_vbo(log, lsn_cur) & ~log->page_mask))
72304 +                             ? 1
72305 +                             : 0)) != expected_seq) {
72306 +                       goto check_tail;
72307 +               }
72309 +               if (!is_log_record_end(cur_page)) {
72310 +                       tail_page = NULL;
72311 +                       last_ok_lsn = lsn_cur;
72312 +                       goto next_page_1;
72313 +               }
72315 +               log->seq_num = expected_seq;
72316 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
72317 +               log->last_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
72318 +               log->ra->current_lsn = cur_page->record_hdr.last_end_lsn;
72320 +               if (log->record_header_len <=
72321 +                   log->page_size -
72322 +                           le16_to_cpu(cur_page->record_hdr.next_record_off)) {
72323 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
72324 +                       log->next_page = curpage_off;
72325 +               } else {
72326 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
72327 +                       log->next_page = nextpage_off;
72328 +               }
72330 +               if (wrapped_file)
72331 +                       log->l_flags |= NTFSLOG_WRAPPED;
72333 +               last_ok_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
72334 +               goto next_page_1;
72335 +       }
72337 +       /*
72338 +        * If we are at the expected first page of a transfer check to see
72339 +        * if either tail copy is at this offset
72340 +        * If this page is the last page of a transfer, check if we wrote
72341 +        * a subsequent tail copy
72342 +        */
72343 +       if (page_cnt == page_pos || page_cnt == page_pos + 1) {
72344 +               /*
72345 +                * Check if the offset matches either the first or second
72346 +                * tail copy. It is possible it will match both
72347 +                */
72348 +               if (curpage_off == final_off)
72349 +                       tail_page = first_tail;
72351 +               /*
72352 +                * If we already matched on the first page then
72353 +                * check the ending lsn's.
72354 +                */
72355 +               if (curpage_off == second_off) {
72356 +                       if (!tail_page ||
72357 +                           (second_tail &&
72358 +                            le64_to_cpu(second_tail->record_hdr.last_end_lsn) >
72359 +                                    le64_to_cpu(first_tail->record_hdr
72360 +                                                        .last_end_lsn))) {
72361 +                               tail_page = second_tail;
72362 +                       }
72363 +               }
72364 +       }
72366 +use_tail_page:
72367 +       if (tail_page) {
72368 +               /* we have a candidate for a tail copy */
72369 +               lsn_cur = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
72371 +               if (last_ok_lsn < lsn_cur) {
72372 +                       /*
72373 +                        * If the sequence number is not expected,
72374 +                        * then don't use the tail copy
72375 +                        */
72376 +                       if (expected_seq != (lsn_cur >> log->file_data_bits))
72377 +                               tail_page = NULL;
72378 +               } else if (last_ok_lsn > lsn_cur) {
72379 +                       /*
72380 +                        * If the last lsn is greater than the one on
72381 +                        * this page then forget this tail
72382 +                        */
72383 +                       tail_page = NULL;
72384 +               }
72385 +       }
72387 +       /* If we have an error on the current page, we will break of this loop */
72388 +       if (err || usa_error)
72389 +               goto check_tail;
72391 +       /*
72392 +        * Done if the last lsn on this page doesn't match the previous known
72393 +        * last lsn or the sequence number is not expected
72394 +        */
72395 +       lsn_cur = le64_to_cpu(page->rhdr.lsn);
72396 +       if (last_ok_lsn != lsn_cur &&
72397 +           expected_seq != (lsn_cur >> log->file_data_bits)) {
72398 +               goto check_tail;
72399 +       }
72401 +       /*
72402 +        * Check that the page position and page count values are correct
72403 +        * If this is the first page of a transfer the position must be 1
72404 +        * and the count will be unknown
72405 +        */
72406 +       if (page_cnt == page_pos) {
72407 +               if (page->page_pos != cpu_to_le16(1) &&
72408 +                   (!reuse_page || page->page_pos != page->page_count)) {
72409 +                       /*
72410 +                        * If the current page is the first page we are
72411 +                        * looking at and we are reusing this page then
72412 +                        * it can be either the first or last page of a
72413 +                        * transfer. Otherwise it can only be the first.
72414 +                        */
72415 +                       goto check_tail;
72416 +               }
72417 +       } else if (le16_to_cpu(page->page_count) != page_cnt ||
72418 +                  le16_to_cpu(page->page_pos) != page_pos + 1) {
72419 +               /*
72420 +                * The page position better be 1 more than the last page
72421 +                * position and the page count better match
72422 +                */
72423 +               goto check_tail;
72424 +       }
72426 +       /*
72427 +        * We have a valid page the file and may have a valid page
72428 +        * the tail copy area
72429 +        * If the tail page was written after the page the file then
72430 +        * break of the loop
72431 +        */
72432 +       if (tail_page &&
72433 +           le64_to_cpu(tail_page->record_hdr.last_end_lsn) > lsn_cur) {
72434 +               /* Remember if we will replace the page */
72435 +               replace_page = true;
72436 +               goto check_tail;
72437 +       }
72439 +       tail_page = NULL;
72441 +       if (is_log_record_end(page)) {
72442 +               /*
72443 +                * Since we have read this page we know the sequence number
72444 +                * is the same as our expected value
72445 +                */
72446 +               log->seq_num = expected_seq;
72447 +               log->last_lsn = le64_to_cpu(page->record_hdr.last_end_lsn);
72448 +               log->ra->current_lsn = page->record_hdr.last_end_lsn;
72449 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
72451 +               /*
72452 +                * If there is room on this page for another header then
72453 +                * remember we want to reuse the page
72454 +                */
72455 +               if (log->record_header_len <=
72456 +                   log->page_size -
72457 +                           le16_to_cpu(page->record_hdr.next_record_off)) {
72458 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
72459 +                       log->next_page = curpage_off;
72460 +               } else {
72461 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
72462 +                       log->next_page = nextpage_off;
72463 +               }
72465 +               /* Remember if we wrapped the log file */
72466 +               if (wrapped_file)
72467 +                       log->l_flags |= NTFSLOG_WRAPPED;
72468 +       }
72470 +       /*
72471 +        * Remember the last page count and position.
72472 +        * Also remember the last known lsn
72473 +        */
72474 +       page_cnt = le16_to_cpu(page->page_count);
72475 +       page_pos = le16_to_cpu(page->page_pos);
72476 +       last_ok_lsn = le64_to_cpu(page->rhdr.lsn);
72478 +next_page_1:
72480 +       if (wrapped) {
72481 +               expected_seq += 1;
72482 +               wrapped_file = 1;
72483 +       }
72485 +       curpage_off = nextpage_off;
72486 +       ntfs_free(page);
72487 +       page = NULL;
72488 +       reuse_page = 0;
72489 +       goto next_page;
72491 +check_tail:
72492 +       if (tail_page) {
72493 +               log->seq_num = expected_seq;
72494 +               log->last_lsn = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
72495 +               log->ra->current_lsn = tail_page->record_hdr.last_end_lsn;
72496 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
72498 +               if (log->page_size -
72499 +                           le16_to_cpu(
72500 +                                   tail_page->record_hdr.next_record_off) >=
72501 +                   log->record_header_len) {
72502 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
72503 +                       log->next_page = curpage_off;
72504 +               } else {
72505 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
72506 +                       log->next_page = nextpage_off;
72507 +               }
72509 +               if (wrapped)
72510 +                       log->l_flags |= NTFSLOG_WRAPPED;
72511 +       }
72513 +       /* Remember that the partial IO will start at the next page */
72514 +       second_off = nextpage_off;
72516 +       /*
72517 +        * If the next page is the first page of the file then update
72518 +        * the sequence number for log records which begon the next page
72519 +        */
72520 +       if (wrapped)
72521 +               expected_seq += 1;
72523 +       /*
72524 +        * If we have a tail copy or are performing single page I/O we can
72525 +        * immediately look at the next page
72526 +        */
72527 +       if (replace_page || (log->ra->flags & RESTART_SINGLE_PAGE_IO)) {
72528 +               page_cnt = 2;
72529 +               page_pos = 1;
72530 +               goto check_valid;
72531 +       }
72533 +       if (page_pos != page_cnt)
72534 +               goto check_valid;
72535 +       /*
72536 +        * If the next page causes us to wrap to the beginning of the log
72537 +        * file then we know which page to check next.
72538 +        */
72539 +       if (wrapped) {
72540 +               page_cnt = 2;
72541 +               page_pos = 1;
72542 +               goto check_valid;
72543 +       }
72545 +       cur_pos = 2;
72547 +next_test_page:
72548 +       ntfs_free(tst_page);
72549 +       tst_page = NULL;
72551 +       /* Walk through the file, reading log pages */
72552 +       err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
72554 +       /*
72555 +        * If we get a USA error then assume that we correctly found
72556 +        * the end of the original transfer
72557 +        */
72558 +       if (usa_error)
72559 +               goto file_is_valid;
72561 +       /*
72562 +        * If we were able to read the page, we examine it to see if it
72563 +        * is the same or different Io block
72564 +        */
72565 +       if (err)
72566 +               goto next_test_page_1;
72568 +       if (le16_to_cpu(tst_page->page_pos) == cur_pos &&
72569 +           check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
72570 +               page_cnt = le16_to_cpu(tst_page->page_count) + 1;
72571 +               page_pos = le16_to_cpu(tst_page->page_pos);
72572 +               goto check_valid;
72573 +       } else {
72574 +               goto file_is_valid;
72575 +       }
72577 +next_test_page_1:
72579 +       nextpage_off = next_page_off(log, curpage_off);
72580 +       wrapped = nextpage_off == log->first_page;
72582 +       if (wrapped) {
72583 +               expected_seq += 1;
72584 +               page_cnt = 2;
72585 +               page_pos = 1;
72586 +       }
72588 +       cur_pos += 1;
72589 +       part_io_count += 1;
72590 +       if (!wrapped)
72591 +               goto next_test_page;
72593 +check_valid:
72594 +       /* Skip over the remaining pages this transfer */
72595 +       remain_pages = page_cnt - page_pos - 1;
72596 +       part_io_count += remain_pages;
72598 +       while (remain_pages--) {
72599 +               nextpage_off = next_page_off(log, curpage_off);
72600 +               wrapped = nextpage_off == log->first_page;
72602 +               if (wrapped)
72603 +                       expected_seq += 1;
72604 +       }
72606 +       /* Call our routine to check this log page */
72607 +       ntfs_free(tst_page);
72608 +       tst_page = NULL;
72610 +       err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
72611 +       if (!err && !usa_error &&
72612 +           check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
72613 +               err = -EINVAL;
72614 +               goto out;
72615 +       }
72617 +file_is_valid:
72619 +       /* We have a valid file */
72620 +       if (page_off1 || tail_page) {
72621 +               struct RECORD_PAGE_HDR *tmp_page;
72623 +               if (sb_rdonly(log->ni->mi.sbi->sb)) {
72624 +                       err = -EROFS;
72625 +                       goto out;
72626 +               }
72628 +               if (page_off1) {
72629 +                       tmp_page = Add2Ptr(page_bufs, page_off1 - page_off);
72630 +                       tails -= (page_off1 - page_off) / log->page_size;
72631 +                       if (!tail_page)
72632 +                               tails -= 1;
72633 +               } else {
72634 +                       tmp_page = tail_page;
72635 +                       tails = 1;
72636 +               }
72638 +               while (tails--) {
72639 +                       u64 off = hdr_file_off(log, tmp_page);
72641 +                       if (!page) {
72642 +                               page = ntfs_malloc(log->page_size);
72643 +                               if (!page)
72644 +                                       return -ENOMEM;
72645 +                       }
72647 +                       /*
72648 +                        * Correct page and copy the data from this page
72649 +                        * into it and flush it to disk
72650 +                        */
72651 +                       memcpy(page, tmp_page, log->page_size);
72653 +                       /* Fill last flushed lsn value flush the page */
72654 +                       if (log->major_ver < 2)
72655 +                               page->rhdr.lsn = page->record_hdr.last_end_lsn;
72656 +                       else
72657 +                               page->file_off = 0;
72659 +                       page->page_pos = page->page_count = cpu_to_le16(1);
72661 +                       ntfs_fix_pre_write(&page->rhdr, log->page_size);
72663 +                       err = ntfs_sb_write_run(log->ni->mi.sbi,
72664 +                                               &log->ni->file.run, off, page,
72665 +                                               log->page_size);
72667 +                       if (err)
72668 +                               goto out;
72670 +                       if (part_io_count && second_off == off) {
72671 +                               second_off += log->page_size;
72672 +                               part_io_count -= 1;
72673 +                       }
72675 +                       tmp_page = Add2Ptr(tmp_page, log->page_size);
72676 +               }
72677 +       }
72679 +       if (part_io_count) {
72680 +               if (sb_rdonly(log->ni->mi.sbi->sb)) {
72681 +                       err = -EROFS;
72682 +                       goto out;
72683 +               }
72684 +       }
72686 +out:
72687 +       ntfs_free(second_tail);
72688 +       ntfs_free(first_tail);
72689 +       ntfs_free(page);
72690 +       ntfs_free(tst_page);
72691 +       ntfs_free(page_bufs);
72693 +       return err;
72697 + * read_log_rec_buf
72698 + *
72699 + * copies a log record from the file to a buffer
72700 + * The log record may span several log pages and may even wrap the file
72701 + */
72702 +static int read_log_rec_buf(struct ntfs_log *log,
72703 +                           const struct LFS_RECORD_HDR *rh, void *buffer)
72705 +       int err;
72706 +       struct RECORD_PAGE_HDR *ph = NULL;
72707 +       u64 lsn = le64_to_cpu(rh->this_lsn);
72708 +       u32 vbo = lsn_to_vbo(log, lsn) & ~log->page_mask;
72709 +       u32 off = lsn_to_page_off(log, lsn) + log->record_header_len;
72710 +       u32 data_len = le32_to_cpu(rh->client_data_len);
72712 +       /*
72713 +        * While there are more bytes to transfer,
72714 +        * we continue to attempt to perform the read
72715 +        */
72716 +       for (;;) {
72717 +               bool usa_error;
72718 +               u32 tail = log->page_size - off;
72720 +               if (tail >= data_len)
72721 +                       tail = data_len;
72723 +               data_len -= tail;
72725 +               err = read_log_page(log, vbo, &ph, &usa_error);
72726 +               if (err)
72727 +                       goto out;
72729 +               /*
72730 +                * The last lsn on this page better be greater or equal
72731 +                * to the lsn we are copying
72732 +                */
72733 +               if (lsn > le64_to_cpu(ph->rhdr.lsn)) {
72734 +                       err = -EINVAL;
72735 +                       goto out;
72736 +               }
72738 +               memcpy(buffer, Add2Ptr(ph, off), tail);
72740 +               /* If there are no more bytes to transfer, we exit the loop */
72741 +               if (!data_len) {
72742 +                       if (!is_log_record_end(ph) ||
72743 +                           lsn > le64_to_cpu(ph->record_hdr.last_end_lsn)) {
72744 +                               err = -EINVAL;
72745 +                               goto out;
72746 +                       }
72747 +                       break;
72748 +               }
72750 +               if (ph->rhdr.lsn == ph->record_hdr.last_end_lsn ||
72751 +                   lsn > le64_to_cpu(ph->rhdr.lsn)) {
72752 +                       err = -EINVAL;
72753 +                       goto out;
72754 +               }
72756 +               vbo = next_page_off(log, vbo);
72757 +               off = log->data_off;
72759 +               /*
72760 +                * adjust our pointer the user's buffer to transfer
72761 +                * the next block to
72762 +                */
72763 +               buffer = Add2Ptr(buffer, tail);
72764 +       }
72766 +out:
72767 +       ntfs_free(ph);
72768 +       return err;
72771 +static int read_rst_area(struct ntfs_log *log, struct NTFS_RESTART **rst_,
72772 +                        u64 *lsn)
72774 +       int err;
72775 +       struct LFS_RECORD_HDR *rh = NULL;
72776 +       const struct CLIENT_REC *cr =
72777 +               Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
72778 +       u64 lsnr, lsnc = le64_to_cpu(cr->restart_lsn);
72779 +       u32 len;
72780 +       struct NTFS_RESTART *rst;
72782 +       *lsn = 0;
72783 +       *rst_ = NULL;
72785 +       /* If the client doesn't have a restart area, go ahead and exit now */
72786 +       if (!lsnc)
72787 +               return 0;
72789 +       err = read_log_page(log, lsn_to_vbo(log, lsnc),
72790 +                           (struct RECORD_PAGE_HDR **)&rh, NULL);
72791 +       if (err)
72792 +               return err;
72794 +       rst = NULL;
72795 +       lsnr = le64_to_cpu(rh->this_lsn);
72797 +       if (lsnc != lsnr) {
72798 +               /* If the lsn values don't match, then the disk is corrupt */
72799 +               err = -EINVAL;
72800 +               goto out;
72801 +       }
72803 +       *lsn = lsnr;
72804 +       len = le32_to_cpu(rh->client_data_len);
72806 +       if (!len) {
72807 +               err = 0;
72808 +               goto out;
72809 +       }
72811 +       if (len < sizeof(struct NTFS_RESTART)) {
72812 +               err = -EINVAL;
72813 +               goto out;
72814 +       }
72816 +       rst = ntfs_malloc(len);
72817 +       if (!rst) {
72818 +               err = -ENOMEM;
72819 +               goto out;
72820 +       }
72822 +       /* Copy the data into the 'rst' buffer */
72823 +       err = read_log_rec_buf(log, rh, rst);
72824 +       if (err)
72825 +               goto out;
72827 +       *rst_ = rst;
72828 +       rst = NULL;
72830 +out:
72831 +       ntfs_free(rh);
72832 +       ntfs_free(rst);
72834 +       return err;
72837 +static int find_log_rec(struct ntfs_log *log, u64 lsn, struct lcb *lcb)
72839 +       int err;
72840 +       struct LFS_RECORD_HDR *rh = lcb->lrh;
72841 +       u32 rec_len, len;
72843 +       /* Read the record header for this lsn */
72844 +       if (!rh) {
72845 +               err = read_log_page(log, lsn_to_vbo(log, lsn),
72846 +                                   (struct RECORD_PAGE_HDR **)&rh, NULL);
72848 +               lcb->lrh = rh;
72849 +               if (err)
72850 +                       return err;
72851 +       }
72853 +       /*
72854 +        * If the lsn the log record doesn't match the desired
72855 +        * lsn then the disk is corrupt
72856 +        */
72857 +       if (lsn != le64_to_cpu(rh->this_lsn))
72858 +               return -EINVAL;
72860 +       len = le32_to_cpu(rh->client_data_len);
72862 +       /*
72863 +        * check that the length field isn't greater than the total
72864 +        * available space the log file
72865 +        */
72866 +       rec_len = len + log->record_header_len;
72867 +       if (rec_len >= log->total_avail)
72868 +               return -EINVAL;
72870 +       /*
72871 +        * If the entire log record is on this log page,
72872 +        * put a pointer to the log record the context block
72873 +        */
72874 +       if (rh->flags & LOG_RECORD_MULTI_PAGE) {
72875 +               void *lr = ntfs_malloc(len);
72877 +               if (!lr)
72878 +                       return -ENOMEM;
72880 +               lcb->log_rec = lr;
72881 +               lcb->alloc = true;
72883 +               /* Copy the data into the buffer returned */
72884 +               err = read_log_rec_buf(log, rh, lr);
72885 +               if (err)
72886 +                       return err;
72887 +       } else {
72888 +               /* If beyond the end of the current page -> an error */
72889 +               u32 page_off = lsn_to_page_off(log, lsn);
72891 +               if (page_off + len + log->record_header_len > log->page_size)
72892 +                       return -EINVAL;
72894 +               lcb->log_rec = Add2Ptr(rh, sizeof(struct LFS_RECORD_HDR));
72895 +               lcb->alloc = false;
72896 +       }
72898 +       return 0;
72902 + * read_log_rec_lcb
72903 + *
72904 + * initiates the query operation.
72905 + */
72906 +static int read_log_rec_lcb(struct ntfs_log *log, u64 lsn, u32 ctx_mode,
72907 +                           struct lcb **lcb_)
72909 +       int err;
72910 +       const struct CLIENT_REC *cr;
72911 +       struct lcb *lcb;
72913 +       switch (ctx_mode) {
72914 +       case lcb_ctx_undo_next:
72915 +       case lcb_ctx_prev:
72916 +       case lcb_ctx_next:
72917 +               break;
72918 +       default:
72919 +               return -EINVAL;
72920 +       }
72922 +       /* check that the given lsn is the legal range for this client */
72923 +       cr = Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
72925 +       if (!verify_client_lsn(log, cr, lsn))
72926 +               return -EINVAL;
72928 +       lcb = ntfs_zalloc(sizeof(struct lcb));
72929 +       if (!lcb)
72930 +               return -ENOMEM;
72931 +       lcb->client = log->client_id;
72932 +       lcb->ctx_mode = ctx_mode;
72934 +       /* Find the log record indicated by the given lsn */
72935 +       err = find_log_rec(log, lsn, lcb);
72936 +       if (err)
72937 +               goto out;
72939 +       *lcb_ = lcb;
72940 +       return 0;
72942 +out:
72943 +       lcb_put(lcb);
72944 +       *lcb_ = NULL;
72945 +       return err;
72949 + * find_client_next_lsn
72950 + *
72951 + * attempt to find the next lsn to return to a client based on the context mode.
72952 + */
72953 +static int find_client_next_lsn(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
72955 +       int err;
72956 +       u64 next_lsn;
72957 +       struct LFS_RECORD_HDR *hdr;
72959 +       hdr = lcb->lrh;
72960 +       *lsn = 0;
72962 +       if (lcb_ctx_next != lcb->ctx_mode)
72963 +               goto check_undo_next;
72965 +       /* Loop as long as another lsn can be found */
72966 +       for (;;) {
72967 +               u64 current_lsn;
72969 +               err = next_log_lsn(log, hdr, &current_lsn);
72970 +               if (err)
72971 +                       goto out;
72973 +               if (!current_lsn)
72974 +                       break;
72976 +               if (hdr != lcb->lrh)
72977 +                       ntfs_free(hdr);
72979 +               hdr = NULL;
72980 +               err = read_log_page(log, lsn_to_vbo(log, current_lsn),
72981 +                                   (struct RECORD_PAGE_HDR **)&hdr, NULL);
72982 +               if (err)
72983 +                       goto out;
72985 +               if (memcmp(&hdr->client, &lcb->client,
72986 +                          sizeof(struct CLIENT_ID))) {
72987 +                       /*err = -EINVAL; */
72988 +               } else if (LfsClientRecord == hdr->record_type) {
72989 +                       ntfs_free(lcb->lrh);
72990 +                       lcb->lrh = hdr;
72991 +                       *lsn = current_lsn;
72992 +                       return 0;
72993 +               }
72994 +       }
72996 +out:
72997 +       if (hdr != lcb->lrh)
72998 +               ntfs_free(hdr);
72999 +       return err;
73001 +check_undo_next:
73002 +       if (lcb_ctx_undo_next == lcb->ctx_mode)
73003 +               next_lsn = le64_to_cpu(hdr->client_undo_next_lsn);
73004 +       else if (lcb_ctx_prev == lcb->ctx_mode)
73005 +               next_lsn = le64_to_cpu(hdr->client_prev_lsn);
73006 +       else
73007 +               return 0;
73009 +       if (!next_lsn)
73010 +               return 0;
73012 +       if (!verify_client_lsn(
73013 +                   log, Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)),
73014 +                   next_lsn))
73015 +               return 0;
73017 +       hdr = NULL;
73018 +       err = read_log_page(log, lsn_to_vbo(log, next_lsn),
73019 +                           (struct RECORD_PAGE_HDR **)&hdr, NULL);
73020 +       if (err)
73021 +               return err;
73022 +       ntfs_free(lcb->lrh);
73023 +       lcb->lrh = hdr;
73025 +       *lsn = next_lsn;
73027 +       return 0;
73030 +static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
73032 +       int err;
73034 +       err = find_client_next_lsn(log, lcb, lsn);
73035 +       if (err)
73036 +               return err;
73038 +       if (!*lsn)
73039 +               return 0;
73041 +       if (lcb->alloc)
73042 +               ntfs_free(lcb->log_rec);
73044 +       lcb->log_rec = NULL;
73045 +       lcb->alloc = false;
73046 +       ntfs_free(lcb->lrh);
73047 +       lcb->lrh = NULL;
73049 +       return find_log_rec(log, *lsn, lcb);
73052 +static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
73054 +       __le16 mask;
73055 +       u32 min_de, de_off, used, total;
73056 +       const struct NTFS_DE *e;
73058 +       if (hdr_has_subnode(hdr)) {
73059 +               min_de = sizeof(struct NTFS_DE) + sizeof(u64);
73060 +               mask = NTFS_IE_HAS_SUBNODES;
73061 +       } else {
73062 +               min_de = sizeof(struct NTFS_DE);
73063 +               mask = 0;
73064 +       }
73066 +       de_off = le32_to_cpu(hdr->de_off);
73067 +       used = le32_to_cpu(hdr->used);
73068 +       total = le32_to_cpu(hdr->total);
73070 +       if (de_off > bytes - min_de || used > bytes || total > bytes ||
73071 +           de_off + min_de > used || used > total) {
73072 +               return false;
73073 +       }
73075 +       e = Add2Ptr(hdr, de_off);
73076 +       for (;;) {
73077 +               u16 esize = le16_to_cpu(e->size);
73078 +               struct NTFS_DE *next = Add2Ptr(e, esize);
73080 +               if (esize < min_de || PtrOffset(hdr, next) > used ||
73081 +                   (e->flags & NTFS_IE_HAS_SUBNODES) != mask) {
73082 +                       return false;
73083 +               }
73085 +               if (de_is_last(e))
73086 +                       break;
73088 +               e = next;
73089 +       }
73091 +       return true;
73094 +static inline bool check_index_buffer(const struct INDEX_BUFFER *ib, u32 bytes)
73096 +       u16 fo;
73097 +       const struct NTFS_RECORD_HEADER *r = &ib->rhdr;
73099 +       if (r->sign != NTFS_INDX_SIGNATURE)
73100 +               return false;
73102 +       fo = (SECTOR_SIZE - ((bytes >> SECTOR_SHIFT) + 1) * sizeof(short));
73104 +       if (le16_to_cpu(r->fix_off) > fo)
73105 +               return false;
73107 +       if ((le16_to_cpu(r->fix_num) - 1) * SECTOR_SIZE != bytes)
73108 +               return false;
73110 +       return check_index_header(&ib->ihdr,
73111 +                                 bytes - offsetof(struct INDEX_BUFFER, ihdr));
73114 +static inline bool check_index_root(const struct ATTRIB *attr,
73115 +                                   struct ntfs_sb_info *sbi)
73117 +       bool ret;
73118 +       const struct INDEX_ROOT *root = resident_data(attr);
73119 +       u8 index_bits = le32_to_cpu(root->index_block_size) >= sbi->cluster_size
73120 +                               ? sbi->cluster_bits
73121 +                               : SECTOR_SHIFT;
73122 +       u8 block_clst = root->index_block_clst;
73124 +       if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) ||
73125 +           (root->type != ATTR_NAME && root->type != ATTR_ZERO) ||
73126 +           (root->type == ATTR_NAME &&
73127 +            root->rule != NTFS_COLLATION_TYPE_FILENAME) ||
73128 +           (le32_to_cpu(root->index_block_size) !=
73129 +            (block_clst << index_bits)) ||
73130 +           (block_clst != 1 && block_clst != 2 && block_clst != 4 &&
73131 +            block_clst != 8 && block_clst != 0x10 && block_clst != 0x20 &&
73132 +            block_clst != 0x40 && block_clst != 0x80)) {
73133 +               return false;
73134 +       }
73136 +       ret = check_index_header(&root->ihdr,
73137 +                                le32_to_cpu(attr->res.data_size) -
73138 +                                        offsetof(struct INDEX_ROOT, ihdr));
73139 +       return ret;
73142 +static inline bool check_attr(const struct MFT_REC *rec,
73143 +                             const struct ATTRIB *attr,
73144 +                             struct ntfs_sb_info *sbi)
73146 +       u32 asize = le32_to_cpu(attr->size);
73147 +       u32 rsize = 0;
73148 +       u64 dsize, svcn, evcn;
73149 +       u16 run_off;
73151 +       /* Check the fixed part of the attribute record header */
73152 +       if (asize >= sbi->record_size ||
73153 +           asize + PtrOffset(rec, attr) >= sbi->record_size ||
73154 +           (attr->name_len &&
73155 +            le16_to_cpu(attr->name_off) + attr->name_len * sizeof(short) >
73156 +                    asize)) {
73157 +               return false;
73158 +       }
73160 +       /* Check the attribute fields */
73161 +       switch (attr->non_res) {
73162 +       case 0:
73163 +               rsize = le32_to_cpu(attr->res.data_size);
73164 +               if (rsize >= asize ||
73165 +                   le16_to_cpu(attr->res.data_off) + rsize > asize) {
73166 +                       return false;
73167 +               }
73168 +               break;
73170 +       case 1:
73171 +               dsize = le64_to_cpu(attr->nres.data_size);
73172 +               svcn = le64_to_cpu(attr->nres.svcn);
73173 +               evcn = le64_to_cpu(attr->nres.evcn);
73174 +               run_off = le16_to_cpu(attr->nres.run_off);
73176 +               if (svcn > evcn + 1 || run_off >= asize ||
73177 +                   le64_to_cpu(attr->nres.valid_size) > dsize ||
73178 +                   dsize > le64_to_cpu(attr->nres.alloc_size)) {
73179 +                       return false;
73180 +               }
73182 +               if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn,
73183 +                              Add2Ptr(attr, run_off), asize - run_off) < 0) {
73184 +                       return false;
73185 +               }
73187 +               return true;
73189 +       default:
73190 +               return false;
73191 +       }
73193 +       switch (attr->type) {
73194 +       case ATTR_NAME:
73195 +               if (fname_full_size(Add2Ptr(
73196 +                           attr, le16_to_cpu(attr->res.data_off))) > asize) {
73197 +                       return false;
73198 +               }
73199 +               break;
73201 +       case ATTR_ROOT:
73202 +               return check_index_root(attr, sbi);
73204 +       case ATTR_STD:
73205 +               if (rsize < sizeof(struct ATTR_STD_INFO5) &&
73206 +                   rsize != sizeof(struct ATTR_STD_INFO)) {
73207 +                       return false;
73208 +               }
73209 +               break;
73211 +       case ATTR_LIST:
73212 +       case ATTR_ID:
73213 +       case ATTR_SECURE:
73214 +       case ATTR_LABEL:
73215 +       case ATTR_VOL_INFO:
73216 +       case ATTR_DATA:
73217 +       case ATTR_ALLOC:
73218 +       case ATTR_BITMAP:
73219 +       case ATTR_REPARSE:
73220 +       case ATTR_EA_INFO:
73221 +       case ATTR_EA:
73222 +       case ATTR_PROPERTYSET:
73223 +       case ATTR_LOGGED_UTILITY_STREAM:
73224 +               break;
73226 +       default:
73227 +               return false;
73228 +       }
73230 +       return true;
73233 +static inline bool check_file_record(const struct MFT_REC *rec,
73234 +                                    const struct MFT_REC *rec2,
73235 +                                    struct ntfs_sb_info *sbi)
73237 +       const struct ATTRIB *attr;
73238 +       u16 fo = le16_to_cpu(rec->rhdr.fix_off);
73239 +       u16 fn = le16_to_cpu(rec->rhdr.fix_num);
73240 +       u16 ao = le16_to_cpu(rec->attr_off);
73241 +       u32 rs = sbi->record_size;
73243 +       /* check the file record header for consistency */
73244 +       if (rec->rhdr.sign != NTFS_FILE_SIGNATURE ||
73245 +           fo > (SECTOR_SIZE - ((rs >> SECTOR_SHIFT) + 1) * sizeof(short)) ||
73246 +           (fn - 1) * SECTOR_SIZE != rs || ao < MFTRECORD_FIXUP_OFFSET_1 ||
73247 +           ao > sbi->record_size - SIZEOF_RESIDENT || !is_rec_inuse(rec) ||
73248 +           le32_to_cpu(rec->total) != rs) {
73249 +               return false;
73250 +       }
73252 +       /* Loop to check all of the attributes */
73253 +       for (attr = Add2Ptr(rec, ao); attr->type != ATTR_END;
73254 +            attr = Add2Ptr(attr, le32_to_cpu(attr->size))) {
73255 +               if (check_attr(rec, attr, sbi))
73256 +                       continue;
73257 +               return false;
73258 +       }
73260 +       return true;
73263 +static inline int check_lsn(const struct NTFS_RECORD_HEADER *hdr,
73264 +                           const u64 *rlsn)
73266 +       u64 lsn;
73268 +       if (!rlsn)
73269 +               return true;
73271 +       lsn = le64_to_cpu(hdr->lsn);
73273 +       if (hdr->sign == NTFS_HOLE_SIGNATURE)
73274 +               return false;
73276 +       if (*rlsn > lsn)
73277 +               return true;
73279 +       return false;
73282 +static inline bool check_if_attr(const struct MFT_REC *rec,
73283 +                                const struct LOG_REC_HDR *lrh)
73285 +       u16 ro = le16_to_cpu(lrh->record_off);
73286 +       u16 o = le16_to_cpu(rec->attr_off);
73287 +       const struct ATTRIB *attr = Add2Ptr(rec, o);
73289 +       while (o < ro) {
73290 +               u32 asize;
73292 +               if (attr->type == ATTR_END)
73293 +                       break;
73295 +               asize = le32_to_cpu(attr->size);
73296 +               if (!asize)
73297 +                       break;
73299 +               o += asize;
73300 +               attr = Add2Ptr(attr, asize);
73301 +       }
73303 +       return o == ro;
73306 +static inline bool check_if_index_root(const struct MFT_REC *rec,
73307 +                                      const struct LOG_REC_HDR *lrh)
73309 +       u16 ro = le16_to_cpu(lrh->record_off);
73310 +       u16 o = le16_to_cpu(rec->attr_off);
73311 +       const struct ATTRIB *attr = Add2Ptr(rec, o);
73313 +       while (o < ro) {
73314 +               u32 asize;
73316 +               if (attr->type == ATTR_END)
73317 +                       break;
73319 +               asize = le32_to_cpu(attr->size);
73320 +               if (!asize)
73321 +                       break;
73323 +               o += asize;
73324 +               attr = Add2Ptr(attr, asize);
73325 +       }
73327 +       return o == ro && attr->type == ATTR_ROOT;
73330 +static inline bool check_if_root_index(const struct ATTRIB *attr,
73331 +                                      const struct INDEX_HDR *hdr,
73332 +                                      const struct LOG_REC_HDR *lrh)
73334 +       u16 ao = le16_to_cpu(lrh->attr_off);
73335 +       u32 de_off = le32_to_cpu(hdr->de_off);
73336 +       u32 o = PtrOffset(attr, hdr) + de_off;
73337 +       const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
73338 +       u32 asize = le32_to_cpu(attr->size);
73340 +       while (o < ao) {
73341 +               u16 esize;
73343 +               if (o >= asize)
73344 +                       break;
73346 +               esize = le16_to_cpu(e->size);
73347 +               if (!esize)
73348 +                       break;
73350 +               o += esize;
73351 +               e = Add2Ptr(e, esize);
73352 +       }
73354 +       return o == ao;
73357 +static inline bool check_if_alloc_index(const struct INDEX_HDR *hdr,
73358 +                                       u32 attr_off)
73360 +       u32 de_off = le32_to_cpu(hdr->de_off);
73361 +       u32 o = offsetof(struct INDEX_BUFFER, ihdr) + de_off;
73362 +       const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
73363 +       u32 used = le32_to_cpu(hdr->used);
73365 +       while (o < attr_off) {
73366 +               u16 esize;
73368 +               if (de_off >= used)
73369 +                       break;
73371 +               esize = le16_to_cpu(e->size);
73372 +               if (!esize)
73373 +                       break;
73375 +               o += esize;
73376 +               de_off += esize;
73377 +               e = Add2Ptr(e, esize);
73378 +       }
73380 +       return o == attr_off;
73383 +static inline void change_attr_size(struct MFT_REC *rec, struct ATTRIB *attr,
73384 +                                   u32 nsize)
73386 +       u32 asize = le32_to_cpu(attr->size);
73387 +       int dsize = nsize - asize;
73388 +       u8 *next = Add2Ptr(attr, asize);
73389 +       u32 used = le32_to_cpu(rec->used);
73391 +       memmove(Add2Ptr(attr, nsize), next, used - PtrOffset(rec, next));
73393 +       rec->used = cpu_to_le32(used + dsize);
73394 +       attr->size = cpu_to_le32(nsize);
73397 +struct OpenAttr {
73398 +       struct ATTRIB *attr;
73399 +       struct runs_tree *run1;
73400 +       struct runs_tree run0;
73401 +       struct ntfs_inode *ni;
73402 +       // CLST rno;
73405 +/* Returns 0 if 'attr' has the same type and name */
73406 +static inline int cmp_type_and_name(const struct ATTRIB *a1,
73407 +                                   const struct ATTRIB *a2)
73409 +       return a1->type != a2->type || a1->name_len != a2->name_len ||
73410 +              (a1->name_len && memcmp(attr_name(a1), attr_name(a2),
73411 +                                      a1->name_len * sizeof(short)));
73414 +static struct OpenAttr *find_loaded_attr(struct ntfs_log *log,
73415 +                                        const struct ATTRIB *attr, CLST rno)
73417 +       struct OPEN_ATTR_ENRTY *oe = NULL;
73419 +       while ((oe = enum_rstbl(log->open_attr_tbl, oe))) {
73420 +               struct OpenAttr *op_attr;
73422 +               if (ino_get(&oe->ref) != rno)
73423 +                       continue;
73425 +               op_attr = (struct OpenAttr *)oe->ptr;
73426 +               if (!cmp_type_and_name(op_attr->attr, attr))
73427 +                       return op_attr;
73428 +       }
73429 +       return NULL;
73432 +static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi,
73433 +                                            enum ATTR_TYPE type, u64 size,
73434 +                                            const u16 *name, size_t name_len,
73435 +                                            __le16 flags)
73437 +       struct ATTRIB *attr;
73438 +       u32 name_size = QuadAlign(name_len * sizeof(short));
73439 +       bool is_ext = flags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED);
73440 +       u32 asize = name_size +
73441 +                   (is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT);
73443 +       attr = ntfs_zalloc(asize);
73444 +       if (!attr)
73445 +               return NULL;
73447 +       attr->type = type;
73448 +       attr->size = cpu_to_le32(asize);
73449 +       attr->flags = flags;
73450 +       attr->non_res = 1;
73451 +       attr->name_len = name_len;
73453 +       attr->nres.evcn = cpu_to_le64((u64)bytes_to_cluster(sbi, size) - 1);
73454 +       attr->nres.alloc_size = cpu_to_le64(ntfs_up_cluster(sbi, size));
73455 +       attr->nres.data_size = cpu_to_le64(size);
73456 +       attr->nres.valid_size = attr->nres.data_size;
73457 +       if (is_ext) {
73458 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
73459 +               if (is_attr_compressed(attr))
73460 +                       attr->nres.c_unit = COMPRESSION_UNIT;
73462 +               attr->nres.run_off =
73463 +                       cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size);
73464 +               memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT_EX), name,
73465 +                      name_len * sizeof(short));
73466 +       } else {
73467 +               attr->name_off = SIZEOF_NONRESIDENT_LE;
73468 +               attr->nres.run_off =
73469 +                       cpu_to_le16(SIZEOF_NONRESIDENT + name_size);
73470 +               memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT), name,
73471 +                      name_len * sizeof(short));
73472 +       }
73474 +       return attr;
73478 + * do_action
73479 + *
73480 + * common routine for the Redo and Undo Passes
73481 + * If rlsn is NULL then undo
73482 + */
73483 +static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
73484 +                    const struct LOG_REC_HDR *lrh, u32 op, void *data,
73485 +                    u32 dlen, u32 rec_len, const u64 *rlsn)
73487 +       int err = 0;
73488 +       struct ntfs_sb_info *sbi = log->ni->mi.sbi;
73489 +       struct inode *inode = NULL, *inode_parent;
73490 +       struct mft_inode *mi = NULL, *mi2_child = NULL;
73491 +       CLST rno = 0, rno_base = 0;
73492 +       struct INDEX_BUFFER *ib = NULL;
73493 +       struct MFT_REC *rec = NULL;
73494 +       struct ATTRIB *attr = NULL, *attr2;
73495 +       struct INDEX_HDR *hdr;
73496 +       struct INDEX_ROOT *root;
73497 +       struct NTFS_DE *e, *e1, *e2;
73498 +       struct NEW_ATTRIBUTE_SIZES *new_sz;
73499 +       struct ATTR_FILE_NAME *fname;
73500 +       struct OpenAttr *oa, *oa2;
73501 +       u32 nsize, t32, asize, used, esize, bmp_off, bmp_bits;
73502 +       u16 id, id2;
73503 +       u32 record_size = sbi->record_size;
73504 +       u64 t64;
73505 +       u16 roff = le16_to_cpu(lrh->record_off);
73506 +       u16 aoff = le16_to_cpu(lrh->attr_off);
73507 +       u64 lco = 0;
73508 +       u64 cbo = (u64)le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
73509 +       u64 tvo = le64_to_cpu(lrh->target_vcn) << sbi->cluster_bits;
73510 +       u64 vbo = cbo + tvo;
73511 +       void *buffer_le = NULL;
73512 +       u32 bytes = 0;
73513 +       bool a_dirty = false;
73514 +       u16 data_off;
73516 +       oa = oe->ptr;
73518 +       /* Big switch to prepare */
73519 +       switch (op) {
73520 +       /* ============================================================
73521 +        * Process MFT records, as described by the current log record
73522 +        * ============================================================
73523 +        */
73524 +       case InitializeFileRecordSegment:
73525 +       case DeallocateFileRecordSegment:
73526 +       case WriteEndOfFileRecordSegment:
73527 +       case CreateAttribute:
73528 +       case DeleteAttribute:
73529 +       case UpdateResidentValue:
73530 +       case UpdateMappingPairs:
73531 +       case SetNewAttributeSizes:
73532 +       case AddIndexEntryRoot:
73533 +       case DeleteIndexEntryRoot:
73534 +       case SetIndexEntryVcnRoot:
73535 +       case UpdateFileNameRoot:
73536 +       case UpdateRecordDataRoot:
73537 +       case ZeroEndOfFileRecord:
73538 +               rno = vbo >> sbi->record_bits;
73539 +               inode = ilookup(sbi->sb, rno);
73540 +               if (inode) {
73541 +                       mi = &ntfs_i(inode)->mi;
73542 +               } else if (op == InitializeFileRecordSegment) {
73543 +                       mi = ntfs_zalloc(sizeof(struct mft_inode));
73544 +                       if (!mi)
73545 +                               return -ENOMEM;
73546 +                       err = mi_format_new(mi, sbi, rno, 0, false);
73547 +                       if (err)
73548 +                               goto out;
73549 +               } else {
73550 +                       /* read from disk */
73551 +                       err = mi_get(sbi, rno, &mi);
73552 +                       if (err)
73553 +                               return err;
73554 +               }
73555 +               rec = mi->mrec;
73557 +               if (op == DeallocateFileRecordSegment)
73558 +                       goto skip_load_parent;
73560 +               if (InitializeFileRecordSegment != op) {
73561 +                       if (rec->rhdr.sign == NTFS_BAAD_SIGNATURE)
73562 +                               goto dirty_vol;
73563 +                       if (!check_lsn(&rec->rhdr, rlsn))
73564 +                               goto out;
73565 +                       if (!check_file_record(rec, NULL, sbi))
73566 +                               goto dirty_vol;
73567 +                       attr = Add2Ptr(rec, roff);
73568 +               }
73570 +               if (is_rec_base(rec) || InitializeFileRecordSegment == op) {
73571 +                       rno_base = rno;
73572 +                       goto skip_load_parent;
73573 +               }
73575 +               rno_base = ino_get(&rec->parent_ref);
73576 +               inode_parent = ntfs_iget5(sbi->sb, &rec->parent_ref, NULL);
73577 +               if (IS_ERR(inode_parent))
73578 +                       goto skip_load_parent;
73580 +               if (is_bad_inode(inode_parent)) {
73581 +                       iput(inode_parent);
73582 +                       goto skip_load_parent;
73583 +               }
73585 +               if (ni_load_mi_ex(ntfs_i(inode_parent), rno, &mi2_child)) {
73586 +                       iput(inode_parent);
73587 +               } else {
73588 +                       if (mi2_child->mrec != mi->mrec)
73589 +                               memcpy(mi2_child->mrec, mi->mrec,
73590 +                                      sbi->record_size);
73592 +                       if (inode)
73593 +                               iput(inode);
73594 +                       else if (mi)
73595 +                               mi_put(mi);
73597 +                       inode = inode_parent;
73598 +                       mi = mi2_child;
73599 +                       rec = mi2_child->mrec;
73600 +                       attr = Add2Ptr(rec, roff);
73601 +               }
73603 +skip_load_parent:
73604 +               inode_parent = NULL;
73605 +               break;
73607 +       /* ============================================================
73608 +        * Process attributes, as described by the current log record
73609 +        * ============================================================
73610 +        */
73611 +       case UpdateNonresidentValue:
73612 +       case AddIndexEntryAllocation:
73613 +       case DeleteIndexEntryAllocation:
73614 +       case WriteEndOfIndexBuffer:
73615 +       case SetIndexEntryVcnAllocation:
73616 +       case UpdateFileNameAllocation:
73617 +       case SetBitsInNonresidentBitMap:
73618 +       case ClearBitsInNonresidentBitMap:
73619 +       case UpdateRecordDataAllocation:
73620 +               attr = oa->attr;
73621 +               bytes = UpdateNonresidentValue == op ? dlen : 0;
73622 +               lco = (u64)le16_to_cpu(lrh->lcns_follow) << sbi->cluster_bits;
73624 +               if (attr->type == ATTR_ALLOC) {
73625 +                       t32 = le32_to_cpu(oe->bytes_per_index);
73626 +                       if (bytes < t32)
73627 +                               bytes = t32;
73628 +               }
73630 +               if (!bytes)
73631 +                       bytes = lco - cbo;
73633 +               bytes += roff;
73634 +               if (attr->type == ATTR_ALLOC)
73635 +                       bytes = (bytes + 511) & ~511; // align
73637 +               buffer_le = ntfs_malloc(bytes);
73638 +               if (!buffer_le)
73639 +                       return -ENOMEM;
73641 +               err = ntfs_read_run_nb(sbi, oa->run1, vbo, buffer_le, bytes,
73642 +                                      NULL);
73643 +               if (err)
73644 +                       goto out;
73646 +               if (attr->type == ATTR_ALLOC && *(int *)buffer_le)
73647 +                       ntfs_fix_post_read(buffer_le, bytes, false);
73648 +               break;
73650 +       default:
73651 +               WARN_ON(1);
73652 +       }
73654 +       /* Big switch to do operation */
73655 +       switch (op) {
73656 +       case InitializeFileRecordSegment:
73657 +               if (roff + dlen > record_size)
73658 +                       goto dirty_vol;
73660 +               memcpy(Add2Ptr(rec, roff), data, dlen);
73661 +               mi->dirty = true;
73662 +               break;
73664 +       case DeallocateFileRecordSegment:
73665 +               clear_rec_inuse(rec);
73666 +               le16_add_cpu(&rec->seq, 1);
73667 +               mi->dirty = true;
73668 +               break;
73670 +       case WriteEndOfFileRecordSegment:
73671 +               attr2 = (struct ATTRIB *)data;
73672 +               if (!check_if_attr(rec, lrh) || roff + dlen > record_size)
73673 +                       goto dirty_vol;
73675 +               memmove(attr, attr2, dlen);
73676 +               rec->used = cpu_to_le32(QuadAlign(roff + dlen));
73678 +               mi->dirty = true;
73679 +               break;
73681 +       case CreateAttribute:
73682 +               attr2 = (struct ATTRIB *)data;
73683 +               asize = le32_to_cpu(attr2->size);
73684 +               used = le32_to_cpu(rec->used);
73686 +               if (!check_if_attr(rec, lrh) || dlen < SIZEOF_RESIDENT ||
73687 +                   !IsQuadAligned(asize) ||
73688 +                   Add2Ptr(attr2, asize) > Add2Ptr(lrh, rec_len) ||
73689 +                   dlen > record_size - used) {
73690 +                       goto dirty_vol;
73691 +               }
73693 +               memmove(Add2Ptr(attr, asize), attr, used - roff);
73694 +               memcpy(attr, attr2, asize);
73696 +               rec->used = cpu_to_le32(used + asize);
73697 +               id = le16_to_cpu(rec->next_attr_id);
73698 +               id2 = le16_to_cpu(attr2->id);
73699 +               if (id <= id2)
73700 +                       rec->next_attr_id = cpu_to_le16(id2 + 1);
73701 +               if (is_attr_indexed(attr))
73702 +                       le16_add_cpu(&rec->hard_links, 1);
73704 +               oa2 = find_loaded_attr(log, attr, rno_base);
73705 +               if (oa2) {
73706 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
73708 +                       if (p2) {
73709 +                               // run_close(oa2->run1);
73710 +                               ntfs_free(oa2->attr);
73711 +                               oa2->attr = p2;
73712 +                       }
73713 +               }
73715 +               mi->dirty = true;
73716 +               break;
73718 +       case DeleteAttribute:
73719 +               asize = le32_to_cpu(attr->size);
73720 +               used = le32_to_cpu(rec->used);
73722 +               if (!check_if_attr(rec, lrh))
73723 +                       goto dirty_vol;
73725 +               rec->used = cpu_to_le32(used - asize);
73726 +               if (is_attr_indexed(attr))
73727 +                       le16_add_cpu(&rec->hard_links, -1);
73729 +               memmove(attr, Add2Ptr(attr, asize), used - asize - roff);
73731 +               mi->dirty = true;
73732 +               break;
73734 +       case UpdateResidentValue:
73735 +               nsize = aoff + dlen;
73737 +               if (!check_if_attr(rec, lrh))
73738 +                       goto dirty_vol;
73740 +               asize = le32_to_cpu(attr->size);
73741 +               used = le32_to_cpu(rec->used);
73743 +               if (lrh->redo_len == lrh->undo_len) {
73744 +                       if (nsize > asize)
73745 +                               goto dirty_vol;
73746 +                       goto move_data;
73747 +               }
73749 +               if (nsize > asize && nsize - asize > record_size - used)
73750 +                       goto dirty_vol;
73752 +               nsize = QuadAlign(nsize);
73753 +               data_off = le16_to_cpu(attr->res.data_off);
73755 +               if (nsize < asize) {
73756 +                       memmove(Add2Ptr(attr, aoff), data, dlen);
73757 +                       data = NULL; // To skip below memmove
73758 +               }
73760 +               memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
73761 +                       used - le16_to_cpu(lrh->record_off) - asize);
73763 +               rec->used = cpu_to_le32(used + nsize - asize);
73764 +               attr->size = cpu_to_le32(nsize);
73765 +               attr->res.data_size = cpu_to_le32(aoff + dlen - data_off);
73767 +move_data:
73768 +               if (data)
73769 +                       memmove(Add2Ptr(attr, aoff), data, dlen);
73771 +               oa2 = find_loaded_attr(log, attr, rno_base);
73772 +               if (oa2) {
73773 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
73775 +                       if (p2) {
73776 +                               // run_close(&oa2->run0);
73777 +                               oa2->run1 = &oa2->run0;
73778 +                               ntfs_free(oa2->attr);
73779 +                               oa2->attr = p2;
73780 +                       }
73781 +               }
73783 +               mi->dirty = true;
73784 +               break;
73786 +       case UpdateMappingPairs:
73787 +               nsize = aoff + dlen;
73788 +               asize = le32_to_cpu(attr->size);
73789 +               used = le32_to_cpu(rec->used);
73791 +               if (!check_if_attr(rec, lrh) || !attr->non_res ||
73792 +                   aoff < le16_to_cpu(attr->nres.run_off) || aoff > asize ||
73793 +                   (nsize > asize && nsize - asize > record_size - used)) {
73794 +                       goto dirty_vol;
73795 +               }
73797 +               nsize = QuadAlign(nsize);
73799 +               memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
73800 +                       used - le16_to_cpu(lrh->record_off) - asize);
73801 +               rec->used = cpu_to_le32(used + nsize - asize);
73802 +               attr->size = cpu_to_le32(nsize);
73803 +               memmove(Add2Ptr(attr, aoff), data, dlen);
73805 +               if (run_get_highest_vcn(le64_to_cpu(attr->nres.svcn),
73806 +                                       attr_run(attr), &t64)) {
73807 +                       goto dirty_vol;
73808 +               }
73810 +               attr->nres.evcn = cpu_to_le64(t64);
73811 +               oa2 = find_loaded_attr(log, attr, rno_base);
73812 +               if (oa2 && oa2->attr->non_res)
73813 +                       oa2->attr->nres.evcn = attr->nres.evcn;
73815 +               mi->dirty = true;
73816 +               break;
73818 +       case SetNewAttributeSizes:
73819 +               new_sz = data;
73820 +               if (!check_if_attr(rec, lrh) || !attr->non_res)
73821 +                       goto dirty_vol;
73823 +               attr->nres.alloc_size = new_sz->alloc_size;
73824 +               attr->nres.data_size = new_sz->data_size;
73825 +               attr->nres.valid_size = new_sz->valid_size;
73827 +               if (dlen >= sizeof(struct NEW_ATTRIBUTE_SIZES))
73828 +                       attr->nres.total_size = new_sz->total_size;
73830 +               oa2 = find_loaded_attr(log, attr, rno_base);
73831 +               if (oa2) {
73832 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
73834 +                       if (p2) {
73835 +                               ntfs_free(oa2->attr);
73836 +                               oa2->attr = p2;
73837 +                       }
73838 +               }
73839 +               mi->dirty = true;
73840 +               break;
73842 +       case AddIndexEntryRoot:
73843 +               e = (struct NTFS_DE *)data;
73844 +               esize = le16_to_cpu(e->size);
73845 +               root = resident_data(attr);
73846 +               hdr = &root->ihdr;
73847 +               used = le32_to_cpu(hdr->used);
73849 +               if (!check_if_index_root(rec, lrh) ||
73850 +                   !check_if_root_index(attr, hdr, lrh) ||
73851 +                   Add2Ptr(data, esize) > Add2Ptr(lrh, rec_len) ||
73852 +                   esize > le32_to_cpu(rec->total) - le32_to_cpu(rec->used)) {
73853 +                       goto dirty_vol;
73854 +               }
73856 +               e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
73858 +               change_attr_size(rec, attr, le32_to_cpu(attr->size) + esize);
73860 +               memmove(Add2Ptr(e1, esize), e1,
73861 +                       PtrOffset(e1, Add2Ptr(hdr, used)));
73862 +               memmove(e1, e, esize);
73864 +               le32_add_cpu(&attr->res.data_size, esize);
73865 +               hdr->used = cpu_to_le32(used + esize);
73866 +               le32_add_cpu(&hdr->total, esize);
73868 +               mi->dirty = true;
73869 +               break;
73871 +       case DeleteIndexEntryRoot:
73872 +               root = resident_data(attr);
73873 +               hdr = &root->ihdr;
73874 +               used = le32_to_cpu(hdr->used);
73876 +               if (!check_if_index_root(rec, lrh) ||
73877 +                   !check_if_root_index(attr, hdr, lrh)) {
73878 +                       goto dirty_vol;
73879 +               }
73881 +               e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
73882 +               esize = le16_to_cpu(e1->size);
73883 +               e2 = Add2Ptr(e1, esize);
73885 +               memmove(e1, e2, PtrOffset(e2, Add2Ptr(hdr, used)));
73887 +               le32_sub_cpu(&attr->res.data_size, esize);
73888 +               hdr->used = cpu_to_le32(used - esize);
73889 +               le32_sub_cpu(&hdr->total, esize);
73891 +               change_attr_size(rec, attr, le32_to_cpu(attr->size) - esize);
73893 +               mi->dirty = true;
73894 +               break;
73896 +       case SetIndexEntryVcnRoot:
73897 +               root = resident_data(attr);
73898 +               hdr = &root->ihdr;
73900 +               if (!check_if_index_root(rec, lrh) ||
73901 +                   !check_if_root_index(attr, hdr, lrh)) {
73902 +                       goto dirty_vol;
73903 +               }
73905 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
73907 +               de_set_vbn_le(e, *(__le64 *)data);
73908 +               mi->dirty = true;
73909 +               break;
73911 +       case UpdateFileNameRoot:
73912 +               root = resident_data(attr);
73913 +               hdr = &root->ihdr;
73915 +               if (!check_if_index_root(rec, lrh) ||
73916 +                   !check_if_root_index(attr, hdr, lrh)) {
73917 +                       goto dirty_vol;
73918 +               }
73920 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
73921 +               fname = (struct ATTR_FILE_NAME *)(e + 1);
73922 +               memmove(&fname->dup, data, sizeof(fname->dup)); //
73923 +               mi->dirty = true;
73924 +               break;
73926 +       case UpdateRecordDataRoot:
73927 +               root = resident_data(attr);
73928 +               hdr = &root->ihdr;
73930 +               if (!check_if_index_root(rec, lrh) ||
73931 +                   !check_if_root_index(attr, hdr, lrh)) {
73932 +                       goto dirty_vol;
73933 +               }
73935 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
73937 +               memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
73939 +               mi->dirty = true;
73940 +               break;
73942 +       case ZeroEndOfFileRecord:
73943 +               if (roff + dlen > record_size)
73944 +                       goto dirty_vol;
73946 +               memset(attr, 0, dlen);
73947 +               mi->dirty = true;
73948 +               break;
73950 +       case UpdateNonresidentValue:
73951 +               if (lco < cbo + roff + dlen)
73952 +                       goto dirty_vol;
73954 +               memcpy(Add2Ptr(buffer_le, roff), data, dlen);
73956 +               a_dirty = true;
73957 +               if (attr->type == ATTR_ALLOC)
73958 +                       ntfs_fix_pre_write(buffer_le, bytes);
73959 +               break;
73961 +       case AddIndexEntryAllocation:
73962 +               ib = Add2Ptr(buffer_le, roff);
73963 +               hdr = &ib->ihdr;
73964 +               e = data;
73965 +               esize = le16_to_cpu(e->size);
73966 +               e1 = Add2Ptr(ib, aoff);
73968 +               if (is_baad(&ib->rhdr))
73969 +                       goto dirty_vol;
73970 +               if (!check_lsn(&ib->rhdr, rlsn))
73971 +                       goto out;
73973 +               used = le32_to_cpu(hdr->used);
73975 +               if (!check_index_buffer(ib, bytes) ||
73976 +                   !check_if_alloc_index(hdr, aoff) ||
73977 +                   Add2Ptr(e, esize) > Add2Ptr(lrh, rec_len) ||
73978 +                   used + esize > le32_to_cpu(hdr->total)) {
73979 +                       goto dirty_vol;
73980 +               }
73982 +               memmove(Add2Ptr(e1, esize), e1,
73983 +                       PtrOffset(e1, Add2Ptr(hdr, used)));
73984 +               memcpy(e1, e, esize);
73986 +               hdr->used = cpu_to_le32(used + esize);
73988 +               a_dirty = true;
73990 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
73991 +               break;
73993 +       case DeleteIndexEntryAllocation:
73994 +               ib = Add2Ptr(buffer_le, roff);
73995 +               hdr = &ib->ihdr;
73996 +               e = Add2Ptr(ib, aoff);
73997 +               esize = le16_to_cpu(e->size);
73999 +               if (is_baad(&ib->rhdr))
74000 +                       goto dirty_vol;
74001 +               if (!check_lsn(&ib->rhdr, rlsn))
74002 +                       goto out;
74004 +               if (!check_index_buffer(ib, bytes) ||
74005 +                   !check_if_alloc_index(hdr, aoff)) {
74006 +                       goto dirty_vol;
74007 +               }
74009 +               e1 = Add2Ptr(e, esize);
74010 +               nsize = esize;
74011 +               used = le32_to_cpu(hdr->used);
74013 +               memmove(e, e1, PtrOffset(e1, Add2Ptr(hdr, used)));
74015 +               hdr->used = cpu_to_le32(used - nsize);
74017 +               a_dirty = true;
74019 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
74020 +               break;
74022 +       case WriteEndOfIndexBuffer:
74023 +               ib = Add2Ptr(buffer_le, roff);
74024 +               hdr = &ib->ihdr;
74025 +               e = Add2Ptr(ib, aoff);
74027 +               if (is_baad(&ib->rhdr))
74028 +                       goto dirty_vol;
74029 +               if (!check_lsn(&ib->rhdr, rlsn))
74030 +                       goto out;
74031 +               if (!check_index_buffer(ib, bytes) ||
74032 +                   !check_if_alloc_index(hdr, aoff) ||
74033 +                   aoff + dlen > offsetof(struct INDEX_BUFFER, ihdr) +
74034 +                                         le32_to_cpu(hdr->total)) {
74035 +                       goto dirty_vol;
74036 +               }
74038 +               hdr->used = cpu_to_le32(dlen + PtrOffset(hdr, e));
74039 +               memmove(e, data, dlen);
74041 +               a_dirty = true;
74042 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
74043 +               break;
74045 +       case SetIndexEntryVcnAllocation:
74046 +               ib = Add2Ptr(buffer_le, roff);
74047 +               hdr = &ib->ihdr;
74048 +               e = Add2Ptr(ib, aoff);
74050 +               if (is_baad(&ib->rhdr))
74051 +                       goto dirty_vol;
74053 +               if (!check_lsn(&ib->rhdr, rlsn))
74054 +                       goto out;
74055 +               if (!check_index_buffer(ib, bytes) ||
74056 +                   !check_if_alloc_index(hdr, aoff)) {
74057 +                       goto dirty_vol;
74058 +               }
74060 +               de_set_vbn_le(e, *(__le64 *)data);
74062 +               a_dirty = true;
74063 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
74064 +               break;
74066 +       case UpdateFileNameAllocation:
74067 +               ib = Add2Ptr(buffer_le, roff);
74068 +               hdr = &ib->ihdr;
74069 +               e = Add2Ptr(ib, aoff);
74071 +               if (is_baad(&ib->rhdr))
74072 +                       goto dirty_vol;
74074 +               if (!check_lsn(&ib->rhdr, rlsn))
74075 +                       goto out;
74076 +               if (!check_index_buffer(ib, bytes) ||
74077 +                   !check_if_alloc_index(hdr, aoff)) {
74078 +                       goto dirty_vol;
74079 +               }
74081 +               fname = (struct ATTR_FILE_NAME *)(e + 1);
74082 +               memmove(&fname->dup, data, sizeof(fname->dup));
74084 +               a_dirty = true;
74085 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
74086 +               break;
74088 +       case SetBitsInNonresidentBitMap:
74089 +               bmp_off =
74090 +                       le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
74091 +               bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
74093 +               if (cbo + (bmp_off + 7) / 8 > lco ||
74094 +                   cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
74095 +                       goto dirty_vol;
74096 +               }
74098 +               __bitmap_set(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
74099 +               a_dirty = true;
74100 +               break;
74102 +       case ClearBitsInNonresidentBitMap:
74103 +               bmp_off =
74104 +                       le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
74105 +               bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
74107 +               if (cbo + (bmp_off + 7) / 8 > lco ||
74108 +                   cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
74109 +                       goto dirty_vol;
74110 +               }
74112 +               __bitmap_clear(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
74113 +               a_dirty = true;
74114 +               break;
74116 +       case UpdateRecordDataAllocation:
74117 +               ib = Add2Ptr(buffer_le, roff);
74118 +               hdr = &ib->ihdr;
74119 +               e = Add2Ptr(ib, aoff);
74121 +               if (is_baad(&ib->rhdr))
74122 +                       goto dirty_vol;
74124 +               if (!check_lsn(&ib->rhdr, rlsn))
74125 +                       goto out;
74126 +               if (!check_index_buffer(ib, bytes) ||
74127 +                   !check_if_alloc_index(hdr, aoff)) {
74128 +                       goto dirty_vol;
74129 +               }
74131 +               memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
74133 +               a_dirty = true;
74134 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
74135 +               break;
74137 +       default:
74138 +               WARN_ON(1);
74139 +       }
74141 +       if (rlsn) {
74142 +               __le64 t64 = cpu_to_le64(*rlsn);
74144 +               if (rec)
74145 +                       rec->rhdr.lsn = t64;
74146 +               if (ib)
74147 +                       ib->rhdr.lsn = t64;
74148 +       }
74150 +       if (mi && mi->dirty) {
74151 +               err = mi_write(mi, 0);
74152 +               if (err)
74153 +                       goto out;
74154 +       }
74156 +       if (a_dirty) {
74157 +               attr = oa->attr;
74158 +               err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes);
74159 +               if (err)
74160 +                       goto out;
74161 +       }
74163 +out:
74165 +       if (inode)
74166 +               iput(inode);
74167 +       else if (mi != mi2_child)
74168 +               mi_put(mi);
74170 +       ntfs_free(buffer_le);
74172 +       return err;
74174 +dirty_vol:
74175 +       log->set_dirty = true;
74176 +       goto out;
74180 + * log_replay
74181 + *
74182 + * this function is called during mount operation
74183 + * it replays log and empties it
74184 + * initialized is set false if logfile contains '-1'
74185 + */
74186 +int log_replay(struct ntfs_inode *ni, bool *initialized)
74188 +       int err;
74189 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74190 +       struct ntfs_log *log;
74192 +       struct restart_info rst_info, rst_info2;
74193 +       u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0;
74194 +       struct ATTR_NAME_ENTRY *attr_names = NULL;
74195 +       struct ATTR_NAME_ENTRY *ane;
74196 +       struct RESTART_TABLE *dptbl = NULL;
74197 +       struct RESTART_TABLE *trtbl = NULL;
74198 +       const struct RESTART_TABLE *rt;
74199 +       struct RESTART_TABLE *oatbl = NULL;
74200 +       struct inode *inode;
74201 +       struct OpenAttr *oa;
74202 +       struct ntfs_inode *ni_oe;
74203 +       struct ATTRIB *attr = NULL;
74204 +       u64 size, vcn, undo_next_lsn;
74205 +       CLST rno, lcn, lcn0, len0, clen;
74206 +       void *data;
74207 +       struct NTFS_RESTART *rst = NULL;
74208 +       struct lcb *lcb = NULL;
74209 +       struct OPEN_ATTR_ENRTY *oe;
74210 +       struct TRANSACTION_ENTRY *tr;
74211 +       struct DIR_PAGE_ENTRY *dp;
74212 +       u32 i, bytes_per_attr_entry;
74213 +       u32 l_size = ni->vfs_inode.i_size;
74214 +       u32 orig_file_size = l_size;
74215 +       u32 page_size, vbo, tail, off, dlen;
74216 +       u32 saved_len, rec_len, transact_id;
74217 +       bool use_second_page;
74218 +       struct RESTART_AREA *ra2, *ra = NULL;
74219 +       struct CLIENT_REC *ca, *cr;
74220 +       __le16 client;
74221 +       struct RESTART_HDR *rh;
74222 +       const struct LFS_RECORD_HDR *frh;
74223 +       const struct LOG_REC_HDR *lrh;
74224 +       bool is_mapped;
74225 +       bool is_ro = sb_rdonly(sbi->sb);
74226 +       u64 t64;
74227 +       u16 t16;
74228 +       u32 t32;
74230 +       /* Get the size of page. NOTE: To replay we can use default page */
74231 +#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
74232 +       page_size = norm_file_page(PAGE_SIZE, &l_size, true);
74233 +#else
74234 +       page_size = norm_file_page(PAGE_SIZE, &l_size, false);
74235 +#endif
74236 +       if (!page_size)
74237 +               return -EINVAL;
74239 +       log = ntfs_zalloc(sizeof(struct ntfs_log));
74240 +       if (!log)
74241 +               return -ENOMEM;
74243 +       log->ni = ni;
74244 +       log->l_size = l_size;
74245 +       log->one_page_buf = ntfs_malloc(page_size);
74247 +       if (!log->one_page_buf) {
74248 +               err = -ENOMEM;
74249 +               goto out;
74250 +       }
74252 +       log->page_size = page_size;
74253 +       log->page_mask = page_size - 1;
74254 +       log->page_bits = blksize_bits(page_size);
74256 +       /* Look for a restart area on the disk */
74257 +       err = log_read_rst(log, l_size, true, &rst_info);
74258 +       if (err)
74259 +               goto out;
74261 +       /* remember 'initialized' */
74262 +       *initialized = rst_info.initialized;
74264 +       if (!rst_info.restart) {
74265 +               if (rst_info.initialized) {
74266 +                       /* no restart area but the file is not initialized */
74267 +                       err = -EINVAL;
74268 +                       goto out;
74269 +               }
74271 +               log_init_pg_hdr(log, page_size, page_size, 1, 1);
74272 +               log_create(log, l_size, 0, get_random_int(), false, false);
74274 +               log->ra = ra;
74276 +               ra = log_create_ra(log);
74277 +               if (!ra) {
74278 +                       err = -ENOMEM;
74279 +                       goto out;
74280 +               }
74281 +               log->ra = ra;
74282 +               log->init_ra = true;
74284 +               goto process_log;
74285 +       }
74287 +       /*
74288 +        * If the restart offset above wasn't zero then we won't
74289 +        * look for a second restart
74290 +        */
74291 +       if (rst_info.vbo)
74292 +               goto check_restart_area;
74294 +       err = log_read_rst(log, l_size, false, &rst_info2);
74296 +       /* Determine which restart area to use */
74297 +       if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
74298 +               goto use_first_page;
74300 +       use_second_page = true;
74302 +       if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) {
74303 +               struct RECORD_PAGE_HDR *sp = NULL;
74304 +               bool usa_error;
74306 +               if (!read_log_page(log, page_size, &sp, &usa_error) &&
74307 +                   sp->rhdr.sign == NTFS_CHKD_SIGNATURE) {
74308 +                       use_second_page = false;
74309 +               }
74310 +               ntfs_free(sp);
74311 +       }
74313 +       if (use_second_page) {
74314 +               ntfs_free(rst_info.r_page);
74315 +               memcpy(&rst_info, &rst_info2, sizeof(struct restart_info));
74316 +               rst_info2.r_page = NULL;
74317 +       }
74319 +use_first_page:
74320 +       ntfs_free(rst_info2.r_page);
74322 +check_restart_area:
74323 +       /* If the restart area is at offset 0, we want to write the second restart area first */
74324 +       log->init_ra = !!rst_info.vbo;
74326 +       /* If we have a valid page then grab a pointer to the restart area */
74327 +       ra2 = rst_info.valid_page
74328 +                     ? Add2Ptr(rst_info.r_page,
74329 +                               le16_to_cpu(rst_info.r_page->ra_off))
74330 +                     : NULL;
74332 +       if (rst_info.chkdsk_was_run ||
74333 +           (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
74334 +               bool wrapped = false;
74335 +               bool use_multi_page = false;
74336 +               u32 open_log_count;
74338 +               /* Do some checks based on whether we have a valid log page */
74339 +               if (!rst_info.valid_page) {
74340 +                       open_log_count = get_random_int();
74341 +                       goto init_log_instance;
74342 +               }
74343 +               open_log_count = le32_to_cpu(ra2->open_log_count);
74345 +               /*
74346 +                * If the restart page size isn't changing then we want to
74347 +                * check how much work we need to do
74348 +                */
74349 +               if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size))
74350 +                       goto init_log_instance;
74352 +init_log_instance:
74353 +               log_init_pg_hdr(log, page_size, page_size, 1, 1);
74355 +               log_create(log, l_size, rst_info.last_lsn, open_log_count,
74356 +                          wrapped, use_multi_page);
74358 +               ra = log_create_ra(log);
74359 +               if (!ra) {
74360 +                       err = -ENOMEM;
74361 +                       goto out;
74362 +               }
74363 +               log->ra = ra;
74365 +               /* Put the restart areas and initialize the log file as required */
74366 +               goto process_log;
74367 +       }
74369 +       if (!ra2) {
74370 +               err = -EINVAL;
74371 +               goto out;
74372 +       }
74374 +       /*
74375 +        * If the log page or the system page sizes have changed, we can't use the log file
74376 +        * We must use the system page size instead of the default size
74377 +        * if there is not a clean shutdown
74378 +        */
74379 +       t32 = le32_to_cpu(rst_info.r_page->sys_page_size);
74380 +       if (page_size != t32) {
74381 +               l_size = orig_file_size;
74382 +               page_size =
74383 +                       norm_file_page(t32, &l_size, t32 == DefaultLogPageSize);
74384 +       }
74386 +       if (page_size != t32 ||
74387 +           page_size != le32_to_cpu(rst_info.r_page->page_size)) {
74388 +               err = -EINVAL;
74389 +               goto out;
74390 +       }
74392 +       /* If the file size has shrunk then we won't mount it */
74393 +       if (l_size < le64_to_cpu(ra2->l_size)) {
74394 +               err = -EINVAL;
74395 +               goto out;
74396 +       }
74398 +       log_init_pg_hdr(log, page_size, page_size,
74399 +                       le16_to_cpu(rst_info.r_page->major_ver),
74400 +                       le16_to_cpu(rst_info.r_page->minor_ver));
74402 +       log->l_size = le64_to_cpu(ra2->l_size);
74403 +       log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits);
74404 +       log->file_data_bits = sizeof(u64) * 8 - log->seq_num_bits;
74405 +       log->seq_num_mask = (8 << log->file_data_bits) - 1;
74406 +       log->last_lsn = le64_to_cpu(ra2->current_lsn);
74407 +       log->seq_num = log->last_lsn >> log->file_data_bits;
74408 +       log->ra_off = le16_to_cpu(rst_info.r_page->ra_off);
74409 +       log->restart_size = log->sys_page_size - log->ra_off;
74410 +       log->record_header_len = le16_to_cpu(ra2->rec_hdr_len);
74411 +       log->ra_size = le16_to_cpu(ra2->ra_len);
74412 +       log->data_off = le16_to_cpu(ra2->data_off);
74413 +       log->data_size = log->page_size - log->data_off;
74414 +       log->reserved = log->data_size - log->record_header_len;
74416 +       vbo = lsn_to_vbo(log, log->last_lsn);
74418 +       if (vbo < log->first_page) {
74419 +               /* This is a pseudo lsn */
74420 +               log->l_flags |= NTFSLOG_NO_LAST_LSN;
74421 +               log->next_page = log->first_page;
74422 +               goto find_oldest;
74423 +       }
74425 +       /* Find the end of this log record */
74426 +       off = final_log_off(log, log->last_lsn,
74427 +                           le32_to_cpu(ra2->last_lsn_data_len));
74429 +       /* If we wrapped the file then increment the sequence number */
74430 +       if (off <= vbo) {
74431 +               log->seq_num += 1;
74432 +               log->l_flags |= NTFSLOG_WRAPPED;
74433 +       }
74435 +       /* Now compute the next log page to use */
74436 +       vbo &= ~log->sys_page_mask;
74437 +       tail = log->page_size - (off & log->page_mask) - 1;
74439 +       /* If we can fit another log record on the page, move back a page the log file */
74440 +       if (tail >= log->record_header_len) {
74441 +               log->l_flags |= NTFSLOG_REUSE_TAIL;
74442 +               log->next_page = vbo;
74443 +       } else {
74444 +               log->next_page = next_page_off(log, vbo);
74445 +       }
74447 +find_oldest:
74448 +       /* Find the oldest client lsn. Use the last flushed lsn as a starting point */
74449 +       log->oldest_lsn = log->last_lsn;
74450 +       oldest_client_lsn(Add2Ptr(ra2, le16_to_cpu(ra2->client_off)),
74451 +                         ra2->client_idx[1], &log->oldest_lsn);
74452 +       log->oldest_lsn_off = lsn_to_vbo(log, log->oldest_lsn);
74454 +       if (log->oldest_lsn_off < log->first_page)
74455 +               log->l_flags |= NTFSLOG_NO_OLDEST_LSN;
74457 +       if (!(ra2->flags & RESTART_SINGLE_PAGE_IO))
74458 +               log->l_flags |= NTFSLOG_WRAPPED | NTFSLOG_MULTIPLE_PAGE_IO;
74460 +       log->current_openlog_count = le32_to_cpu(ra2->open_log_count);
74461 +       log->total_avail_pages = log->l_size - log->first_page;
74462 +       log->total_avail = log->total_avail_pages >> log->page_bits;
74463 +       log->max_current_avail = log->total_avail * log->reserved;
74464 +       log->total_avail = log->total_avail * log->data_size;
74466 +       log->current_avail = current_log_avail(log);
74468 +       ra = ntfs_zalloc(log->restart_size);
74469 +       if (!ra) {
74470 +               err = -ENOMEM;
74471 +               goto out;
74472 +       }
74473 +       log->ra = ra;
74475 +       t16 = le16_to_cpu(ra2->client_off);
74476 +       if (t16 == offsetof(struct RESTART_AREA, clients)) {
74477 +               memcpy(ra, ra2, log->ra_size);
74478 +       } else {
74479 +               memcpy(ra, ra2, offsetof(struct RESTART_AREA, clients));
74480 +               memcpy(ra->clients, Add2Ptr(ra2, t16),
74481 +                      le16_to_cpu(ra2->ra_len) - t16);
74483 +               log->current_openlog_count = get_random_int();
74484 +               ra->open_log_count = cpu_to_le32(log->current_openlog_count);
74485 +               log->ra_size = offsetof(struct RESTART_AREA, clients) +
74486 +                              sizeof(struct CLIENT_REC);
74487 +               ra->client_off =
74488 +                       cpu_to_le16(offsetof(struct RESTART_AREA, clients));
74489 +               ra->ra_len = cpu_to_le16(log->ra_size);
74490 +       }
74492 +       le32_add_cpu(&ra->open_log_count, 1);
74494 +       /* Now we need to walk through looking for the last lsn */
74495 +       err = last_log_lsn(log);
74496 +       if (err)
74497 +               goto out;
74499 +       log->current_avail = current_log_avail(log);
74501 +       /* Remember which restart area to write first */
74502 +       log->init_ra = rst_info.vbo;
74504 +process_log:
74505 +       /* 1.0, 1.1, 2.0 log->major_ver/minor_ver - short values */
74506 +       switch ((log->major_ver << 16) + log->minor_ver) {
74507 +       case 0x10000:
74508 +       case 0x10001:
74509 +       case 0x20000:
74510 +               break;
74511 +       default:
74512 +               ntfs_warn(sbi->sb, "\x24LogFile version %d.%d is not supported",
74513 +                         log->major_ver, log->minor_ver);
74514 +               err = -EOPNOTSUPP;
74515 +               log->set_dirty = true;
74516 +               goto out;
74517 +       }
74519 +       /* One client "NTFS" per logfile */
74520 +       ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
74522 +       for (client = ra->client_idx[1];; client = cr->next_client) {
74523 +               if (client == LFS_NO_CLIENT_LE) {
74524 +                       /* Insert "NTFS" client LogFile */
74525 +                       client = ra->client_idx[0];
74526 +                       if (client == LFS_NO_CLIENT_LE)
74527 +                               return -EINVAL;
74529 +                       t16 = le16_to_cpu(client);
74530 +                       cr = ca + t16;
74532 +                       remove_client(ca, cr, &ra->client_idx[0]);
74534 +                       cr->restart_lsn = 0;
74535 +                       cr->oldest_lsn = cpu_to_le64(log->oldest_lsn);
74536 +                       cr->name_bytes = cpu_to_le32(8);
74537 +                       cr->name[0] = cpu_to_le16('N');
74538 +                       cr->name[1] = cpu_to_le16('T');
74539 +                       cr->name[2] = cpu_to_le16('F');
74540 +                       cr->name[3] = cpu_to_le16('S');
74542 +                       add_client(ca, t16, &ra->client_idx[1]);
74543 +                       break;
74544 +               }
74546 +               cr = ca + le16_to_cpu(client);
74548 +               if (cpu_to_le32(8) == cr->name_bytes &&
74549 +                   cpu_to_le16('N') == cr->name[0] &&
74550 +                   cpu_to_le16('T') == cr->name[1] &&
74551 +                   cpu_to_le16('F') == cr->name[2] &&
74552 +                   cpu_to_le16('S') == cr->name[3])
74553 +                       break;
74554 +       }
74556 +       /* Update the client handle with the client block information */
74557 +       log->client_id.seq_num = cr->seq_num;
74558 +       log->client_id.client_idx = client;
74560 +       err = read_rst_area(log, &rst, &ra_lsn);
74561 +       if (err)
74562 +               goto out;
74564 +       if (!rst)
74565 +               goto out;
74567 +       bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28;
74569 +       checkpt_lsn = le64_to_cpu(rst->check_point_start);
74570 +       if (!checkpt_lsn)
74571 +               checkpt_lsn = ra_lsn;
74573 +       /* Allocate and Read the Transaction Table */
74574 +       if (!rst->transact_table_len)
74575 +               goto check_dirty_page_table;
74577 +       t64 = le64_to_cpu(rst->transact_table_lsn);
74578 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
74579 +       if (err)
74580 +               goto out;
74582 +       lrh = lcb->log_rec;
74583 +       frh = lcb->lrh;
74584 +       rec_len = le32_to_cpu(frh->client_data_len);
74586 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
74587 +                          bytes_per_attr_entry)) {
74588 +               err = -EINVAL;
74589 +               goto out;
74590 +       }
74592 +       t16 = le16_to_cpu(lrh->redo_off);
74594 +       rt = Add2Ptr(lrh, t16);
74595 +       t32 = rec_len - t16;
74597 +       /* Now check that this is a valid restart table */
74598 +       if (!check_rstbl(rt, t32)) {
74599 +               err = -EINVAL;
74600 +               goto out;
74601 +       }
74603 +       trtbl = ntfs_memdup(rt, t32);
74604 +       if (!trtbl) {
74605 +               err = -ENOMEM;
74606 +               goto out;
74607 +       }
74609 +       lcb_put(lcb);
74610 +       lcb = NULL;
74612 +check_dirty_page_table:
74613 +       /* The next record back should be the Dirty Pages Table */
74614 +       if (!rst->dirty_pages_len)
74615 +               goto check_attribute_names;
74617 +       t64 = le64_to_cpu(rst->dirty_pages_table_lsn);
74618 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
74619 +       if (err)
74620 +               goto out;
74622 +       lrh = lcb->log_rec;
74623 +       frh = lcb->lrh;
74624 +       rec_len = le32_to_cpu(frh->client_data_len);
74626 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
74627 +                          bytes_per_attr_entry)) {
74628 +               err = -EINVAL;
74629 +               goto out;
74630 +       }
74632 +       t16 = le16_to_cpu(lrh->redo_off);
74634 +       rt = Add2Ptr(lrh, t16);
74635 +       t32 = rec_len - t16;
74637 +       /* Now check that this is a valid restart table */
74638 +       if (!check_rstbl(rt, t32)) {
74639 +               err = -EINVAL;
74640 +               goto out;
74641 +       }
74643 +       dptbl = ntfs_memdup(rt, t32);
74644 +       if (!dptbl) {
74645 +               err = -ENOMEM;
74646 +               goto out;
74647 +       }
74649 +       /* Convert Ra version '0' into version '1' */
74650 +       if (rst->major_ver)
74651 +               goto end_conv_1;
74653 +       dp = NULL;
74654 +       while ((dp = enum_rstbl(dptbl, dp))) {
74655 +               struct DIR_PAGE_ENTRY_32 *dp0 = (struct DIR_PAGE_ENTRY_32 *)dp;
74656 +               // NOTE: Danger. Check for of boundary
74657 +               memmove(&dp->vcn, &dp0->vcn_low,
74658 +                       2 * sizeof(u64) +
74659 +                               le32_to_cpu(dp->lcns_follow) * sizeof(u64));
74660 +       }
74662 +end_conv_1:
74663 +       lcb_put(lcb);
74664 +       lcb = NULL;
74666 +       /* Go through the table and remove the duplicates, remembering the oldest lsn values */
74667 +       if (sbi->cluster_size <= log->page_size)
74668 +               goto trace_dp_table;
74670 +       dp = NULL;
74671 +       while ((dp = enum_rstbl(dptbl, dp))) {
74672 +               struct DIR_PAGE_ENTRY *next = dp;
74674 +               while ((next = enum_rstbl(dptbl, next))) {
74675 +                       if (next->target_attr == dp->target_attr &&
74676 +                           next->vcn == dp->vcn) {
74677 +                               if (le64_to_cpu(next->oldest_lsn) <
74678 +                                   le64_to_cpu(dp->oldest_lsn)) {
74679 +                                       dp->oldest_lsn = next->oldest_lsn;
74680 +                               }
74682 +                               free_rsttbl_idx(dptbl, PtrOffset(dptbl, next));
74683 +                       }
74684 +               }
74685 +       }
74686 +trace_dp_table:
74687 +check_attribute_names:
74688 +       /* The next record should be the Attribute Names */
74689 +       if (!rst->attr_names_len)
74690 +               goto check_attr_table;
74692 +       t64 = le64_to_cpu(rst->attr_names_lsn);
74693 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
74694 +       if (err)
74695 +               goto out;
74697 +       lrh = lcb->log_rec;
74698 +       frh = lcb->lrh;
74699 +       rec_len = le32_to_cpu(frh->client_data_len);
74701 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
74702 +                          bytes_per_attr_entry)) {
74703 +               err = -EINVAL;
74704 +               goto out;
74705 +       }
74707 +       t32 = lrh_length(lrh);
74708 +       rec_len -= t32;
74710 +       attr_names = ntfs_memdup(Add2Ptr(lrh, t32), rec_len);
74712 +       lcb_put(lcb);
74713 +       lcb = NULL;
74715 +check_attr_table:
74716 +       /* The next record should be the attribute Table */
74717 +       if (!rst->open_attr_len)
74718 +               goto check_attribute_names2;
74720 +       t64 = le64_to_cpu(rst->open_attr_table_lsn);
74721 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
74722 +       if (err)
74723 +               goto out;
74725 +       lrh = lcb->log_rec;
74726 +       frh = lcb->lrh;
74727 +       rec_len = le32_to_cpu(frh->client_data_len);
74729 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
74730 +                          bytes_per_attr_entry)) {
74731 +               err = -EINVAL;
74732 +               goto out;
74733 +       }
74735 +       t16 = le16_to_cpu(lrh->redo_off);
74737 +       rt = Add2Ptr(lrh, t16);
74738 +       t32 = rec_len - t16;
74740 +       if (!check_rstbl(rt, t32)) {
74741 +               err = -EINVAL;
74742 +               goto out;
74743 +       }
74745 +       oatbl = ntfs_memdup(rt, t32);
74746 +       if (!oatbl) {
74747 +               err = -ENOMEM;
74748 +               goto out;
74749 +       }
74751 +       log->open_attr_tbl = oatbl;
74753 +       /* Clear all of the Attr pointers */
74754 +       oe = NULL;
74755 +       while ((oe = enum_rstbl(oatbl, oe))) {
74756 +               if (!rst->major_ver) {
74757 +                       struct OPEN_ATTR_ENRTY_32 oe0;
74759 +                       /* Really 'oe' points to OPEN_ATTR_ENRTY_32 */
74760 +                       memcpy(&oe0, oe, SIZEOF_OPENATTRIBUTEENTRY0);
74762 +                       oe->bytes_per_index = oe0.bytes_per_index;
74763 +                       oe->type = oe0.type;
74764 +                       oe->is_dirty_pages = oe0.is_dirty_pages;
74765 +                       oe->name_len = 0;
74766 +                       oe->ref = oe0.ref;
74767 +                       oe->open_record_lsn = oe0.open_record_lsn;
74768 +               }
74770 +               oe->is_attr_name = 0;
74771 +               oe->ptr = NULL;
74772 +       }
74774 +       lcb_put(lcb);
74775 +       lcb = NULL;
74777 +check_attribute_names2:
74778 +       if (!rst->attr_names_len)
74779 +               goto trace_attribute_table;
74781 +       ane = attr_names;
74782 +       if (!oatbl)
74783 +               goto trace_attribute_table;
74784 +       while (ane->off) {
74785 +               /* TODO: Clear table on exit! */
74786 +               oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
74787 +               t16 = le16_to_cpu(ane->name_bytes);
74788 +               oe->name_len = t16 / sizeof(short);
74789 +               oe->ptr = ane->name;
74790 +               oe->is_attr_name = 2;
74791 +               ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16);
74792 +       }
74794 +trace_attribute_table:
74795 +       /*
74796 +        * If the checkpt_lsn is zero, then this is a freshly
74797 +        * formatted disk and we have no work to do
74798 +        */
74799 +       if (!checkpt_lsn) {
74800 +               err = 0;
74801 +               goto out;
74802 +       }
74804 +       if (!oatbl) {
74805 +               oatbl = init_rsttbl(bytes_per_attr_entry, 8);
74806 +               if (!oatbl) {
74807 +                       err = -ENOMEM;
74808 +                       goto out;
74809 +               }
74810 +       }
74812 +       log->open_attr_tbl = oatbl;
74814 +       /* Start the analysis pass from the Checkpoint lsn. */
74815 +       rec_lsn = checkpt_lsn;
74817 +       /* Read the first lsn */
74818 +       err = read_log_rec_lcb(log, checkpt_lsn, lcb_ctx_next, &lcb);
74819 +       if (err)
74820 +               goto out;
74822 +       /* Loop to read all subsequent records to the end of the log file */
74823 +next_log_record_analyze:
74824 +       err = read_next_log_rec(log, lcb, &rec_lsn);
74825 +       if (err)
74826 +               goto out;
74828 +       if (!rec_lsn)
74829 +               goto end_log_records_enumerate;
74831 +       frh = lcb->lrh;
74832 +       transact_id = le32_to_cpu(frh->transact_id);
74833 +       rec_len = le32_to_cpu(frh->client_data_len);
74834 +       lrh = lcb->log_rec;
74836 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
74837 +               err = -EINVAL;
74838 +               goto out;
74839 +       }
74841 +       /*
74842 +        * The first lsn after the previous lsn remembered
74843 +        * the checkpoint is the first candidate for the rlsn
74844 +        */
74845 +       if (!rlsn)
74846 +               rlsn = rec_lsn;
74848 +       if (LfsClientRecord != frh->record_type)
74849 +               goto next_log_record_analyze;
74851 +       /*
74852 +        * Now update the Transaction Table for this transaction
74853 +        * If there is no entry present or it is unallocated we allocate the entry
74854 +        */
74855 +       if (!trtbl) {
74856 +               trtbl = init_rsttbl(sizeof(struct TRANSACTION_ENTRY),
74857 +                                   INITIAL_NUMBER_TRANSACTIONS);
74858 +               if (!trtbl) {
74859 +                       err = -ENOMEM;
74860 +                       goto out;
74861 +               }
74862 +       }
74864 +       tr = Add2Ptr(trtbl, transact_id);
74866 +       if (transact_id >= bytes_per_rt(trtbl) ||
74867 +           tr->next != RESTART_ENTRY_ALLOCATED_LE) {
74868 +               tr = alloc_rsttbl_from_idx(&trtbl, transact_id);
74869 +               if (!tr) {
74870 +                       err = -ENOMEM;
74871 +                       goto out;
74872 +               }
74873 +               tr->transact_state = TransactionActive;
74874 +               tr->first_lsn = cpu_to_le64(rec_lsn);
74875 +       }
74877 +       tr->prev_lsn = tr->undo_next_lsn = cpu_to_le64(rec_lsn);
74879 +       /*
74880 +        * If this is a compensation log record, then change
74881 +        * the undo_next_lsn to be the undo_next_lsn of this record
74882 +        */
74883 +       if (lrh->undo_op == cpu_to_le16(CompensationLogRecord))
74884 +               tr->undo_next_lsn = frh->client_undo_next_lsn;
74886 +       /* Dispatch to handle log record depending on type */
74887 +       switch (le16_to_cpu(lrh->redo_op)) {
74888 +       case InitializeFileRecordSegment:
74889 +       case DeallocateFileRecordSegment:
74890 +       case WriteEndOfFileRecordSegment:
74891 +       case CreateAttribute:
74892 +       case DeleteAttribute:
74893 +       case UpdateResidentValue:
74894 +       case UpdateNonresidentValue:
74895 +       case UpdateMappingPairs:
74896 +       case SetNewAttributeSizes:
74897 +       case AddIndexEntryRoot:
74898 +       case DeleteIndexEntryRoot:
74899 +       case AddIndexEntryAllocation:
74900 +       case DeleteIndexEntryAllocation:
74901 +       case WriteEndOfIndexBuffer:
74902 +       case SetIndexEntryVcnRoot:
74903 +       case SetIndexEntryVcnAllocation:
74904 +       case UpdateFileNameRoot:
74905 +       case UpdateFileNameAllocation:
74906 +       case SetBitsInNonresidentBitMap:
74907 +       case ClearBitsInNonresidentBitMap:
74908 +       case UpdateRecordDataRoot:
74909 +       case UpdateRecordDataAllocation:
74910 +       case ZeroEndOfFileRecord:
74911 +               t16 = le16_to_cpu(lrh->target_attr);
74912 +               t64 = le64_to_cpu(lrh->target_vcn);
74913 +               dp = find_dp(dptbl, t16, t64);
74915 +               if (dp)
74916 +                       goto copy_lcns;
74918 +               /*
74919 +                * Calculate the number of clusters per page the system
74920 +                * which wrote the checkpoint, possibly creating the table
74921 +                */
74922 +               if (dptbl) {
74923 +                       t32 = (le16_to_cpu(dptbl->size) -
74924 +                              sizeof(struct DIR_PAGE_ENTRY)) /
74925 +                             sizeof(u64);
74926 +               } else {
74927 +                       t32 = log->clst_per_page;
74928 +                       ntfs_free(dptbl);
74929 +                       dptbl = init_rsttbl(struct_size(dp, page_lcns, t32),
74930 +                                           32);
74931 +                       if (!dptbl) {
74932 +                               err = -ENOMEM;
74933 +                               goto out;
74934 +                       }
74935 +               }
74937 +               dp = alloc_rsttbl_idx(&dptbl);
74938 +               dp->target_attr = cpu_to_le32(t16);
74939 +               dp->transfer_len = cpu_to_le32(t32 << sbi->cluster_bits);
74940 +               dp->lcns_follow = cpu_to_le32(t32);
74941 +               dp->vcn = cpu_to_le64(t64 & ~((u64)t32 - 1));
74942 +               dp->oldest_lsn = cpu_to_le64(rec_lsn);
74944 +copy_lcns:
74945 +               /*
74946 +                * Copy the Lcns from the log record into the Dirty Page Entry
74947 +                * TODO: for different page size support, must somehow make
74948 +                * whole routine a loop, case Lcns do not fit below
74949 +                */
74950 +               t16 = le16_to_cpu(lrh->lcns_follow);
74951 +               for (i = 0; i < t16; i++) {
74952 +                       size_t j = (size_t)(le64_to_cpu(lrh->target_vcn) -
74953 +                                           le64_to_cpu(dp->vcn));
74954 +                       dp->page_lcns[j + i] = lrh->page_lcns[i];
74955 +               }
74957 +               goto next_log_record_analyze;
74959 +       case DeleteDirtyClusters: {
74960 +               u32 range_count =
74961 +                       le16_to_cpu(lrh->redo_len) / sizeof(struct LCN_RANGE);
74962 +               const struct LCN_RANGE *r =
74963 +                       Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
74965 +               /* Loop through all of the Lcn ranges this log record */
74966 +               for (i = 0; i < range_count; i++, r++) {
74967 +                       u64 lcn0 = le64_to_cpu(r->lcn);
74968 +                       u64 lcn_e = lcn0 + le64_to_cpu(r->len) - 1;
74970 +                       dp = NULL;
74971 +                       while ((dp = enum_rstbl(dptbl, dp))) {
74972 +                               u32 j;
74974 +                               t32 = le32_to_cpu(dp->lcns_follow);
74975 +                               for (j = 0; j < t32; j++) {
74976 +                                       t64 = le64_to_cpu(dp->page_lcns[j]);
74977 +                                       if (t64 >= lcn0 && t64 <= lcn_e)
74978 +                                               dp->page_lcns[j] = 0;
74979 +                               }
74980 +                       }
74981 +               }
74982 +               goto next_log_record_analyze;
74983 +               ;
74984 +       }
74986 +       case OpenNonresidentAttribute:
74987 +               t16 = le16_to_cpu(lrh->target_attr);
74988 +               if (t16 >= bytes_per_rt(oatbl)) {
74989 +                       /*
74990 +                        * Compute how big the table needs to be.
74991 +                        * Add 10 extra entries for some cushion
74992 +                        */
74993 +                       u32 new_e = t16 / le16_to_cpu(oatbl->size);
74995 +                       new_e += 10 - le16_to_cpu(oatbl->used);
74997 +                       oatbl = extend_rsttbl(oatbl, new_e, ~0u);
74998 +                       log->open_attr_tbl = oatbl;
74999 +                       if (!oatbl) {
75000 +                               err = -ENOMEM;
75001 +                               goto out;
75002 +                       }
75003 +               }
75005 +               /* Point to the entry being opened */
75006 +               oe = alloc_rsttbl_from_idx(&oatbl, t16);
75007 +               log->open_attr_tbl = oatbl;
75008 +               if (!oe) {
75009 +                       err = -ENOMEM;
75010 +                       goto out;
75011 +               }
75013 +               /* Initialize this entry from the log record */
75014 +               t16 = le16_to_cpu(lrh->redo_off);
75015 +               if (!rst->major_ver) {
75016 +                       /* Convert version '0' into version '1' */
75017 +                       struct OPEN_ATTR_ENRTY_32 *oe0 = Add2Ptr(lrh, t16);
75019 +                       oe->bytes_per_index = oe0->bytes_per_index;
75020 +                       oe->type = oe0->type;
75021 +                       oe->is_dirty_pages = oe0->is_dirty_pages;
75022 +                       oe->name_len = 0; //oe0.name_len;
75023 +                       oe->ref = oe0->ref;
75024 +                       oe->open_record_lsn = oe0->open_record_lsn;
75025 +               } else {
75026 +                       memcpy(oe, Add2Ptr(lrh, t16), bytes_per_attr_entry);
75027 +               }
75029 +               t16 = le16_to_cpu(lrh->undo_len);
75030 +               if (t16) {
75031 +                       oe->ptr = ntfs_malloc(t16);
75032 +                       if (!oe->ptr) {
75033 +                               err = -ENOMEM;
75034 +                               goto out;
75035 +                       }
75036 +                       oe->name_len = t16 / sizeof(short);
75037 +                       memcpy(oe->ptr,
75038 +                              Add2Ptr(lrh, le16_to_cpu(lrh->undo_off)), t16);
75039 +                       oe->is_attr_name = 1;
75040 +               } else {
75041 +                       oe->ptr = NULL;
75042 +                       oe->is_attr_name = 0;
75043 +               }
75045 +               goto next_log_record_analyze;
75047 +       case HotFix:
75048 +               t16 = le16_to_cpu(lrh->target_attr);
75049 +               t64 = le64_to_cpu(lrh->target_vcn);
75050 +               dp = find_dp(dptbl, t16, t64);
75051 +               if (dp) {
75052 +                       size_t j = le64_to_cpu(lrh->target_vcn) -
75053 +                                  le64_to_cpu(dp->vcn);
75054 +                       if (dp->page_lcns[j])
75055 +                               dp->page_lcns[j] = lrh->page_lcns[0];
75056 +               }
75057 +               goto next_log_record_analyze;
75059 +       case EndTopLevelAction:
75060 +               tr = Add2Ptr(trtbl, transact_id);
75061 +               tr->prev_lsn = cpu_to_le64(rec_lsn);
75062 +               tr->undo_next_lsn = frh->client_undo_next_lsn;
75063 +               goto next_log_record_analyze;
75065 +       case PrepareTransaction:
75066 +               tr = Add2Ptr(trtbl, transact_id);
75067 +               tr->transact_state = TransactionPrepared;
75068 +               goto next_log_record_analyze;
75070 +       case CommitTransaction:
75071 +               tr = Add2Ptr(trtbl, transact_id);
75072 +               tr->transact_state = TransactionCommitted;
75073 +               goto next_log_record_analyze;
75075 +       case ForgetTransaction:
75076 +               free_rsttbl_idx(trtbl, transact_id);
75077 +               goto next_log_record_analyze;
75079 +       case Noop:
75080 +       case OpenAttributeTableDump:
75081 +       case AttributeNamesDump:
75082 +       case DirtyPageTableDump:
75083 +       case TransactionTableDump:
75084 +               /* The following cases require no action the Analysis Pass */
75085 +               goto next_log_record_analyze;
75087 +       default:
75088 +               /*
75089 +                * All codes will be explicitly handled.
75090 +                * If we see a code we do not expect, then we are trouble
75091 +                */
75092 +               goto next_log_record_analyze;
75093 +       }
75095 +end_log_records_enumerate:
75096 +       lcb_put(lcb);
75097 +       lcb = NULL;
75099 +       /*
75100 +        * Scan the Dirty Page Table and Transaction Table for
75101 +        * the lowest lsn, and return it as the Redo lsn
75102 +        */
75103 +       dp = NULL;
75104 +       while ((dp = enum_rstbl(dptbl, dp))) {
75105 +               t64 = le64_to_cpu(dp->oldest_lsn);
75106 +               if (t64 && t64 < rlsn)
75107 +                       rlsn = t64;
75108 +       }
75110 +       tr = NULL;
75111 +       while ((tr = enum_rstbl(trtbl, tr))) {
75112 +               t64 = le64_to_cpu(tr->first_lsn);
75113 +               if (t64 && t64 < rlsn)
75114 +                       rlsn = t64;
75115 +       }
75117 +       /* Only proceed if the Dirty Page Table or Transaction table are not empty */
75118 +       if ((!dptbl || !dptbl->total) && (!trtbl || !trtbl->total))
75119 +               goto end_reply;
75121 +       sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
75122 +       if (is_ro)
75123 +               goto out;
75125 +       /* Reopen all of the attributes with dirty pages */
75126 +       oe = NULL;
75127 +next_open_attribute:
75129 +       oe = enum_rstbl(oatbl, oe);
75130 +       if (!oe) {
75131 +               err = 0;
75132 +               dp = NULL;
75133 +               goto next_dirty_page;
75134 +       }
75136 +       oa = ntfs_zalloc(sizeof(struct OpenAttr));
75137 +       if (!oa) {
75138 +               err = -ENOMEM;
75139 +               goto out;
75140 +       }
75142 +       inode = ntfs_iget5(sbi->sb, &oe->ref, NULL);
75143 +       if (IS_ERR(inode))
75144 +               goto fake_attr;
75146 +       if (is_bad_inode(inode)) {
75147 +               iput(inode);
75148 +fake_attr:
75149 +               if (oa->ni) {
75150 +                       iput(&oa->ni->vfs_inode);
75151 +                       oa->ni = NULL;
75152 +               }
75154 +               attr = attr_create_nonres_log(sbi, oe->type, 0, oe->ptr,
75155 +                                             oe->name_len, 0);
75156 +               if (!attr) {
75157 +                       ntfs_free(oa);
75158 +                       err = -ENOMEM;
75159 +                       goto out;
75160 +               }
75161 +               oa->attr = attr;
75162 +               oa->run1 = &oa->run0;
75163 +               goto final_oe;
75164 +       }
75166 +       ni_oe = ntfs_i(inode);
75167 +       oa->ni = ni_oe;
75169 +       attr = ni_find_attr(ni_oe, NULL, NULL, oe->type, oe->ptr, oe->name_len,
75170 +                           NULL, NULL);
75172 +       if (!attr)
75173 +               goto fake_attr;
75175 +       t32 = le32_to_cpu(attr->size);
75176 +       oa->attr = ntfs_memdup(attr, t32);
75177 +       if (!oa->attr)
75178 +               goto fake_attr;
75180 +       if (!S_ISDIR(inode->i_mode)) {
75181 +               if (attr->type == ATTR_DATA && !attr->name_len) {
75182 +                       oa->run1 = &ni_oe->file.run;
75183 +                       goto final_oe;
75184 +               }
75185 +       } else {
75186 +               if (attr->type == ATTR_ALLOC &&
75187 +                   attr->name_len == ARRAY_SIZE(I30_NAME) &&
75188 +                   !memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) {
75189 +                       oa->run1 = &ni_oe->dir.alloc_run;
75190 +                       goto final_oe;
75191 +               }
75192 +       }
75194 +       if (attr->non_res) {
75195 +               u16 roff = le16_to_cpu(attr->nres.run_off);
75196 +               CLST svcn = le64_to_cpu(attr->nres.svcn);
75198 +               err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn,
75199 +                                le64_to_cpu(attr->nres.evcn), svcn,
75200 +                                Add2Ptr(attr, roff), t32 - roff);
75201 +               if (err < 0) {
75202 +                       ntfs_free(oa->attr);
75203 +                       oa->attr = NULL;
75204 +                       goto fake_attr;
75205 +               }
75206 +               err = 0;
75207 +       }
75208 +       oa->run1 = &oa->run0;
75209 +       attr = oa->attr;
75211 +final_oe:
75212 +       if (oe->is_attr_name == 1)
75213 +               ntfs_free(oe->ptr);
75214 +       oe->is_attr_name = 0;
75215 +       oe->ptr = oa;
75216 +       oe->name_len = attr->name_len;
75218 +       goto next_open_attribute;
75220 +       /*
75221 +        * Now loop through the dirty page table to extract all of the Vcn/Lcn
75222 +        * Mapping that we have, and insert it into the appropriate run
75223 +        */
75224 +next_dirty_page:
75225 +       dp = enum_rstbl(dptbl, dp);
75226 +       if (!dp)
75227 +               goto do_redo_1;
75229 +       oe = Add2Ptr(oatbl, le32_to_cpu(dp->target_attr));
75231 +       if (oe->next != RESTART_ENTRY_ALLOCATED_LE)
75232 +               goto next_dirty_page;
75234 +       oa = oe->ptr;
75235 +       if (!oa)
75236 +               goto next_dirty_page;
75238 +       i = -1;
75239 +next_dirty_page_vcn:
75240 +       i += 1;
75241 +       if (i >= le32_to_cpu(dp->lcns_follow))
75242 +               goto next_dirty_page;
75244 +       vcn = le64_to_cpu(dp->vcn) + i;
75245 +       size = (vcn + 1) << sbi->cluster_bits;
75247 +       if (!dp->page_lcns[i])
75248 +               goto next_dirty_page_vcn;
75250 +       rno = ino_get(&oe->ref);
75251 +       if (rno <= MFT_REC_MIRR &&
75252 +           size < (MFT_REC_VOL + 1) * sbi->record_size &&
75253 +           oe->type == ATTR_DATA) {
75254 +               goto next_dirty_page_vcn;
75255 +       }
75257 +       lcn = le64_to_cpu(dp->page_lcns[i]);
75259 +       if ((!run_lookup_entry(oa->run1, vcn, &lcn0, &len0, NULL) ||
75260 +            lcn0 != lcn) &&
75261 +           !run_add_entry(oa->run1, vcn, lcn, 1, false)) {
75262 +               err = -ENOMEM;
75263 +               goto out;
75264 +       }
75265 +       attr = oa->attr;
75266 +       t64 = le64_to_cpu(attr->nres.alloc_size);
75267 +       if (size > t64) {
75268 +               attr->nres.valid_size = attr->nres.data_size =
75269 +                       attr->nres.alloc_size = cpu_to_le64(size);
75270 +       }
75271 +       goto next_dirty_page_vcn;
75273 +do_redo_1:
75274 +       /*
75275 +        * Perform the Redo Pass, to restore all of the dirty pages to the same
75276 +        * contents that they had immediately before the crash
75277 +        * If the dirty page table is empty, then we can skip the entire Redo Pass
75278 +        */
75279 +       if (!dptbl || !dptbl->total)
75280 +               goto do_undo_action;
75282 +       rec_lsn = rlsn;
75284 +       /*
75285 +        * Read the record at the Redo lsn, before falling
75286 +        * into common code to handle each record
75287 +        */
75288 +       err = read_log_rec_lcb(log, rlsn, lcb_ctx_next, &lcb);
75289 +       if (err)
75290 +               goto out;
75292 +       /*
75293 +        * Now loop to read all of our log records forwards,
75294 +        * until we hit the end of the file, cleaning up at the end
75295 +        */
75296 +do_action_next:
75297 +       frh = lcb->lrh;
75299 +       if (LfsClientRecord != frh->record_type)
75300 +               goto read_next_log_do_action;
75302 +       transact_id = le32_to_cpu(frh->transact_id);
75303 +       rec_len = le32_to_cpu(frh->client_data_len);
75304 +       lrh = lcb->log_rec;
75306 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
75307 +               err = -EINVAL;
75308 +               goto out;
75309 +       }
75311 +       /* Ignore log records that do not update pages */
75312 +       if (lrh->lcns_follow)
75313 +               goto find_dirty_page;
75315 +       goto read_next_log_do_action;
75317 +find_dirty_page:
75318 +       t16 = le16_to_cpu(lrh->target_attr);
75319 +       t64 = le64_to_cpu(lrh->target_vcn);
75320 +       dp = find_dp(dptbl, t16, t64);
75322 +       if (!dp)
75323 +               goto read_next_log_do_action;
75325 +       if (rec_lsn < le64_to_cpu(dp->oldest_lsn))
75326 +               goto read_next_log_do_action;
75328 +       t16 = le16_to_cpu(lrh->target_attr);
75329 +       if (t16 >= bytes_per_rt(oatbl)) {
75330 +               err = -EINVAL;
75331 +               goto out;
75332 +       }
75334 +       oe = Add2Ptr(oatbl, t16);
75336 +       if (oe->next != RESTART_ENTRY_ALLOCATED_LE) {
75337 +               err = -EINVAL;
75338 +               goto out;
75339 +       }
75341 +       oa = oe->ptr;
75343 +       if (!oa) {
75344 +               err = -EINVAL;
75345 +               goto out;
75346 +       }
75347 +       attr = oa->attr;
75349 +       vcn = le64_to_cpu(lrh->target_vcn);
75351 +       if (!run_lookup_entry(oa->run1, vcn, &lcn, NULL, NULL) ||
75352 +           lcn == SPARSE_LCN) {
75353 +               goto read_next_log_do_action;
75354 +       }
75356 +       /* Point to the Redo data and get its length */
75357 +       data = Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
75358 +       dlen = le16_to_cpu(lrh->redo_len);
75360 +       /* Shorten length by any Lcns which were deleted */
75361 +       saved_len = dlen;
75363 +       for (i = le16_to_cpu(lrh->lcns_follow); i; i--) {
75364 +               size_t j;
75365 +               u32 alen, voff;
75367 +               voff = le16_to_cpu(lrh->record_off) +
75368 +                      le16_to_cpu(lrh->attr_off);
75369 +               voff += le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
75371 +               /* If the Vcn question is allocated, we can just get out.*/
75372 +               j = le64_to_cpu(lrh->target_vcn) - le64_to_cpu(dp->vcn);
75373 +               if (dp->page_lcns[j + i - 1])
75374 +                       break;
75376 +               if (!saved_len)
75377 +                       saved_len = 1;
75379 +               /*
75380 +                * Calculate the allocated space left relative to the
75381 +                * log record Vcn, after removing this unallocated Vcn
75382 +                */
75383 +               alen = (i - 1) << sbi->cluster_bits;
75385 +               /*
75386 +                * If the update described this log record goes beyond
75387 +                * the allocated space, then we will have to reduce the length
75388 +                */
75389 +               if (voff >= alen)
75390 +                       dlen = 0;
75391 +               else if (voff + dlen > alen)
75392 +                       dlen = alen - voff;
75393 +       }
75395 +       /* If the resulting dlen from above is now zero, we can skip this log record */
75396 +       if (!dlen && saved_len)
75397 +               goto read_next_log_do_action;
75399 +       t16 = le16_to_cpu(lrh->redo_op);
75400 +       if (can_skip_action(t16))
75401 +               goto read_next_log_do_action;
75403 +       /* Apply the Redo operation a common routine */
75404 +       err = do_action(log, oe, lrh, t16, data, dlen, rec_len, &rec_lsn);
75405 +       if (err)
75406 +               goto out;
75408 +       /* Keep reading and looping back until end of file */
75409 +read_next_log_do_action:
75410 +       err = read_next_log_rec(log, lcb, &rec_lsn);
75411 +       if (!err && rec_lsn)
75412 +               goto do_action_next;
75414 +       lcb_put(lcb);
75415 +       lcb = NULL;
75417 +do_undo_action:
75418 +       /* Scan Transaction Table */
75419 +       tr = NULL;
75420 +transaction_table_next:
75421 +       tr = enum_rstbl(trtbl, tr);
75422 +       if (!tr)
75423 +               goto undo_action_done;
75425 +       if (TransactionActive != tr->transact_state || !tr->undo_next_lsn) {
75426 +               free_rsttbl_idx(trtbl, PtrOffset(trtbl, tr));
75427 +               goto transaction_table_next;
75428 +       }
75430 +       log->transaction_id = PtrOffset(trtbl, tr);
75431 +       undo_next_lsn = le64_to_cpu(tr->undo_next_lsn);
75433 +       /*
75434 +        * We only have to do anything if the transaction has
75435 +        * something its undo_next_lsn field
75436 +        */
75437 +       if (!undo_next_lsn)
75438 +               goto commit_undo;
75440 +       /* Read the first record to be undone by this transaction */
75441 +       err = read_log_rec_lcb(log, undo_next_lsn, lcb_ctx_undo_next, &lcb);
75442 +       if (err)
75443 +               goto out;
75445 +       /*
75446 +        * Now loop to read all of our log records forwards,
75447 +        * until we hit the end of the file, cleaning up at the end
75448 +        */
75449 +undo_action_next:
75451 +       lrh = lcb->log_rec;
75452 +       frh = lcb->lrh;
75453 +       transact_id = le32_to_cpu(frh->transact_id);
75454 +       rec_len = le32_to_cpu(frh->client_data_len);
75456 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
75457 +               err = -EINVAL;
75458 +               goto out;
75459 +       }
75461 +       if (lrh->undo_op == cpu_to_le16(Noop))
75462 +               goto read_next_log_undo_action;
75464 +       oe = Add2Ptr(oatbl, le16_to_cpu(lrh->target_attr));
75465 +       oa = oe->ptr;
75467 +       t16 = le16_to_cpu(lrh->lcns_follow);
75468 +       if (!t16)
75469 +               goto add_allocated_vcns;
75471 +       is_mapped = run_lookup_entry(oa->run1, le64_to_cpu(lrh->target_vcn),
75472 +                                    &lcn, &clen, NULL);
75474 +       /*
75475 +        * If the mapping isn't already the table or the  mapping
75476 +        * corresponds to a hole the mapping, we need to make sure
75477 +        * there is no partial page already memory
75478 +        */
75479 +       if (is_mapped && lcn != SPARSE_LCN && clen >= t16)
75480 +               goto add_allocated_vcns;
75482 +       vcn = le64_to_cpu(lrh->target_vcn);
75483 +       vcn &= ~(log->clst_per_page - 1);
75485 +add_allocated_vcns:
75486 +       for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
75487 +           size = (vcn + 1) << sbi->cluster_bits;
75488 +            i < t16; i++, vcn += 1, size += sbi->cluster_size) {
75489 +               attr = oa->attr;
75490 +               if (!attr->non_res) {
75491 +                       if (size > le32_to_cpu(attr->res.data_size))
75492 +                               attr->res.data_size = cpu_to_le32(size);
75493 +               } else {
75494 +                       if (size > le64_to_cpu(attr->nres.data_size))
75495 +                               attr->nres.valid_size = attr->nres.data_size =
75496 +                                       attr->nres.alloc_size =
75497 +                                               cpu_to_le64(size);
75498 +               }
75499 +       }
75501 +       t16 = le16_to_cpu(lrh->undo_op);
75502 +       if (can_skip_action(t16))
75503 +               goto read_next_log_undo_action;
75505 +       /* Point to the Redo data and get its length */
75506 +       data = Add2Ptr(lrh, le16_to_cpu(lrh->undo_off));
75507 +       dlen = le16_to_cpu(lrh->undo_len);
75509 +       /* it is time to apply the undo action */
75510 +       err = do_action(log, oe, lrh, t16, data, dlen, rec_len, NULL);
75512 +read_next_log_undo_action:
75513 +       /*
75514 +        * Keep reading and looping back until we have read the
75515 +        * last record for this transaction
75516 +        */
75517 +       err = read_next_log_rec(log, lcb, &rec_lsn);
75518 +       if (err)
75519 +               goto out;
75521 +       if (rec_lsn)
75522 +               goto undo_action_next;
75524 +       lcb_put(lcb);
75525 +       lcb = NULL;
75527 +commit_undo:
75528 +       free_rsttbl_idx(trtbl, log->transaction_id);
75530 +       log->transaction_id = 0;
75532 +       goto transaction_table_next;
75534 +undo_action_done:
75536 +       ntfs_update_mftmirr(sbi, 0);
75538 +       sbi->flags &= ~NTFS_FLAGS_NEED_REPLAY;
75540 +end_reply:
75542 +       err = 0;
75543 +       if (is_ro)
75544 +               goto out;
75546 +       rh = ntfs_zalloc(log->page_size);
75547 +       if (!rh) {
75548 +               err = -ENOMEM;
75549 +               goto out;
75550 +       }
75552 +       rh->rhdr.sign = NTFS_RSTR_SIGNATURE;
75553 +       rh->rhdr.fix_off = cpu_to_le16(offsetof(struct RESTART_HDR, fixups));
75554 +       t16 = (log->page_size >> SECTOR_SHIFT) + 1;
75555 +       rh->rhdr.fix_num = cpu_to_le16(t16);
75556 +       rh->sys_page_size = cpu_to_le32(log->page_size);
75557 +       rh->page_size = cpu_to_le32(log->page_size);
75559 +       t16 = QuadAlign(offsetof(struct RESTART_HDR, fixups) +
75560 +                       sizeof(short) * t16);
75561 +       rh->ra_off = cpu_to_le16(t16);
75562 +       rh->minor_ver = cpu_to_le16(1); // 0x1A:
75563 +       rh->major_ver = cpu_to_le16(1); // 0x1C:
75565 +       ra2 = Add2Ptr(rh, t16);
75566 +       memcpy(ra2, ra, sizeof(struct RESTART_AREA));
75568 +       ra2->client_idx[0] = 0;
75569 +       ra2->client_idx[1] = LFS_NO_CLIENT_LE;
75570 +       ra2->flags = cpu_to_le16(2);
75572 +       le32_add_cpu(&ra2->open_log_count, 1);
75574 +       ntfs_fix_pre_write(&rh->rhdr, log->page_size);
75576 +       err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size);
75577 +       if (!err)
75578 +               err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size,
75579 +                                       rh, log->page_size);
75581 +       ntfs_free(rh);
75582 +       if (err)
75583 +               goto out;
75585 +out:
75586 +       ntfs_free(rst);
75587 +       if (lcb)
75588 +               lcb_put(lcb);
75590 +       /* Scan the Open Attribute Table to close all of the open attributes */
75591 +       oe = NULL;
75592 +       while ((oe = enum_rstbl(oatbl, oe))) {
75593 +               rno = ino_get(&oe->ref);
75595 +               if (oe->is_attr_name == 1) {
75596 +                       ntfs_free(oe->ptr);
75597 +                       oe->ptr = NULL;
75598 +                       continue;
75599 +               }
75601 +               if (oe->is_attr_name)
75602 +                       continue;
75604 +               oa = oe->ptr;
75605 +               if (!oa)
75606 +                       continue;
75608 +               run_close(&oa->run0);
75609 +               ntfs_free(oa->attr);
75610 +               if (oa->ni)
75611 +                       iput(&oa->ni->vfs_inode);
75612 +               ntfs_free(oa);
75613 +       }
75615 +       ntfs_free(trtbl);
75616 +       ntfs_free(oatbl);
75617 +       ntfs_free(dptbl);
75618 +       ntfs_free(attr_names);
75619 +       ntfs_free(rst_info.r_page);
75621 +       ntfs_free(ra);
75622 +       ntfs_free(log->one_page_buf);
75624 +       if (err)
75625 +               sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
75627 +       if (err == -EROFS)
75628 +               err = 0;
75629 +       else if (log->set_dirty)
75630 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
75632 +       ntfs_free(log);
75634 +       return err;
75636 diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
75637 new file mode 100644
75638 index 000000000000..327356b08187
75639 --- /dev/null
75640 +++ b/fs/ntfs3/fsntfs.c
75641 @@ -0,0 +1,2542 @@
75642 +// SPDX-License-Identifier: GPL-2.0
75644 + *
75645 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
75646 + *
75647 + */
75649 +#include <linux/blkdev.h>
75650 +#include <linux/buffer_head.h>
75651 +#include <linux/fs.h>
75652 +#include <linux/nls.h>
75654 +#include "debug.h"
75655 +#include "ntfs.h"
75656 +#include "ntfs_fs.h"
75658 +// clang-format off
75659 +const struct cpu_str NAME_MFT = {
75660 +       4, 0, { '$', 'M', 'F', 'T' },
75662 +const struct cpu_str NAME_MIRROR = {
75663 +       8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
75665 +const struct cpu_str NAME_LOGFILE = {
75666 +       8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
75668 +const struct cpu_str NAME_VOLUME = {
75669 +       7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
75671 +const struct cpu_str NAME_ATTRDEF = {
75672 +       8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
75674 +const struct cpu_str NAME_ROOT = {
75675 +       1, 0, { '.' },
75677 +const struct cpu_str NAME_BITMAP = {
75678 +       7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
75680 +const struct cpu_str NAME_BOOT = {
75681 +       5, 0, { '$', 'B', 'o', 'o', 't' },
75683 +const struct cpu_str NAME_BADCLUS = {
75684 +       8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
75686 +const struct cpu_str NAME_QUOTA = {
75687 +       6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
75689 +const struct cpu_str NAME_SECURE = {
75690 +       7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
75692 +const struct cpu_str NAME_UPCASE = {
75693 +       7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
75695 +const struct cpu_str NAME_EXTEND = {
75696 +       7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
75698 +const struct cpu_str NAME_OBJID = {
75699 +       6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
75701 +const struct cpu_str NAME_REPARSE = {
75702 +       8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
75704 +const struct cpu_str NAME_USNJRNL = {
75705 +       8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
75707 +const __le16 BAD_NAME[4] = {
75708 +       cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
75710 +const __le16 I30_NAME[4] = {
75711 +       cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
75713 +const __le16 SII_NAME[4] = {
75714 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
75716 +const __le16 SDH_NAME[4] = {
75717 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
75719 +const __le16 SDS_NAME[4] = {
75720 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
75722 +const __le16 SO_NAME[2] = {
75723 +       cpu_to_le16('$'), cpu_to_le16('O'),
75725 +const __le16 SQ_NAME[2] = {
75726 +       cpu_to_le16('$'), cpu_to_le16('Q'),
75728 +const __le16 SR_NAME[2] = {
75729 +       cpu_to_le16('$'), cpu_to_le16('R'),
75732 +#ifdef CONFIG_NTFS3_LZX_XPRESS
75733 +const __le16 WOF_NAME[17] = {
75734 +       cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
75735 +       cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
75736 +       cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
75737 +       cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
75738 +       cpu_to_le16('a'),
75740 +#endif
75742 +// clang-format on
75745 + * ntfs_fix_pre_write
75746 + *
75747 + * inserts fixups into 'rhdr' before writing to disk
75748 + */
75749 +bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
75751 +       u16 *fixup, *ptr;
75752 +       u16 sample;
75753 +       u16 fo = le16_to_cpu(rhdr->fix_off);
75754 +       u16 fn = le16_to_cpu(rhdr->fix_num);
75756 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
75757 +           fn * SECTOR_SIZE > bytes) {
75758 +               return false;
75759 +       }
75761 +       /* Get fixup pointer */
75762 +       fixup = Add2Ptr(rhdr, fo);
75764 +       if (*fixup >= 0x7FFF)
75765 +               *fixup = 1;
75766 +       else
75767 +               *fixup += 1;
75769 +       sample = *fixup;
75771 +       ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
75773 +       while (fn--) {
75774 +               *++fixup = *ptr;
75775 +               *ptr = sample;
75776 +               ptr += SECTOR_SIZE / sizeof(short);
75777 +       }
75778 +       return true;
75782 + * ntfs_fix_post_read
75783 + *
75784 + * remove fixups after reading from disk
75785 + * Returns < 0 if error, 0 if ok, 1 if need to update fixups
75786 + */
75787 +int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
75788 +                      bool simple)
75790 +       int ret;
75791 +       u16 *fixup, *ptr;
75792 +       u16 sample, fo, fn;
75794 +       fo = le16_to_cpu(rhdr->fix_off);
75795 +       fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
75796 +                   : le16_to_cpu(rhdr->fix_num);
75798 +       /* Check errors */
75799 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
75800 +           fn * SECTOR_SIZE > bytes) {
75801 +               return -EINVAL; /* native chkntfs returns ok! */
75802 +       }
75804 +       /* Get fixup pointer */
75805 +       fixup = Add2Ptr(rhdr, fo);
75806 +       sample = *fixup;
75807 +       ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
75808 +       ret = 0;
75810 +       while (fn--) {
75811 +               /* Test current word */
75812 +               if (*ptr != sample) {
75813 +                       /* Fixup does not match! Is it serious error? */
75814 +                       ret = -E_NTFS_FIXUP;
75815 +               }
75817 +               /* Replace fixup */
75818 +               *ptr = *++fixup;
75819 +               ptr += SECTOR_SIZE / sizeof(short);
75820 +       }
75822 +       return ret;
75826 + * ntfs_extend_init
75827 + *
75828 + * loads $Extend file
75829 + */
75830 +int ntfs_extend_init(struct ntfs_sb_info *sbi)
75832 +       int err;
75833 +       struct super_block *sb = sbi->sb;
75834 +       struct inode *inode, *inode2;
75835 +       struct MFT_REF ref;
75837 +       if (sbi->volume.major_ver < 3) {
75838 +               ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
75839 +               return 0;
75840 +       }
75842 +       ref.low = cpu_to_le32(MFT_REC_EXTEND);
75843 +       ref.high = 0;
75844 +       ref.seq = cpu_to_le16(MFT_REC_EXTEND);
75845 +       inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
75846 +       if (IS_ERR(inode)) {
75847 +               err = PTR_ERR(inode);
75848 +               ntfs_err(sb, "Failed to load $Extend.");
75849 +               inode = NULL;
75850 +               goto out;
75851 +       }
75853 +       /* if ntfs_iget5 reads from disk it never returns bad inode */
75854 +       if (!S_ISDIR(inode->i_mode)) {
75855 +               err = -EINVAL;
75856 +               goto out;
75857 +       }
75859 +       /* Try to find $ObjId */
75860 +       inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
75861 +       if (inode2 && !IS_ERR(inode2)) {
75862 +               if (is_bad_inode(inode2)) {
75863 +                       iput(inode2);
75864 +               } else {
75865 +                       sbi->objid.ni = ntfs_i(inode2);
75866 +                       sbi->objid_no = inode2->i_ino;
75867 +               }
75868 +       }
75870 +       /* Try to find $Quota */
75871 +       inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
75872 +       if (inode2 && !IS_ERR(inode2)) {
75873 +               sbi->quota_no = inode2->i_ino;
75874 +               iput(inode2);
75875 +       }
75877 +       /* Try to find $Reparse */
75878 +       inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
75879 +       if (inode2 && !IS_ERR(inode2)) {
75880 +               sbi->reparse.ni = ntfs_i(inode2);
75881 +               sbi->reparse_no = inode2->i_ino;
75882 +       }
75884 +       /* Try to find $UsnJrnl */
75885 +       inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
75886 +       if (inode2 && !IS_ERR(inode2)) {
75887 +               sbi->usn_jrnl_no = inode2->i_ino;
75888 +               iput(inode2);
75889 +       }
75891 +       err = 0;
75892 +out:
75893 +       iput(inode);
75894 +       return err;
75897 +int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
75899 +       int err = 0;
75900 +       struct super_block *sb = sbi->sb;
75901 +       bool initialized = false;
75902 +       struct MFT_REF ref;
75903 +       struct inode *inode;
75905 +       /* Check for 4GB */
75906 +       if (ni->vfs_inode.i_size >= 0x100000000ull) {
75907 +               ntfs_err(sb, "\x24LogFile is too big");
75908 +               err = -EINVAL;
75909 +               goto out;
75910 +       }
75912 +       sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
75914 +       ref.low = cpu_to_le32(MFT_REC_MFT);
75915 +       ref.high = 0;
75916 +       ref.seq = cpu_to_le16(1);
75918 +       inode = ntfs_iget5(sb, &ref, NULL);
75920 +       if (IS_ERR(inode))
75921 +               inode = NULL;
75923 +       if (!inode) {
75924 +               /* Try to use mft copy */
75925 +               u64 t64 = sbi->mft.lbo;
75927 +               sbi->mft.lbo = sbi->mft.lbo2;
75928 +               inode = ntfs_iget5(sb, &ref, NULL);
75929 +               sbi->mft.lbo = t64;
75930 +               if (IS_ERR(inode))
75931 +                       inode = NULL;
75932 +       }
75934 +       if (!inode) {
75935 +               err = -EINVAL;
75936 +               ntfs_err(sb, "Failed to load $MFT.");
75937 +               goto out;
75938 +       }
75940 +       sbi->mft.ni = ntfs_i(inode);
75942 +       /* LogFile should not contains attribute list */
75943 +       err = ni_load_all_mi(sbi->mft.ni);
75944 +       if (!err)
75945 +               err = log_replay(ni, &initialized);
75947 +       iput(inode);
75948 +       sbi->mft.ni = NULL;
75950 +       sync_blockdev(sb->s_bdev);
75951 +       invalidate_bdev(sb->s_bdev);
75953 +       if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
75954 +               err = 0;
75955 +               goto out;
75956 +       }
75958 +       if (sb_rdonly(sb) || !initialized)
75959 +               goto out;
75961 +       /* fill LogFile by '-1' if it is initialized */
75962 +       err = ntfs_bio_fill_1(sbi, &ni->file.run);
75964 +out:
75965 +       sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
75967 +       return err;
75971 + * ntfs_query_def
75972 + *
75973 + * returns current ATTR_DEF_ENTRY for given attribute type
75974 + */
75975 +const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
75976 +                                           enum ATTR_TYPE type)
75978 +       int type_in = le32_to_cpu(type);
75979 +       size_t min_idx = 0;
75980 +       size_t max_idx = sbi->def_entries - 1;
75982 +       while (min_idx <= max_idx) {
75983 +               size_t i = min_idx + ((max_idx - min_idx) >> 1);
75984 +               const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
75985 +               int diff = le32_to_cpu(entry->type) - type_in;
75987 +               if (!diff)
75988 +                       return entry;
75989 +               if (diff < 0)
75990 +                       min_idx = i + 1;
75991 +               else if (i)
75992 +                       max_idx = i - 1;
75993 +               else
75994 +                       return NULL;
75995 +       }
75996 +       return NULL;
76000 + * ntfs_look_for_free_space
76001 + *
76002 + * looks for a free space in bitmap
76003 + */
76004 +int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
76005 +                            CLST *new_lcn, CLST *new_len,
76006 +                            enum ALLOCATE_OPT opt)
76008 +       int err;
76009 +       struct super_block *sb = sbi->sb;
76010 +       size_t a_lcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
76011 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
76013 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
76014 +       if (opt & ALLOCATE_MFT) {
76015 +               CLST alen;
76017 +               zlen = wnd_zone_len(wnd);
76019 +               if (!zlen) {
76020 +                       err = ntfs_refresh_zone(sbi);
76021 +                       if (err)
76022 +                               goto out;
76024 +                       zlen = wnd_zone_len(wnd);
76026 +                       if (!zlen) {
76027 +                               ntfs_err(sbi->sb,
76028 +                                        "no free space to extend mft");
76029 +                               err = -ENOSPC;
76030 +                               goto out;
76031 +                       }
76032 +               }
76034 +               lcn = wnd_zone_bit(wnd);
76035 +               alen = zlen > len ? len : zlen;
76037 +               wnd_zone_set(wnd, lcn + alen, zlen - alen);
76039 +               err = wnd_set_used(wnd, lcn, alen);
76040 +               if (err)
76041 +                       goto out;
76043 +               *new_lcn = lcn;
76044 +               *new_len = alen;
76045 +               goto ok;
76046 +       }
76048 +       /*
76049 +        * 'Cause cluster 0 is always used this value means that we should use
76050 +        * cached value of 'next_free_lcn' to improve performance
76051 +        */
76052 +       if (!lcn)
76053 +               lcn = sbi->used.next_free_lcn;
76055 +       if (lcn >= wnd->nbits)
76056 +               lcn = 0;
76058 +       *new_len = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &a_lcn);
76059 +       if (*new_len) {
76060 +               *new_lcn = a_lcn;
76061 +               goto ok;
76062 +       }
76064 +       /* Try to use clusters from MftZone */
76065 +       zlen = wnd_zone_len(wnd);
76066 +       zeroes = wnd_zeroes(wnd);
76068 +       /* Check too big request */
76069 +       if (len > zeroes + zlen)
76070 +               goto no_space;
76072 +       if (zlen <= NTFS_MIN_MFT_ZONE)
76073 +               goto no_space;
76075 +       /* How many clusters to cat from zone */
76076 +       zlcn = wnd_zone_bit(wnd);
76077 +       zlen2 = zlen >> 1;
76078 +       ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
76079 +       new_zlen = zlen - ztrim;
76081 +       if (new_zlen < NTFS_MIN_MFT_ZONE) {
76082 +               new_zlen = NTFS_MIN_MFT_ZONE;
76083 +               if (new_zlen > zlen)
76084 +                       new_zlen = zlen;
76085 +       }
76087 +       wnd_zone_set(wnd, zlcn, new_zlen);
76089 +       /* allocate continues clusters */
76090 +       *new_len =
76091 +               wnd_find(wnd, len, 0,
76092 +                        BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &a_lcn);
76093 +       if (*new_len) {
76094 +               *new_lcn = a_lcn;
76095 +               goto ok;
76096 +       }
76098 +no_space:
76099 +       up_write(&wnd->rw_lock);
76101 +       return -ENOSPC;
76103 +ok:
76104 +       err = 0;
76106 +       ntfs_unmap_meta(sb, *new_lcn, *new_len);
76108 +       if (opt & ALLOCATE_MFT)
76109 +               goto out;
76111 +       /* Set hint for next requests */
76112 +       sbi->used.next_free_lcn = *new_lcn + *new_len;
76114 +out:
76115 +       up_write(&wnd->rw_lock);
76116 +       return err;
76120 + * ntfs_extend_mft
76121 + *
76122 + * allocates additional MFT records
76123 + * sbi->mft.bitmap is locked for write
76124 + *
76125 + * NOTE: recursive:
76126 + *     ntfs_look_free_mft ->
76127 + *     ntfs_extend_mft ->
76128 + *     attr_set_size ->
76129 + *     ni_insert_nonresident ->
76130 + *     ni_insert_attr ->
76131 + *     ni_ins_attr_ext ->
76132 + *     ntfs_look_free_mft ->
76133 + *     ntfs_extend_mft
76134 + * To avoid recursive always allocate space for two new mft records
76135 + * see attrib.c: "at least two mft to avoid recursive loop"
76136 + */
76137 +static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
76139 +       int err;
76140 +       struct ntfs_inode *ni = sbi->mft.ni;
76141 +       size_t new_mft_total;
76142 +       u64 new_mft_bytes, new_bitmap_bytes;
76143 +       struct ATTRIB *attr;
76144 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
76146 +       new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
76147 +       new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
76149 +       /* Step 1: Resize $MFT::DATA */
76150 +       down_write(&ni->file.run_lock);
76151 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
76152 +                           new_mft_bytes, NULL, false, &attr);
76154 +       if (err) {
76155 +               up_write(&ni->file.run_lock);
76156 +               goto out;
76157 +       }
76159 +       attr->nres.valid_size = attr->nres.data_size;
76160 +       new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
76161 +       ni->mi.dirty = true;
76163 +       /* Step 2: Resize $MFT::BITMAP */
76164 +       new_bitmap_bytes = bitmap_size(new_mft_total);
76166 +       err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
76167 +                           new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
76169 +       /* Refresh Mft Zone if necessary */
76170 +       down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
76172 +       ntfs_refresh_zone(sbi);
76174 +       up_write(&sbi->used.bitmap.rw_lock);
76175 +       up_write(&ni->file.run_lock);
76177 +       if (err)
76178 +               goto out;
76180 +       err = wnd_extend(wnd, new_mft_total);
76182 +       if (err)
76183 +               goto out;
76185 +       ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
76187 +       err = _ni_write_inode(&ni->vfs_inode, 0);
76188 +out:
76189 +       return err;
76193 + * ntfs_look_free_mft
76194 + *
76195 + * looks for a free MFT record
76196 + */
76197 +int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
76198 +                      struct ntfs_inode *ni, struct mft_inode **mi)
76200 +       int err = 0;
76201 +       size_t zbit, zlen, from, to, fr;
76202 +       size_t mft_total;
76203 +       struct MFT_REF ref;
76204 +       struct super_block *sb = sbi->sb;
76205 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
76206 +       u32 ir;
76208 +       static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
76209 +                     MFT_REC_FREE - MFT_REC_RESERVED);
76211 +       if (!mft)
76212 +               down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
76214 +       zlen = wnd_zone_len(wnd);
76216 +       /* Always reserve space for MFT */
76217 +       if (zlen) {
76218 +               if (mft) {
76219 +                       zbit = wnd_zone_bit(wnd);
76220 +                       *rno = zbit;
76221 +                       wnd_zone_set(wnd, zbit + 1, zlen - 1);
76222 +               }
76223 +               goto found;
76224 +       }
76226 +       /* No MFT zone. find the nearest to '0' free MFT */
76227 +       if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
76228 +               /* Resize MFT */
76229 +               mft_total = wnd->nbits;
76231 +               err = ntfs_extend_mft(sbi);
76232 +               if (!err) {
76233 +                       zbit = mft_total;
76234 +                       goto reserve_mft;
76235 +               }
76237 +               if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
76238 +                       goto out;
76240 +               err = 0;
76242 +               /*
76243 +                * Look for free record reserved area [11-16) ==
76244 +                * [MFT_REC_RESERVED, MFT_REC_FREE ) MFT bitmap always
76245 +                * marks it as used
76246 +                */
76247 +               if (!sbi->mft.reserved_bitmap) {
76248 +                       /* Once per session create internal bitmap for 5 bits */
76249 +                       sbi->mft.reserved_bitmap = 0xFF;
76251 +                       ref.high = 0;
76252 +                       for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
76253 +                               struct inode *i;
76254 +                               struct ntfs_inode *ni;
76255 +                               struct MFT_REC *mrec;
76257 +                               ref.low = cpu_to_le32(ir);
76258 +                               ref.seq = cpu_to_le16(ir);
76260 +                               i = ntfs_iget5(sb, &ref, NULL);
76261 +                               if (IS_ERR(i)) {
76262 +next:
76263 +                                       ntfs_notice(
76264 +                                               sb,
76265 +                                               "Invalid reserved record %x",
76266 +                                               ref.low);
76267 +                                       continue;
76268 +                               }
76269 +                               if (is_bad_inode(i)) {
76270 +                                       iput(i);
76271 +                                       goto next;
76272 +                               }
76274 +                               ni = ntfs_i(i);
76276 +                               mrec = ni->mi.mrec;
76278 +                               if (!is_rec_base(mrec))
76279 +                                       goto next;
76281 +                               if (mrec->hard_links)
76282 +                                       goto next;
76284 +                               if (!ni_std(ni))
76285 +                                       goto next;
76287 +                               if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
76288 +                                                NULL, 0, NULL, NULL))
76289 +                                       goto next;
76291 +                               __clear_bit(ir - MFT_REC_RESERVED,
76292 +                                           &sbi->mft.reserved_bitmap);
76293 +                       }
76294 +               }
76296 +               /* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
76297 +               zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
76298 +                                         MFT_REC_FREE, MFT_REC_RESERVED);
76299 +               if (zbit >= MFT_REC_FREE) {
76300 +                       sbi->mft.next_reserved = MFT_REC_FREE;
76301 +                       goto out;
76302 +               }
76304 +               zlen = 1;
76305 +               sbi->mft.next_reserved = zbit;
76306 +       } else {
76307 +reserve_mft:
76308 +               zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
76309 +               if (zbit + zlen > wnd->nbits)
76310 +                       zlen = wnd->nbits - zbit;
76312 +               while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
76313 +                       zlen -= 1;
76315 +               /* [zbit, zbit + zlen) will be used for Mft itself */
76316 +               from = sbi->mft.used;
76317 +               if (from < zbit)
76318 +                       from = zbit;
76319 +               to = zbit + zlen;
76320 +               if (from < to) {
76321 +                       ntfs_clear_mft_tail(sbi, from, to);
76322 +                       sbi->mft.used = to;
76323 +               }
76324 +       }
76326 +       if (mft) {
76327 +               *rno = zbit;
76328 +               zbit += 1;
76329 +               zlen -= 1;
76330 +       }
76332 +       wnd_zone_set(wnd, zbit, zlen);
76334 +found:
76335 +       if (!mft) {
76336 +               /* The request to get record for general purpose */
76337 +               if (sbi->mft.next_free < MFT_REC_USER)
76338 +                       sbi->mft.next_free = MFT_REC_USER;
76340 +               for (;;) {
76341 +                       if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
76342 +                       } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
76343 +                               sbi->mft.next_free = sbi->mft.bitmap.nbits;
76344 +                       } else {
76345 +                               *rno = fr;
76346 +                               sbi->mft.next_free = *rno + 1;
76347 +                               break;
76348 +                       }
76350 +                       err = ntfs_extend_mft(sbi);
76351 +                       if (err)
76352 +                               goto out;
76353 +               }
76354 +       }
76356 +       if (ni && !ni_add_subrecord(ni, *rno, mi)) {
76357 +               err = -ENOMEM;
76358 +               goto out;
76359 +       }
76361 +       /* We have found a record that are not reserved for next MFT */
76362 +       if (*rno >= MFT_REC_FREE)
76363 +               wnd_set_used(wnd, *rno, 1);
76364 +       else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
76365 +               __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
76367 +out:
76368 +       if (!mft)
76369 +               up_write(&wnd->rw_lock);
76371 +       return err;
76375 + * ntfs_mark_rec_free
76376 + *
76377 + * marks record as free
76378 + */
76379 +void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
76381 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
76383 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
76384 +       if (rno >= wnd->nbits)
76385 +               goto out;
76387 +       if (rno >= MFT_REC_FREE) {
76388 +               if (!wnd_is_used(wnd, rno, 1))
76389 +                       ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
76390 +               else
76391 +                       wnd_set_free(wnd, rno, 1);
76392 +       } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
76393 +               __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
76394 +       }
76396 +       if (rno < wnd_zone_bit(wnd))
76397 +               wnd_zone_set(wnd, rno, 1);
76398 +       else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
76399 +               sbi->mft.next_free = rno;
76401 +out:
76402 +       up_write(&wnd->rw_lock);
76406 + * ntfs_clear_mft_tail
76407 + *
76408 + * formats empty records [from, to)
76409 + * sbi->mft.bitmap is locked for write
76410 + */
76411 +int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
76413 +       int err;
76414 +       u32 rs;
76415 +       u64 vbo;
76416 +       struct runs_tree *run;
76417 +       struct ntfs_inode *ni;
76419 +       if (from >= to)
76420 +               return 0;
76422 +       rs = sbi->record_size;
76423 +       ni = sbi->mft.ni;
76424 +       run = &ni->file.run;
76426 +       down_read(&ni->file.run_lock);
76427 +       vbo = (u64)from * rs;
76428 +       for (; from < to; from++, vbo += rs) {
76429 +               struct ntfs_buffers nb;
76431 +               err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
76432 +               if (err)
76433 +                       goto out;
76435 +               err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
76436 +               nb_put(&nb);
76437 +               if (err)
76438 +                       goto out;
76439 +       }
76441 +out:
76442 +       sbi->mft.used = from;
76443 +       up_read(&ni->file.run_lock);
76444 +       return err;
76448 + * ntfs_refresh_zone
76449 + *
76450 + * refreshes Mft zone
76451 + * sbi->used.bitmap is locked for rw
76452 + * sbi->mft.bitmap is locked for write
76453 + * sbi->mft.ni->file.run_lock for write
76454 + */
76455 +int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
76457 +       CLST zone_limit, zone_max, lcn, vcn, len;
76458 +       size_t lcn_s, zlen;
76459 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
76460 +       struct ntfs_inode *ni = sbi->mft.ni;
76462 +       /* Do not change anything unless we have non empty Mft zone */
76463 +       if (wnd_zone_len(wnd))
76464 +               return 0;
76466 +       /*
76467 +        * Compute the mft zone at two steps
76468 +        * It would be nice if we are able to allocate
76469 +        * 1/8 of total clusters for MFT but not more then 512 MB
76470 +        */
76471 +       zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
76472 +       zone_max = wnd->nbits >> 3;
76473 +       if (zone_max > zone_limit)
76474 +               zone_max = zone_limit;
76476 +       vcn = bytes_to_cluster(sbi,
76477 +                              (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
76479 +       if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
76480 +               lcn = SPARSE_LCN;
76482 +       /* We should always find Last Lcn for MFT */
76483 +       if (lcn == SPARSE_LCN)
76484 +               return -EINVAL;
76486 +       lcn_s = lcn + 1;
76488 +       /* Try to allocate clusters after last MFT run */
76489 +       zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
76490 +       if (!zlen) {
76491 +               ntfs_notice(sbi->sb, "MftZone: unavailable");
76492 +               return 0;
76493 +       }
76495 +       /* Truncate too large zone */
76496 +       wnd_zone_set(wnd, lcn_s, zlen);
76498 +       return 0;
76502 + * ntfs_update_mftmirr
76503 + *
76504 + * updates $MFTMirr data
76505 + */
76506 +int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
76508 +       int err;
76509 +       struct super_block *sb = sbi->sb;
76510 +       u32 blocksize = sb->s_blocksize;
76511 +       sector_t block1, block2;
76512 +       u32 bytes;
76514 +       if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
76515 +               return 0;
76517 +       err = 0;
76518 +       bytes = sbi->mft.recs_mirr << sbi->record_bits;
76519 +       block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
76520 +       block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
76522 +       for (; bytes >= blocksize; bytes -= blocksize) {
76523 +               struct buffer_head *bh1, *bh2;
76525 +               bh1 = sb_bread(sb, block1++);
76526 +               if (!bh1) {
76527 +                       err = -EIO;
76528 +                       goto out;
76529 +               }
76531 +               bh2 = sb_getblk(sb, block2++);
76532 +               if (!bh2) {
76533 +                       put_bh(bh1);
76534 +                       err = -EIO;
76535 +                       goto out;
76536 +               }
76538 +               if (buffer_locked(bh2))
76539 +                       __wait_on_buffer(bh2);
76541 +               lock_buffer(bh2);
76542 +               memcpy(bh2->b_data, bh1->b_data, blocksize);
76543 +               set_buffer_uptodate(bh2);
76544 +               mark_buffer_dirty(bh2);
76545 +               unlock_buffer(bh2);
76547 +               put_bh(bh1);
76548 +               bh1 = NULL;
76550 +               if (wait)
76551 +                       err = sync_dirty_buffer(bh2);
76553 +               put_bh(bh2);
76554 +               if (err)
76555 +                       goto out;
76556 +       }
76558 +       sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
76560 +out:
76561 +       return err;
76565 + * ntfs_set_state
76566 + *
76567 + * mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
76568 + * umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
76569 + * ntfs error: ntfs_set_state(NTFS_DIRTY_ERROR)
76570 + */
76571 +int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
76573 +       int err;
76574 +       struct ATTRIB *attr;
76575 +       struct VOLUME_INFO *info;
76576 +       struct mft_inode *mi;
76577 +       struct ntfs_inode *ni;
76579 +       /*
76580 +        * Do not change state if fs was real_dirty.
76581 +        * Do not change state if fs is already dirty (clear).
76582 +        * Do not change anything if mounted read-only.
76583 +        */
76584 +       if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
76585 +               return 0;
76587 +       /* Check cached value */
76588 +       if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
76589 +           (sbi->volume.flags & VOLUME_FLAG_DIRTY))
76590 +               return 0;
76592 +       ni = sbi->volume.ni;
76593 +       if (!ni)
76594 +               return -EINVAL;
76596 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
76598 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
76599 +       if (!attr) {
76600 +               err = -EINVAL;
76601 +               goto out;
76602 +       }
76604 +       info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
76605 +       if (!info) {
76606 +               err = -EINVAL;
76607 +               goto out;
76608 +       }
76610 +       switch (dirty) {
76611 +       case NTFS_DIRTY_ERROR:
76612 +               ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
76613 +               sbi->volume.real_dirty = true;
76614 +               fallthrough;
76615 +       case NTFS_DIRTY_DIRTY:
76616 +               info->flags |= VOLUME_FLAG_DIRTY;
76617 +               break;
76618 +       case NTFS_DIRTY_CLEAR:
76619 +               info->flags &= ~VOLUME_FLAG_DIRTY;
76620 +               break;
76621 +       }
76622 +       /* Cache current volume flags */
76623 +       sbi->volume.flags = info->flags;
76624 +       mi->dirty = true;
76625 +       err = 0;
76627 +out:
76628 +       ni_unlock(ni);
76629 +       if (err)
76630 +               return err;
76632 +       mark_inode_dirty(&ni->vfs_inode);
76633 +       /*verify(!ntfs_update_mftmirr()); */
76634 +       err = sync_inode_metadata(&ni->vfs_inode, 1);
76636 +       return err;
76640 + * security_hash
76641 + *
76642 + * calculates a hash of security descriptor
76643 + */
76644 +static inline __le32 security_hash(const void *sd, size_t bytes)
76646 +       u32 hash = 0;
76647 +       const __le32 *ptr = sd;
76649 +       bytes >>= 2;
76650 +       while (bytes--)
76651 +               hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
76652 +       return cpu_to_le32(hash);
76655 +int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
76657 +       struct block_device *bdev = sb->s_bdev;
76658 +       u32 blocksize = sb->s_blocksize;
76659 +       u64 block = lbo >> sb->s_blocksize_bits;
76660 +       u32 off = lbo & (blocksize - 1);
76661 +       u32 op = blocksize - off;
76663 +       for (; bytes; block += 1, off = 0, op = blocksize) {
76664 +               struct buffer_head *bh = __bread(bdev, block, blocksize);
76666 +               if (!bh)
76667 +                       return -EIO;
76669 +               if (op > bytes)
76670 +                       op = bytes;
76672 +               memcpy(buffer, bh->b_data + off, op);
76674 +               put_bh(bh);
76676 +               bytes -= op;
76677 +               buffer = Add2Ptr(buffer, op);
76678 +       }
76680 +       return 0;
76683 +int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
76684 +                 const void *buf, int wait)
76686 +       u32 blocksize = sb->s_blocksize;
76687 +       struct block_device *bdev = sb->s_bdev;
76688 +       sector_t block = lbo >> sb->s_blocksize_bits;
76689 +       u32 off = lbo & (blocksize - 1);
76690 +       u32 op = blocksize - off;
76691 +       struct buffer_head *bh;
76693 +       if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
76694 +               wait = 1;
76696 +       for (; bytes; block += 1, off = 0, op = blocksize) {
76697 +               if (op > bytes)
76698 +                       op = bytes;
76700 +               if (op < blocksize) {
76701 +                       bh = __bread(bdev, block, blocksize);
76702 +                       if (!bh) {
76703 +                               ntfs_err(sb, "failed to read block %llx",
76704 +                                        (u64)block);
76705 +                               return -EIO;
76706 +                       }
76707 +               } else {
76708 +                       bh = __getblk(bdev, block, blocksize);
76709 +                       if (!bh)
76710 +                               return -ENOMEM;
76711 +               }
76713 +               if (buffer_locked(bh))
76714 +                       __wait_on_buffer(bh);
76716 +               lock_buffer(bh);
76717 +               if (buf) {
76718 +                       memcpy(bh->b_data + off, buf, op);
76719 +                       buf = Add2Ptr(buf, op);
76720 +               } else {
76721 +                       memset(bh->b_data + off, -1, op);
76722 +               }
76724 +               set_buffer_uptodate(bh);
76725 +               mark_buffer_dirty(bh);
76726 +               unlock_buffer(bh);
76728 +               if (wait) {
76729 +                       int err = sync_dirty_buffer(bh);
76731 +                       if (err) {
76732 +                               ntfs_err(
76733 +                                       sb,
76734 +                                       "failed to sync buffer at block %llx, error %d",
76735 +                                       (u64)block, err);
76736 +                               put_bh(bh);
76737 +                               return err;
76738 +                       }
76739 +               }
76741 +               put_bh(bh);
76743 +               bytes -= op;
76744 +       }
76745 +       return 0;
76748 +int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
76749 +                     u64 vbo, const void *buf, size_t bytes)
76751 +       struct super_block *sb = sbi->sb;
76752 +       u8 cluster_bits = sbi->cluster_bits;
76753 +       u32 off = vbo & sbi->cluster_mask;
76754 +       CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
76755 +       u64 lbo, len;
76756 +       size_t idx;
76758 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
76759 +               return -ENOENT;
76761 +       if (lcn == SPARSE_LCN)
76762 +               return -EINVAL;
76764 +       lbo = ((u64)lcn << cluster_bits) + off;
76765 +       len = ((u64)clen << cluster_bits) - off;
76767 +       for (;;) {
76768 +               u32 op = len < bytes ? len : bytes;
76769 +               int err = ntfs_sb_write(sb, lbo, op, buf, 0);
76771 +               if (err)
76772 +                       return err;
76774 +               bytes -= op;
76775 +               if (!bytes)
76776 +                       break;
76778 +               vcn_next = vcn + clen;
76779 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
76780 +                   vcn != vcn_next)
76781 +                       return -ENOENT;
76783 +               if (lcn == SPARSE_LCN)
76784 +                       return -EINVAL;
76786 +               if (buf)
76787 +                       buf = Add2Ptr(buf, op);
76789 +               lbo = ((u64)lcn << cluster_bits);
76790 +               len = ((u64)clen << cluster_bits);
76791 +       }
76793 +       return 0;
76796 +struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
76797 +                                  const struct runs_tree *run, u64 vbo)
76799 +       struct super_block *sb = sbi->sb;
76800 +       u8 cluster_bits = sbi->cluster_bits;
76801 +       CLST lcn;
76802 +       u64 lbo;
76804 +       if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
76805 +               return ERR_PTR(-ENOENT);
76807 +       lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
76809 +       return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
76812 +int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
76813 +                    u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
76815 +       int err;
76816 +       struct super_block *sb = sbi->sb;
76817 +       u32 blocksize = sb->s_blocksize;
76818 +       u8 cluster_bits = sbi->cluster_bits;
76819 +       u32 off = vbo & sbi->cluster_mask;
76820 +       u32 nbh = 0;
76821 +       CLST vcn_next, vcn = vbo >> cluster_bits;
76822 +       CLST lcn, clen;
76823 +       u64 lbo, len;
76824 +       size_t idx;
76825 +       struct buffer_head *bh;
76827 +       if (!run) {
76828 +               /* First reading of $Volume + $MFTMirr + LogFile goes here */
76829 +               if (vbo > MFT_REC_VOL * sbi->record_size) {
76830 +                       err = -ENOENT;
76831 +                       goto out;
76832 +               }
76834 +               /* use absolute boot's 'MFTCluster' to read record */
76835 +               lbo = vbo + sbi->mft.lbo;
76836 +               len = sbi->record_size;
76837 +       } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
76838 +               err = -ENOENT;
76839 +               goto out;
76840 +       } else {
76841 +               if (lcn == SPARSE_LCN) {
76842 +                       err = -EINVAL;
76843 +                       goto out;
76844 +               }
76846 +               lbo = ((u64)lcn << cluster_bits) + off;
76847 +               len = ((u64)clen << cluster_bits) - off;
76848 +       }
76850 +       off = lbo & (blocksize - 1);
76851 +       if (nb) {
76852 +               nb->off = off;
76853 +               nb->bytes = bytes;
76854 +       }
76856 +       for (;;) {
76857 +               u32 len32 = len >= bytes ? bytes : len;
76858 +               sector_t block = lbo >> sb->s_blocksize_bits;
76860 +               do {
76861 +                       u32 op = blocksize - off;
76863 +                       if (op > len32)
76864 +                               op = len32;
76866 +                       bh = ntfs_bread(sb, block);
76867 +                       if (!bh) {
76868 +                               err = -EIO;
76869 +                               goto out;
76870 +                       }
76872 +                       if (buf) {
76873 +                               memcpy(buf, bh->b_data + off, op);
76874 +                               buf = Add2Ptr(buf, op);
76875 +                       }
76877 +                       if (!nb) {
76878 +                               put_bh(bh);
76879 +                       } else if (nbh >= ARRAY_SIZE(nb->bh)) {
76880 +                               err = -EINVAL;
76881 +                               goto out;
76882 +                       } else {
76883 +                               nb->bh[nbh++] = bh;
76884 +                               nb->nbufs = nbh;
76885 +                       }
76887 +                       bytes -= op;
76888 +                       if (!bytes)
76889 +                               return 0;
76890 +                       len32 -= op;
76891 +                       block += 1;
76892 +                       off = 0;
76894 +               } while (len32);
76896 +               vcn_next = vcn + clen;
76897 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
76898 +                   vcn != vcn_next) {
76899 +                       err = -ENOENT;
76900 +                       goto out;
76901 +               }
76903 +               if (lcn == SPARSE_LCN) {
76904 +                       err = -EINVAL;
76905 +                       goto out;
76906 +               }
76908 +               lbo = ((u64)lcn << cluster_bits);
76909 +               len = ((u64)clen << cluster_bits);
76910 +       }
76912 +out:
76913 +       if (!nbh)
76914 +               return err;
76916 +       while (nbh) {
76917 +               put_bh(nb->bh[--nbh]);
76918 +               nb->bh[nbh] = NULL;
76919 +       }
76921 +       nb->nbufs = 0;
76922 +       return err;
76925 +/* Returns < 0 on error, 0 if ok, '-E_NTFS_FIXUP' if fixups need to be updated */
76926 +int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
76927 +                struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
76928 +                struct ntfs_buffers *nb)
76930 +       int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
76932 +       if (err)
76933 +               return err;
76934 +       return ntfs_fix_post_read(rhdr, nb->bytes, true);
76937 +int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
76938 +               u32 bytes, struct ntfs_buffers *nb)
76940 +       int err = 0;
76941 +       struct super_block *sb = sbi->sb;
76942 +       u32 blocksize = sb->s_blocksize;
76943 +       u8 cluster_bits = sbi->cluster_bits;
76944 +       CLST vcn_next, vcn = vbo >> cluster_bits;
76945 +       u32 off;
76946 +       u32 nbh = 0;
76947 +       CLST lcn, clen;
76948 +       u64 lbo, len;
76949 +       size_t idx;
76951 +       nb->bytes = bytes;
76953 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
76954 +               err = -ENOENT;
76955 +               goto out;
76956 +       }
76958 +       off = vbo & sbi->cluster_mask;
76959 +       lbo = ((u64)lcn << cluster_bits) + off;
76960 +       len = ((u64)clen << cluster_bits) - off;
76962 +       nb->off = off = lbo & (blocksize - 1);
76964 +       for (;;) {
76965 +               u32 len32 = len < bytes ? len : bytes;
76966 +               sector_t block = lbo >> sb->s_blocksize_bits;
76968 +               do {
76969 +                       u32 op;
76970 +                       struct buffer_head *bh;
76972 +                       if (nbh >= ARRAY_SIZE(nb->bh)) {
76973 +                               err = -EINVAL;
76974 +                               goto out;
76975 +                       }
76977 +                       op = blocksize - off;
76978 +                       if (op > len32)
76979 +                               op = len32;
76981 +                       if (op == blocksize) {
76982 +                               bh = sb_getblk(sb, block);
76983 +                               if (!bh) {
76984 +                                       err = -ENOMEM;
76985 +                                       goto out;
76986 +                               }
76987 +                               if (buffer_locked(bh))
76988 +                                       __wait_on_buffer(bh);
76989 +                               set_buffer_uptodate(bh);
76990 +                       } else {
76991 +                               bh = ntfs_bread(sb, block);
76992 +                               if (!bh) {
76993 +                                       err = -EIO;
76994 +                                       goto out;
76995 +                               }
76996 +                       }
76998 +                       nb->bh[nbh++] = bh;
76999 +                       bytes -= op;
77000 +                       if (!bytes) {
77001 +                               nb->nbufs = nbh;
77002 +                               return 0;
77003 +                       }
77005 +                       block += 1;
77006 +                       len32 -= op;
77007 +                       off = 0;
77008 +               } while (len32);
77010 +               vcn_next = vcn + clen;
77011 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
77012 +                   vcn != vcn_next) {
77013 +                       err = -ENOENT;
77014 +                       goto out;
77015 +               }
77017 +               lbo = ((u64)lcn << cluster_bits);
77018 +               len = ((u64)clen << cluster_bits);
77019 +       }
77021 +out:
77022 +       while (nbh) {
77023 +               put_bh(nb->bh[--nbh]);
77024 +               nb->bh[nbh] = NULL;
77025 +       }
77027 +       nb->nbufs = 0;
77029 +       return err;
77032 +int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
77033 +                 struct ntfs_buffers *nb, int sync)
77035 +       int err = 0;
77036 +       struct super_block *sb = sbi->sb;
77037 +       u32 block_size = sb->s_blocksize;
77038 +       u32 bytes = nb->bytes;
77039 +       u32 off = nb->off;
77040 +       u16 fo = le16_to_cpu(rhdr->fix_off);
77041 +       u16 fn = le16_to_cpu(rhdr->fix_num);
77042 +       u32 idx;
77043 +       __le16 *fixup;
77044 +       __le16 sample;
77046 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
77047 +           fn * SECTOR_SIZE > bytes) {
77048 +               return -EINVAL;
77049 +       }
77051 +       for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
77052 +               u32 op = block_size - off;
77053 +               char *bh_data;
77054 +               struct buffer_head *bh = nb->bh[idx];
77055 +               __le16 *ptr, *end_data;
77057 +               if (op > bytes)
77058 +                       op = bytes;
77060 +               if (buffer_locked(bh))
77061 +                       __wait_on_buffer(bh);
77063 +               lock_buffer(nb->bh[idx]);
77065 +               bh_data = bh->b_data + off;
77066 +               end_data = Add2Ptr(bh_data, op);
77067 +               memcpy(bh_data, rhdr, op);
77069 +               if (!idx) {
77070 +                       u16 t16;
77072 +                       fixup = Add2Ptr(bh_data, fo);
77073 +                       sample = *fixup;
77074 +                       t16 = le16_to_cpu(sample);
77075 +                       if (t16 >= 0x7FFF) {
77076 +                               sample = *fixup = cpu_to_le16(1);
77077 +                       } else {
77078 +                               sample = cpu_to_le16(t16 + 1);
77079 +                               *fixup = sample;
77080 +                       }
77082 +                       *(__le16 *)Add2Ptr(rhdr, fo) = sample;
77083 +               }
77085 +               ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
77087 +               do {
77088 +                       *++fixup = *ptr;
77089 +                       *ptr = sample;
77090 +                       ptr += SECTOR_SIZE / sizeof(short);
77091 +               } while (ptr < end_data);
77093 +               set_buffer_uptodate(bh);
77094 +               mark_buffer_dirty(bh);
77095 +               unlock_buffer(bh);
77097 +               if (sync) {
77098 +                       int err2 = sync_dirty_buffer(bh);
77100 +                       if (!err && err2)
77101 +                               err = err2;
77102 +               }
77104 +               bytes -= op;
77105 +               rhdr = Add2Ptr(rhdr, op);
77106 +       }
77108 +       return err;
77111 +static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
77113 +       struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
77115 +       if (!bio && (current->flags & PF_MEMALLOC)) {
77116 +               while (!bio && (nr_vecs /= 2))
77117 +                       bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
77118 +       }
77119 +       return bio;
77122 +/* read/write pages from/to disk*/
77123 +int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
77124 +                  struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
77125 +                  u32 op)
77127 +       int err = 0;
77128 +       struct bio *new, *bio = NULL;
77129 +       struct super_block *sb = sbi->sb;
77130 +       struct block_device *bdev = sb->s_bdev;
77131 +       struct page *page;
77132 +       u8 cluster_bits = sbi->cluster_bits;
77133 +       CLST lcn, clen, vcn, vcn_next;
77134 +       u32 add, off, page_idx;
77135 +       u64 lbo, len;
77136 +       size_t run_idx;
77137 +       struct blk_plug plug;
77139 +       if (!bytes)
77140 +               return 0;
77142 +       blk_start_plug(&plug);
77144 +       /* align vbo and bytes to be 512 bytes aligned */
77145 +       lbo = (vbo + bytes + 511) & ~511ull;
77146 +       vbo = vbo & ~511ull;
77147 +       bytes = lbo - vbo;
77149 +       vcn = vbo >> cluster_bits;
77150 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
77151 +               err = -ENOENT;
77152 +               goto out;
77153 +       }
77154 +       off = vbo & sbi->cluster_mask;
77155 +       page_idx = 0;
77156 +       page = pages[0];
77158 +       for (;;) {
77159 +               lbo = ((u64)lcn << cluster_bits) + off;
77160 +               len = ((u64)clen << cluster_bits) - off;
77161 +new_bio:
77162 +               new = ntfs_alloc_bio(nr_pages - page_idx);
77163 +               if (!new) {
77164 +                       err = -ENOMEM;
77165 +                       goto out;
77166 +               }
77167 +               if (bio) {
77168 +                       bio_chain(bio, new);
77169 +                       submit_bio(bio);
77170 +               }
77171 +               bio = new;
77172 +               bio_set_dev(bio, bdev);
77173 +               bio->bi_iter.bi_sector = lbo >> 9;
77174 +               bio->bi_opf = op;
77176 +               while (len) {
77177 +                       off = vbo & (PAGE_SIZE - 1);
77178 +                       add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
77180 +                       if (bio_add_page(bio, page, add, off) < add)
77181 +                               goto new_bio;
77183 +                       if (bytes <= add)
77184 +                               goto out;
77185 +                       bytes -= add;
77186 +                       vbo += add;
77188 +                       if (add + off == PAGE_SIZE) {
77189 +                               page_idx += 1;
77190 +                               if (WARN_ON(page_idx >= nr_pages)) {
77191 +                                       err = -EINVAL;
77192 +                                       goto out;
77193 +                               }
77194 +                               page = pages[page_idx];
77195 +                       }
77197 +                       if (len <= add)
77198 +                               break;
77199 +                       len -= add;
77200 +                       lbo += add;
77201 +               }
77203 +               vcn_next = vcn + clen;
77204 +               if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
77205 +                   vcn != vcn_next) {
77206 +                       err = -ENOENT;
77207 +                       goto out;
77208 +               }
77209 +               off = 0;
77210 +       }
77211 +out:
77212 +       if (bio) {
77213 +               if (!err)
77214 +                       err = submit_bio_wait(bio);
77215 +               bio_put(bio);
77216 +       }
77217 +       blk_finish_plug(&plug);
77219 +       return err;
77223 + * Helper for ntfs_loadlog_and_replay
77224 + * fill on-disk logfile range by (-1)
77225 + * this means empty logfile
77226 + */
77227 +int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
77229 +       int err = 0;
77230 +       struct super_block *sb = sbi->sb;
77231 +       struct block_device *bdev = sb->s_bdev;
77232 +       u8 cluster_bits = sbi->cluster_bits;
77233 +       struct bio *new, *bio = NULL;
77234 +       CLST lcn, clen;
77235 +       u64 lbo, len;
77236 +       size_t run_idx;
77237 +       struct page *fill;
77238 +       void *kaddr;
77239 +       struct blk_plug plug;
77241 +       fill = alloc_page(GFP_KERNEL);
77242 +       if (!fill)
77243 +               return -ENOMEM;
77245 +       kaddr = kmap_atomic(fill);
77246 +       memset(kaddr, -1, PAGE_SIZE);
77247 +       kunmap_atomic(kaddr);
77248 +       flush_dcache_page(fill);
77249 +       lock_page(fill);
77251 +       if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
77252 +               err = -ENOENT;
77253 +               goto out;
77254 +       }
77256 +       /*
77257 +        * TODO: try blkdev_issue_write_same
77258 +        */
77259 +       blk_start_plug(&plug);
77260 +       do {
77261 +               lbo = (u64)lcn << cluster_bits;
77262 +               len = (u64)clen << cluster_bits;
77263 +new_bio:
77264 +               new = ntfs_alloc_bio(BIO_MAX_VECS);
77265 +               if (!new) {
77266 +                       err = -ENOMEM;
77267 +                       break;
77268 +               }
77269 +               if (bio) {
77270 +                       bio_chain(bio, new);
77271 +                       submit_bio(bio);
77272 +               }
77273 +               bio = new;
77274 +               bio_set_dev(bio, bdev);
77275 +               bio->bi_opf = REQ_OP_WRITE;
77276 +               bio->bi_iter.bi_sector = lbo >> 9;
77278 +               for (;;) {
77279 +                       u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
77281 +                       if (bio_add_page(bio, fill, add, 0) < add)
77282 +                               goto new_bio;
77284 +                       lbo += add;
77285 +                       if (len <= add)
77286 +                               break;
77287 +                       len -= add;
77288 +               }
77289 +       } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
77291 +       if (bio) {
77292 +               if (!err)
77293 +                       err = submit_bio_wait(bio);
77294 +               bio_put(bio);
77295 +       }
77296 +       blk_finish_plug(&plug);
77297 +out:
77298 +       unlock_page(fill);
77299 +       put_page(fill);
77301 +       return err;
77304 +int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
77305 +                   u64 vbo, u64 *lbo, u64 *bytes)
77307 +       u32 off;
77308 +       CLST lcn, len;
77309 +       u8 cluster_bits = sbi->cluster_bits;
77311 +       if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
77312 +               return -ENOENT;
77314 +       off = vbo & sbi->cluster_mask;
77315 +       *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
77316 +       *bytes = ((u64)len << cluster_bits) - off;
77318 +       return 0;
77321 +struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
77323 +       int err = 0;
77324 +       struct super_block *sb = sbi->sb;
77325 +       struct inode *inode = new_inode(sb);
77326 +       struct ntfs_inode *ni;
77328 +       if (!inode)
77329 +               return ERR_PTR(-ENOMEM);
77331 +       ni = ntfs_i(inode);
77333 +       err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
77334 +                           false);
77335 +       if (err)
77336 +               goto out;
77338 +       inode->i_ino = rno;
77339 +       if (insert_inode_locked(inode) < 0) {
77340 +               err = -EIO;
77341 +               goto out;
77342 +       }
77344 +out:
77345 +       if (err) {
77346 +               iput(inode);
77347 +               ni = ERR_PTR(err);
77348 +       }
77349 +       return ni;
77353 + * O:BAG:BAD:(A;OICI;FA;;;WD)
77354 + * owner S-1-5-32-544 (Administrators)
77355 + * group S-1-5-32-544 (Administrators)
77356 + * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
77357 + */
77358 +const u8 s_default_security[] __aligned(8) = {
77359 +       0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
77360 +       0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
77361 +       0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
77362 +       0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
77363 +       0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
77364 +       0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
77365 +       0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
77368 +static_assert(sizeof(s_default_security) == 0x50);
77370 +static inline u32 sid_length(const struct SID *sid)
77372 +       return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
77376 + * Thanks Mark Harmstone for idea
77377 + */
77378 +static bool is_acl_valid(const struct ACL *acl, u32 len)
77380 +       const struct ACE_HEADER *ace;
77381 +       u32 i;
77382 +       u16 ace_count, ace_size;
77384 +       if (acl->AclRevision != ACL_REVISION &&
77385 +           acl->AclRevision != ACL_REVISION_DS) {
77386 +               /*
77387 +                * This value should be ACL_REVISION, unless the ACL contains an
77388 +                * object-specific ACE, in which case this value must be ACL_REVISION_DS.
77389 +                * All ACEs in an ACL must be at the same revision level.
77390 +                */
77391 +               return false;
77392 +       }
77394 +       if (acl->Sbz1)
77395 +               return false;
77397 +       if (le16_to_cpu(acl->AclSize) > len)
77398 +               return false;
77400 +       if (acl->Sbz2)
77401 +               return false;
77403 +       len -= sizeof(struct ACL);
77404 +       ace = (struct ACE_HEADER *)&acl[1];
77405 +       ace_count = le16_to_cpu(acl->AceCount);
77407 +       for (i = 0; i < ace_count; i++) {
77408 +               if (len < sizeof(struct ACE_HEADER))
77409 +                       return false;
77411 +               ace_size = le16_to_cpu(ace->AceSize);
77412 +               if (len < ace_size)
77413 +                       return false;
77415 +               len -= ace_size;
77416 +               ace = Add2Ptr(ace, ace_size);
77417 +       }
77419 +       return true;
77422 +bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
77424 +       u32 sd_owner, sd_group, sd_sacl, sd_dacl;
77426 +       if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
77427 +               return false;
77429 +       if (sd->Revision != 1)
77430 +               return false;
77432 +       if (sd->Sbz1)
77433 +               return false;
77435 +       if (!(sd->Control & SE_SELF_RELATIVE))
77436 +               return false;
77438 +       sd_owner = le32_to_cpu(sd->Owner);
77439 +       if (sd_owner) {
77440 +               const struct SID *owner = Add2Ptr(sd, sd_owner);
77442 +               if (sd_owner + offsetof(struct SID, SubAuthority) > len)
77443 +                       return false;
77445 +               if (owner->Revision != 1)
77446 +                       return false;
77448 +               if (sd_owner + sid_length(owner) > len)
77449 +                       return false;
77450 +       }
77452 +       sd_group = le32_to_cpu(sd->Group);
77453 +       if (sd_group) {
77454 +               const struct SID *group = Add2Ptr(sd, sd_group);
77456 +               if (sd_group + offsetof(struct SID, SubAuthority) > len)
77457 +                       return false;
77459 +               if (group->Revision != 1)
77460 +                       return false;
77462 +               if (sd_group + sid_length(group) > len)
77463 +                       return false;
77464 +       }
77466 +       sd_sacl = le32_to_cpu(sd->Sacl);
77467 +       if (sd_sacl) {
77468 +               const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
77470 +               if (sd_sacl + sizeof(struct ACL) > len)
77471 +                       return false;
77473 +               if (!is_acl_valid(sacl, len - sd_sacl))
77474 +                       return false;
77475 +       }
77477 +       sd_dacl = le32_to_cpu(sd->Dacl);
77478 +       if (sd_dacl) {
77479 +               const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
77481 +               if (sd_dacl + sizeof(struct ACL) > len)
77482 +                       return false;
77484 +               if (!is_acl_valid(dacl, len - sd_dacl))
77485 +                       return false;
77486 +       }
77488 +       return true;
77492 + * ntfs_security_init
77493 + *
77494 + * loads and parse $Secure
77495 + */
77496 +int ntfs_security_init(struct ntfs_sb_info *sbi)
77498 +       int err;
77499 +       struct super_block *sb = sbi->sb;
77500 +       struct inode *inode;
77501 +       struct ntfs_inode *ni;
77502 +       struct MFT_REF ref;
77503 +       struct ATTRIB *attr;
77504 +       struct ATTR_LIST_ENTRY *le;
77505 +       u64 sds_size;
77506 +       size_t cnt, off;
77507 +       struct NTFS_DE *ne;
77508 +       struct NTFS_DE_SII *sii_e;
77509 +       struct ntfs_fnd *fnd_sii = NULL;
77510 +       const struct INDEX_ROOT *root_sii;
77511 +       const struct INDEX_ROOT *root_sdh;
77512 +       struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
77513 +       struct ntfs_index *indx_sii = &sbi->security.index_sii;
77515 +       ref.low = cpu_to_le32(MFT_REC_SECURE);
77516 +       ref.high = 0;
77517 +       ref.seq = cpu_to_le16(MFT_REC_SECURE);
77519 +       inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
77520 +       if (IS_ERR(inode)) {
77521 +               err = PTR_ERR(inode);
77522 +               ntfs_err(sb, "Failed to load $Secure.");
77523 +               inode = NULL;
77524 +               goto out;
77525 +       }
77527 +       ni = ntfs_i(inode);
77529 +       le = NULL;
77531 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
77532 +                           ARRAY_SIZE(SDH_NAME), NULL, NULL);
77533 +       if (!attr) {
77534 +               err = -EINVAL;
77535 +               goto out;
77536 +       }
77538 +       root_sdh = resident_data(attr);
77539 +       if (root_sdh->type != ATTR_ZERO ||
77540 +           root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
77541 +               err = -EINVAL;
77542 +               goto out;
77543 +       }
77545 +       err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
77546 +       if (err)
77547 +               goto out;
77549 +       attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
77550 +                           ARRAY_SIZE(SII_NAME), NULL, NULL);
77551 +       if (!attr) {
77552 +               err = -EINVAL;
77553 +               goto out;
77554 +       }
77556 +       root_sii = resident_data(attr);
77557 +       if (root_sii->type != ATTR_ZERO ||
77558 +           root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
77559 +               err = -EINVAL;
77560 +               goto out;
77561 +       }
77563 +       err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
77564 +       if (err)
77565 +               goto out;
77567 +       fnd_sii = fnd_get();
77568 +       if (!fnd_sii) {
77569 +               err = -ENOMEM;
77570 +               goto out;
77571 +       }
77573 +       sds_size = inode->i_size;
77575 +       /* Find the last valid Id */
77576 +       sbi->security.next_id = SECURITY_ID_FIRST;
77577 +       /* Always write new security at the end of bucket */
77578 +       sbi->security.next_off =
77579 +               Quad2Align(sds_size - SecurityDescriptorsBlockSize);
77581 +       cnt = 0;
77582 +       off = 0;
77583 +       ne = NULL;
77585 +       for (;;) {
77586 +               u32 next_id;
77588 +               err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
77589 +               if (err || !ne)
77590 +                       break;
77592 +               sii_e = (struct NTFS_DE_SII *)ne;
77593 +               if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
77594 +                       continue;
77596 +               next_id = le32_to_cpu(sii_e->sec_id) + 1;
77597 +               if (next_id >= sbi->security.next_id)
77598 +                       sbi->security.next_id = next_id;
77600 +               cnt += 1;
77601 +       }
77603 +       sbi->security.ni = ni;
77604 +       inode = NULL;
77605 +out:
77606 +       iput(inode);
77607 +       fnd_put(fnd_sii);
77609 +       return err;
77613 + * ntfs_get_security_by_id
77614 + *
77615 + * reads security descriptor by id
77616 + */
77617 +int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
77618 +                           struct SECURITY_DESCRIPTOR_RELATIVE **sd,
77619 +                           size_t *size)
77621 +       int err;
77622 +       int diff;
77623 +       struct ntfs_inode *ni = sbi->security.ni;
77624 +       struct ntfs_index *indx = &sbi->security.index_sii;
77625 +       void *p = NULL;
77626 +       struct NTFS_DE_SII *sii_e;
77627 +       struct ntfs_fnd *fnd_sii;
77628 +       struct SECURITY_HDR d_security;
77629 +       const struct INDEX_ROOT *root_sii;
77630 +       u32 t32;
77632 +       *sd = NULL;
77634 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
77636 +       fnd_sii = fnd_get();
77637 +       if (!fnd_sii) {
77638 +               err = -ENOMEM;
77639 +               goto out;
77640 +       }
77642 +       root_sii = indx_get_root(indx, ni, NULL, NULL);
77643 +       if (!root_sii) {
77644 +               err = -EINVAL;
77645 +               goto out;
77646 +       }
77648 +       /* Try to find this SECURITY descriptor in SII indexes */
77649 +       err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
77650 +                       NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
77651 +       if (err)
77652 +               goto out;
77654 +       if (diff)
77655 +               goto out;
77657 +       t32 = le32_to_cpu(sii_e->sec_hdr.size);
77658 +       if (t32 < SIZEOF_SECURITY_HDR) {
77659 +               err = -EINVAL;
77660 +               goto out;
77661 +       }
77663 +       if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
77664 +               /*
77665 +                * looks like too big security. 0x10000 - is arbitrary big number
77666 +                */
77667 +               err = -EFBIG;
77668 +               goto out;
77669 +       }
77671 +       *size = t32 - SIZEOF_SECURITY_HDR;
77673 +       p = ntfs_malloc(*size);
77674 +       if (!p) {
77675 +               err = -ENOMEM;
77676 +               goto out;
77677 +       }
77679 +       err = ntfs_read_run_nb(sbi, &ni->file.run,
77680 +                              le64_to_cpu(sii_e->sec_hdr.off), &d_security,
77681 +                              sizeof(d_security), NULL);
77682 +       if (err)
77683 +               goto out;
77685 +       if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
77686 +               err = -EINVAL;
77687 +               goto out;
77688 +       }
77690 +       err = ntfs_read_run_nb(sbi, &ni->file.run,
77691 +                              le64_to_cpu(sii_e->sec_hdr.off) +
77692 +                                      SIZEOF_SECURITY_HDR,
77693 +                              p, *size, NULL);
77694 +       if (err)
77695 +               goto out;
77697 +       *sd = p;
77698 +       p = NULL;
77700 +out:
77701 +       ntfs_free(p);
77702 +       fnd_put(fnd_sii);
77703 +       ni_unlock(ni);
77705 +       return err;
77709 + * ntfs_insert_security
77710 + *
77711 + * inserts security descriptor into $Secure::SDS
77712 + *
77713 + * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
77714 + * and it contains a mirror copy of each security descriptor.  When writing
77715 + * to a security descriptor at location X, another copy will be written at
77716 + * location (X+256K).
77717 + * When writing a security descriptor that will cross the 256K boundary,
77718 + * the pointer will be advanced by 256K to skip
77719 + * over the mirror portion.
77720 + */
77721 +int ntfs_insert_security(struct ntfs_sb_info *sbi,
77722 +                        const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
77723 +                        u32 size_sd, __le32 *security_id, bool *inserted)
77725 +       int err, diff;
77726 +       struct ntfs_inode *ni = sbi->security.ni;
77727 +       struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
77728 +       struct ntfs_index *indx_sii = &sbi->security.index_sii;
77729 +       struct NTFS_DE_SDH *e;
77730 +       struct NTFS_DE_SDH sdh_e;
77731 +       struct NTFS_DE_SII sii_e;
77732 +       struct SECURITY_HDR *d_security;
77733 +       u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
77734 +       u32 aligned_sec_size = Quad2Align(new_sec_size);
77735 +       struct SECURITY_KEY hash_key;
77736 +       struct ntfs_fnd *fnd_sdh = NULL;
77737 +       const struct INDEX_ROOT *root_sdh;
77738 +       const struct INDEX_ROOT *root_sii;
77739 +       u64 mirr_off, new_sds_size;
77740 +       u32 next, left;
77742 +       static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
77743 +                     SecurityDescriptorsBlockSize);
77745 +       hash_key.hash = security_hash(sd, size_sd);
77746 +       hash_key.sec_id = SECURITY_ID_INVALID;
77748 +       if (inserted)
77749 +               *inserted = false;
77750 +       *security_id = SECURITY_ID_INVALID;
77752 +       /* Allocate a temporal buffer*/
77753 +       d_security = ntfs_zalloc(aligned_sec_size);
77754 +       if (!d_security)
77755 +               return -ENOMEM;
77757 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
77759 +       fnd_sdh = fnd_get();
77760 +       if (!fnd_sdh) {
77761 +               err = -ENOMEM;
77762 +               goto out;
77763 +       }
77765 +       root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
77766 +       if (!root_sdh) {
77767 +               err = -EINVAL;
77768 +               goto out;
77769 +       }
77771 +       root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
77772 +       if (!root_sii) {
77773 +               err = -EINVAL;
77774 +               goto out;
77775 +       }
77777 +       /*
77778 +        * Check if such security already exists
77779 +        * use "SDH" and hash -> to get the offset in "SDS"
77780 +        */
77781 +       err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
77782 +                       &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
77783 +                       fnd_sdh);
77784 +       if (err)
77785 +               goto out;
77787 +       while (e) {
77788 +               if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
77789 +                       err = ntfs_read_run_nb(sbi, &ni->file.run,
77790 +                                              le64_to_cpu(e->sec_hdr.off),
77791 +                                              d_security, new_sec_size, NULL);
77792 +                       if (err)
77793 +                               goto out;
77795 +                       if (le32_to_cpu(d_security->size) == new_sec_size &&
77796 +                           d_security->key.hash == hash_key.hash &&
77797 +                           !memcmp(d_security + 1, sd, size_sd)) {
77798 +                               *security_id = d_security->key.sec_id;
77799 +                               /*such security already exists*/
77800 +                               err = 0;
77801 +                               goto out;
77802 +                       }
77803 +               }
77805 +               err = indx_find_sort(indx_sdh, ni, root_sdh,
77806 +                                    (struct NTFS_DE **)&e, fnd_sdh);
77807 +               if (err)
77808 +                       goto out;
77810 +               if (!e || e->key.hash != hash_key.hash)
77811 +                       break;
77812 +       }
77814 +       /* Zero unused space */
77815 +       next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
77816 +       left = SecurityDescriptorsBlockSize - next;
77818 +       /* Zero gap until SecurityDescriptorsBlockSize */
77819 +       if (left < new_sec_size) {
77820 +               /* zero "left" bytes from sbi->security.next_off */
77821 +               sbi->security.next_off += SecurityDescriptorsBlockSize + left;
77822 +       }
77824 +       /* Zero tail of previous security */
77825 +       //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
77827 +       /*
77828 +        * Example:
77829 +        * 0x40438 == ni->vfs_inode.i_size
77830 +        * 0x00440 == sbi->security.next_off
77831 +        * need to zero [0x438-0x440)
77832 +        * if (next > used) {
77833 +        *  u32 tozero = next - used;
77834 +        *  zero "tozero" bytes from sbi->security.next_off - tozero
77835 +        */
77837 +       /* format new security descriptor */
77838 +       d_security->key.hash = hash_key.hash;
77839 +       d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
77840 +       d_security->off = cpu_to_le64(sbi->security.next_off);
77841 +       d_security->size = cpu_to_le32(new_sec_size);
77842 +       memcpy(d_security + 1, sd, size_sd);
77844 +       /* Write main SDS bucket */
77845 +       err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
77846 +                               d_security, aligned_sec_size);
77848 +       if (err)
77849 +               goto out;
77851 +       mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
77852 +       new_sds_size = mirr_off + aligned_sec_size;
77854 +       if (new_sds_size > ni->vfs_inode.i_size) {
77855 +               err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
77856 +                                   ARRAY_SIZE(SDS_NAME), &ni->file.run,
77857 +                                   new_sds_size, &new_sds_size, false, NULL);
77858 +               if (err)
77859 +                       goto out;
77860 +       }
77862 +       /* Write copy SDS bucket */
77863 +       err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
77864 +                               aligned_sec_size);
77865 +       if (err)
77866 +               goto out;
77868 +       /* Fill SII entry */
77869 +       sii_e.de.view.data_off =
77870 +               cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
77871 +       sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
77872 +       sii_e.de.view.res = 0;
77873 +       sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
77874 +       sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
77875 +       sii_e.de.flags = 0;
77876 +       sii_e.de.res = 0;
77877 +       sii_e.sec_id = d_security->key.sec_id;
77878 +       memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
77880 +       err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL);
77881 +       if (err)
77882 +               goto out;
77884 +       /* Fill SDH entry */
77885 +       sdh_e.de.view.data_off =
77886 +               cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
77887 +       sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
77888 +       sdh_e.de.view.res = 0;
77889 +       sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
77890 +       sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
77891 +       sdh_e.de.flags = 0;
77892 +       sdh_e.de.res = 0;
77893 +       sdh_e.key.hash = d_security->key.hash;
77894 +       sdh_e.key.sec_id = d_security->key.sec_id;
77895 +       memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
77896 +       sdh_e.magic[0] = cpu_to_le16('I');
77897 +       sdh_e.magic[1] = cpu_to_le16('I');
77899 +       fnd_clear(fnd_sdh);
77900 +       err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
77901 +                               fnd_sdh);
77902 +       if (err)
77903 +               goto out;
77905 +       *security_id = d_security->key.sec_id;
77906 +       if (inserted)
77907 +               *inserted = true;
77909 +       /* Update Id and offset for next descriptor */
77910 +       sbi->security.next_id += 1;
77911 +       sbi->security.next_off += aligned_sec_size;
77913 +out:
77914 +       fnd_put(fnd_sdh);
77915 +       mark_inode_dirty(&ni->vfs_inode);
77916 +       ni_unlock(ni);
77917 +       ntfs_free(d_security);
77919 +       return err;
77923 + * ntfs_reparse_init
77924 + *
77925 + * loads and parse $Extend/$Reparse
77926 + */
77927 +int ntfs_reparse_init(struct ntfs_sb_info *sbi)
77929 +       int err;
77930 +       struct ntfs_inode *ni = sbi->reparse.ni;
77931 +       struct ntfs_index *indx = &sbi->reparse.index_r;
77932 +       struct ATTRIB *attr;
77933 +       struct ATTR_LIST_ENTRY *le;
77934 +       const struct INDEX_ROOT *root_r;
77936 +       if (!ni)
77937 +               return 0;
77939 +       le = NULL;
77940 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
77941 +                           ARRAY_SIZE(SR_NAME), NULL, NULL);
77942 +       if (!attr) {
77943 +               err = -EINVAL;
77944 +               goto out;
77945 +       }
77947 +       root_r = resident_data(attr);
77948 +       if (root_r->type != ATTR_ZERO ||
77949 +           root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
77950 +               err = -EINVAL;
77951 +               goto out;
77952 +       }
77954 +       err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
77955 +       if (err)
77956 +               goto out;
77958 +out:
77959 +       return err;
77963 + * ntfs_objid_init
77964 + *
77965 + * loads and parse $Extend/$ObjId
77966 + */
77967 +int ntfs_objid_init(struct ntfs_sb_info *sbi)
77969 +       int err;
77970 +       struct ntfs_inode *ni = sbi->objid.ni;
77971 +       struct ntfs_index *indx = &sbi->objid.index_o;
77972 +       struct ATTRIB *attr;
77973 +       struct ATTR_LIST_ENTRY *le;
77974 +       const struct INDEX_ROOT *root;
77976 +       if (!ni)
77977 +               return 0;
77979 +       le = NULL;
77980 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
77981 +                           ARRAY_SIZE(SO_NAME), NULL, NULL);
77982 +       if (!attr) {
77983 +               err = -EINVAL;
77984 +               goto out;
77985 +       }
77987 +       root = resident_data(attr);
77988 +       if (root->type != ATTR_ZERO ||
77989 +           root->rule != NTFS_COLLATION_TYPE_UINTS) {
77990 +               err = -EINVAL;
77991 +               goto out;
77992 +       }
77994 +       err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
77995 +       if (err)
77996 +               goto out;
77998 +out:
77999 +       return err;
78002 +int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
78004 +       int err;
78005 +       struct ntfs_inode *ni = sbi->objid.ni;
78006 +       struct ntfs_index *indx = &sbi->objid.index_o;
78008 +       if (!ni)
78009 +               return -EINVAL;
78011 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
78013 +       err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
78015 +       mark_inode_dirty(&ni->vfs_inode);
78016 +       ni_unlock(ni);
78018 +       return err;
78021 +int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
78022 +                       const struct MFT_REF *ref)
78024 +       int err;
78025 +       struct ntfs_inode *ni = sbi->reparse.ni;
78026 +       struct ntfs_index *indx = &sbi->reparse.index_r;
78027 +       struct NTFS_DE_R re;
78029 +       if (!ni)
78030 +               return -EINVAL;
78032 +       memset(&re, 0, sizeof(re));
78034 +       re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
78035 +       re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
78036 +       re.de.key_size = cpu_to_le16(sizeof(re.key));
78038 +       re.key.ReparseTag = rtag;
78039 +       memcpy(&re.key.ref, ref, sizeof(*ref));
78041 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
78043 +       err = indx_insert_entry(indx, ni, &re.de, NULL, NULL);
78045 +       mark_inode_dirty(&ni->vfs_inode);
78046 +       ni_unlock(ni);
78048 +       return err;
78051 +int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
78052 +                       const struct MFT_REF *ref)
78054 +       int err, diff;
78055 +       struct ntfs_inode *ni = sbi->reparse.ni;
78056 +       struct ntfs_index *indx = &sbi->reparse.index_r;
78057 +       struct ntfs_fnd *fnd = NULL;
78058 +       struct REPARSE_KEY rkey;
78059 +       struct NTFS_DE_R *re;
78060 +       struct INDEX_ROOT *root_r;
78062 +       if (!ni)
78063 +               return -EINVAL;
78065 +       rkey.ReparseTag = rtag;
78066 +       rkey.ref = *ref;
78068 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
78070 +       if (rtag) {
78071 +               err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
78072 +               goto out1;
78073 +       }
78075 +       fnd = fnd_get();
78076 +       if (!fnd) {
78077 +               err = -ENOMEM;
78078 +               goto out1;
78079 +       }
78081 +       root_r = indx_get_root(indx, ni, NULL, NULL);
78082 +       if (!root_r) {
78083 +               err = -EINVAL;
78084 +               goto out;
78085 +       }
78087 +       /* 1 - forces to ignore rkey.ReparseTag when comparing keys */
78088 +       err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
78089 +                       (struct NTFS_DE **)&re, fnd);
78090 +       if (err)
78091 +               goto out;
78093 +       if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
78094 +               /* Impossible. Looks like volume corrupt?*/
78095 +               goto out;
78096 +       }
78098 +       memcpy(&rkey, &re->key, sizeof(rkey));
78100 +       fnd_put(fnd);
78101 +       fnd = NULL;
78103 +       err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
78104 +       if (err)
78105 +               goto out;
78107 +out:
78108 +       fnd_put(fnd);
78110 +out1:
78111 +       mark_inode_dirty(&ni->vfs_inode);
78112 +       ni_unlock(ni);
78114 +       return err;
78117 +static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
78118 +                                         CLST len)
78120 +       ntfs_unmap_meta(sbi->sb, lcn, len);
78121 +       ntfs_discard(sbi, lcn, len);
78124 +void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
78126 +       CLST end, i;
78127 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
78129 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
78130 +       if (!wnd_is_used(wnd, lcn, len)) {
78131 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
78133 +               end = lcn + len;
78134 +               len = 0;
78135 +               for (i = lcn; i < end; i++) {
78136 +                       if (wnd_is_used(wnd, i, 1)) {
78137 +                               if (!len)
78138 +                                       lcn = i;
78139 +                               len += 1;
78140 +                               continue;
78141 +                       }
78143 +                       if (!len)
78144 +                               continue;
78146 +                       if (trim)
78147 +                               ntfs_unmap_and_discard(sbi, lcn, len);
78149 +                       wnd_set_free(wnd, lcn, len);
78150 +                       len = 0;
78151 +               }
78153 +               if (!len)
78154 +                       goto out;
78155 +       }
78157 +       if (trim)
78158 +               ntfs_unmap_and_discard(sbi, lcn, len);
78159 +       wnd_set_free(wnd, lcn, len);
78161 +out:
78162 +       up_write(&wnd->rw_lock);
78166 + * run_deallocate
78167 + *
78168 + * deallocate clusters
78169 + */
78170 +int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
78172 +       CLST lcn, len;
78173 +       size_t idx = 0;
78175 +       while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
78176 +               if (lcn == SPARSE_LCN)
78177 +                       continue;
78179 +               mark_as_free_ex(sbi, lcn, len, trim);
78180 +       }
78182 +       return 0;
78184 diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
78185 new file mode 100644
78186 index 000000000000..931a7241ef00
78187 --- /dev/null
78188 +++ b/fs/ntfs3/index.c
78189 @@ -0,0 +1,2641 @@
78190 +// SPDX-License-Identifier: GPL-2.0
78192 + *
78193 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
78194 + *
78195 + */
78197 +#include <linux/blkdev.h>
78198 +#include <linux/buffer_head.h>
78199 +#include <linux/fs.h>
78200 +#include <linux/nls.h>
78202 +#include "debug.h"
78203 +#include "ntfs.h"
78204 +#include "ntfs_fs.h"
78206 +static const struct INDEX_NAMES {
78207 +       const __le16 *name;
78208 +       u8 name_len;
78209 +} s_index_names[INDEX_MUTEX_TOTAL] = {
78210 +       { I30_NAME, ARRAY_SIZE(I30_NAME) }, { SII_NAME, ARRAY_SIZE(SII_NAME) },
78211 +       { SDH_NAME, ARRAY_SIZE(SDH_NAME) }, { SO_NAME, ARRAY_SIZE(SO_NAME) },
78212 +       { SQ_NAME, ARRAY_SIZE(SQ_NAME) },   { SR_NAME, ARRAY_SIZE(SR_NAME) },
78216 + * compare two names in index
78217 + * if l1 != 0
78218 + *   both names are little endian on-disk ATTR_FILE_NAME structs
78219 + * else
78220 + *   key1 - cpu_str, key2 - ATTR_FILE_NAME
78221 + */
78222 +static int cmp_fnames(const void *key1, size_t l1, const void *key2, size_t l2,
78223 +                     const void *data)
78225 +       const struct ATTR_FILE_NAME *f2 = key2;
78226 +       const struct ntfs_sb_info *sbi = data;
78227 +       const struct ATTR_FILE_NAME *f1;
78228 +       u16 fsize2;
78229 +       bool both_case;
78231 +       if (l2 <= offsetof(struct ATTR_FILE_NAME, name))
78232 +               return -1;
78234 +       fsize2 = fname_full_size(f2);
78235 +       if (l2 < fsize2)
78236 +               return -1;
78238 +       both_case = f2->type != FILE_NAME_DOS /*&& !sbi->options.nocase*/;
78239 +       if (!l1) {
78240 +               const struct le_str *s2 = (struct le_str *)&f2->name_len;
78242 +               /*
78243 +                * If names are equal (case insensitive)
78244 +                * try to compare it case sensitive
78245 +                */
78246 +               return ntfs_cmp_names_cpu(key1, s2, sbi->upcase, both_case);
78247 +       }
78249 +       f1 = key1;
78250 +       return ntfs_cmp_names(f1->name, f1->name_len, f2->name, f2->name_len,
78251 +                             sbi->upcase, both_case);
78254 +/* $SII of $Secure and $Q of Quota */
78255 +static int cmp_uint(const void *key1, size_t l1, const void *key2, size_t l2,
78256 +                   const void *data)
78258 +       const u32 *k1 = key1;
78259 +       const u32 *k2 = key2;
78261 +       if (l2 < sizeof(u32))
78262 +               return -1;
78264 +       if (*k1 < *k2)
78265 +               return -1;
78266 +       if (*k1 > *k2)
78267 +               return 1;
78268 +       return 0;
78271 +/* $SDH of $Secure */
78272 +static int cmp_sdh(const void *key1, size_t l1, const void *key2, size_t l2,
78273 +                  const void *data)
78275 +       const struct SECURITY_KEY *k1 = key1;
78276 +       const struct SECURITY_KEY *k2 = key2;
78277 +       u32 t1, t2;
78279 +       if (l2 < sizeof(struct SECURITY_KEY))
78280 +               return -1;
78282 +       t1 = le32_to_cpu(k1->hash);
78283 +       t2 = le32_to_cpu(k2->hash);
78285 +       /* First value is a hash value itself */
78286 +       if (t1 < t2)
78287 +               return -1;
78288 +       if (t1 > t2)
78289 +               return 1;
78291 +       /* Second value is security Id */
78292 +       if (data) {
78293 +               t1 = le32_to_cpu(k1->sec_id);
78294 +               t2 = le32_to_cpu(k2->sec_id);
78295 +               if (t1 < t2)
78296 +                       return -1;
78297 +               if (t1 > t2)
78298 +                       return 1;
78299 +       }
78301 +       return 0;
78304 +/* $O of ObjId and "$R" for Reparse */
78305 +static int cmp_uints(const void *key1, size_t l1, const void *key2, size_t l2,
78306 +                    const void *data)
78308 +       const __le32 *k1 = key1;
78309 +       const __le32 *k2 = key2;
78310 +       size_t count;
78312 +       if ((size_t)data == 1) {
78313 +               /*
78314 +                * ni_delete_all -> ntfs_remove_reparse -> delete all with this reference
78315 +                * k1, k2 - pointers to REPARSE_KEY
78316 +                */
78318 +               k1 += 1; // skip REPARSE_KEY.ReparseTag
78319 +               k2 += 1; // skip REPARSE_KEY.ReparseTag
78320 +               if (l2 <= sizeof(int))
78321 +                       return -1;
78322 +               l2 -= sizeof(int);
78323 +               if (l1 <= sizeof(int))
78324 +                       return 1;
78325 +               l1 -= sizeof(int);
78326 +       }
78328 +       if (l2 < sizeof(int))
78329 +               return -1;
78331 +       for (count = min(l1, l2) >> 2; count > 0; --count, ++k1, ++k2) {
78332 +               u32 t1 = le32_to_cpu(*k1);
78333 +               u32 t2 = le32_to_cpu(*k2);
78335 +               if (t1 > t2)
78336 +                       return 1;
78337 +               if (t1 < t2)
78338 +                       return -1;
78339 +       }
78341 +       if (l1 > l2)
78342 +               return 1;
78343 +       if (l1 < l2)
78344 +               return -1;
78346 +       return 0;
78349 +static inline NTFS_CMP_FUNC get_cmp_func(const struct INDEX_ROOT *root)
78351 +       switch (root->type) {
78352 +       case ATTR_NAME:
78353 +               if (root->rule == NTFS_COLLATION_TYPE_FILENAME)
78354 +                       return &cmp_fnames;
78355 +               break;
78356 +       case ATTR_ZERO:
78357 +               switch (root->rule) {
78358 +               case NTFS_COLLATION_TYPE_UINT:
78359 +                       return &cmp_uint;
78360 +               case NTFS_COLLATION_TYPE_SECURITY_HASH:
78361 +                       return &cmp_sdh;
78362 +               case NTFS_COLLATION_TYPE_UINTS:
78363 +                       return &cmp_uints;
78364 +               default:
78365 +                       break;
78366 +               }
78367 +       default:
78368 +               break;
78369 +       }
78371 +       return NULL;
78374 +struct bmp_buf {
78375 +       struct ATTRIB *b;
78376 +       struct mft_inode *mi;
78377 +       struct buffer_head *bh;
78378 +       ulong *buf;
78379 +       size_t bit;
78380 +       u32 nbits;
78381 +       u64 new_valid;
78384 +static int bmp_buf_get(struct ntfs_index *indx, struct ntfs_inode *ni,
78385 +                      size_t bit, struct bmp_buf *bbuf)
78387 +       struct ATTRIB *b;
78388 +       size_t data_size, valid_size, vbo, off = bit >> 3;
78389 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
78390 +       CLST vcn = off >> sbi->cluster_bits;
78391 +       struct ATTR_LIST_ENTRY *le = NULL;
78392 +       struct buffer_head *bh;
78393 +       struct super_block *sb;
78394 +       u32 blocksize;
78395 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
78397 +       bbuf->bh = NULL;
78399 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
78400 +                        &vcn, &bbuf->mi);
78401 +       bbuf->b = b;
78402 +       if (!b)
78403 +               return -EINVAL;
78405 +       if (!b->non_res) {
78406 +               data_size = le32_to_cpu(b->res.data_size);
78408 +               if (off >= data_size)
78409 +                       return -EINVAL;
78411 +               bbuf->buf = (ulong *)resident_data(b);
78412 +               bbuf->bit = 0;
78413 +               bbuf->nbits = data_size * 8;
78415 +               return 0;
78416 +       }
78418 +       data_size = le64_to_cpu(b->nres.data_size);
78419 +       if (WARN_ON(off >= data_size)) {
78420 +               /* looks like filesystem error */
78421 +               return -EINVAL;
78422 +       }
78424 +       valid_size = le64_to_cpu(b->nres.valid_size);
78426 +       bh = ntfs_bread_run(sbi, &indx->bitmap_run, off);
78427 +       if (!bh)
78428 +               return -EIO;
78430 +       if (IS_ERR(bh))
78431 +               return PTR_ERR(bh);
78433 +       bbuf->bh = bh;
78435 +       if (buffer_locked(bh))
78436 +               __wait_on_buffer(bh);
78438 +       lock_buffer(bh);
78440 +       sb = sbi->sb;
78441 +       blocksize = sb->s_blocksize;
78443 +       vbo = off & ~(size_t)sbi->block_mask;
78445 +       bbuf->new_valid = vbo + blocksize;
78446 +       if (bbuf->new_valid <= valid_size)
78447 +               bbuf->new_valid = 0;
78448 +       else if (bbuf->new_valid > data_size)
78449 +               bbuf->new_valid = data_size;
78451 +       if (vbo >= valid_size) {
78452 +               memset(bh->b_data, 0, blocksize);
78453 +       } else if (vbo + blocksize > valid_size) {
78454 +               u32 voff = valid_size & sbi->block_mask;
78456 +               memset(bh->b_data + voff, 0, blocksize - voff);
78457 +       }
78459 +       bbuf->buf = (ulong *)bh->b_data;
78460 +       bbuf->bit = 8 * (off & ~(size_t)sbi->block_mask);
78461 +       bbuf->nbits = 8 * blocksize;
78463 +       return 0;
78466 +static void bmp_buf_put(struct bmp_buf *bbuf, bool dirty)
78468 +       struct buffer_head *bh = bbuf->bh;
78469 +       struct ATTRIB *b = bbuf->b;
78471 +       if (!bh) {
78472 +               if (b && !b->non_res && dirty)
78473 +                       bbuf->mi->dirty = true;
78474 +               return;
78475 +       }
78477 +       if (!dirty)
78478 +               goto out;
78480 +       if (bbuf->new_valid) {
78481 +               b->nres.valid_size = cpu_to_le64(bbuf->new_valid);
78482 +               bbuf->mi->dirty = true;
78483 +       }
78485 +       set_buffer_uptodate(bh);
78486 +       mark_buffer_dirty(bh);
78488 +out:
78489 +       unlock_buffer(bh);
78490 +       put_bh(bh);
78494 + * indx_mark_used
78495 + *
78496 + * marks the bit 'bit' as used
78497 + */
78498 +static int indx_mark_used(struct ntfs_index *indx, struct ntfs_inode *ni,
78499 +                         size_t bit)
78501 +       int err;
78502 +       struct bmp_buf bbuf;
78504 +       err = bmp_buf_get(indx, ni, bit, &bbuf);
78505 +       if (err)
78506 +               return err;
78508 +       __set_bit(bit - bbuf.bit, bbuf.buf);
78510 +       bmp_buf_put(&bbuf, true);
78512 +       return 0;
78516 + * indx_mark_free
78517 + *
78518 + * the bit 'bit' as free
78519 + */
78520 +static int indx_mark_free(struct ntfs_index *indx, struct ntfs_inode *ni,
78521 +                         size_t bit)
78523 +       int err;
78524 +       struct bmp_buf bbuf;
78526 +       err = bmp_buf_get(indx, ni, bit, &bbuf);
78527 +       if (err)
78528 +               return err;
78530 +       __clear_bit(bit - bbuf.bit, bbuf.buf);
78532 +       bmp_buf_put(&bbuf, true);
78534 +       return 0;
78538 + * if ntfs_readdir calls this function (indx_used_bit -> scan_nres_bitmap),
78539 + * inode is shared locked and no ni_lock
78540 + * use rw_semaphore for read/write access to bitmap_run
78541 + */
78542 +static int scan_nres_bitmap(struct ntfs_inode *ni, struct ATTRIB *bitmap,
78543 +                           struct ntfs_index *indx, size_t from,
78544 +                           bool (*fn)(const ulong *buf, u32 bit, u32 bits,
78545 +                                      size_t *ret),
78546 +                           size_t *ret)
78548 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
78549 +       struct super_block *sb = sbi->sb;
78550 +       struct runs_tree *run = &indx->bitmap_run;
78551 +       struct rw_semaphore *lock = &indx->run_lock;
78552 +       u32 nbits = sb->s_blocksize * 8;
78553 +       u32 blocksize = sb->s_blocksize;
78554 +       u64 valid_size = le64_to_cpu(bitmap->nres.valid_size);
78555 +       u64 data_size = le64_to_cpu(bitmap->nres.data_size);
78556 +       sector_t eblock = bytes_to_block(sb, data_size);
78557 +       size_t vbo = from >> 3;
78558 +       sector_t blk = (vbo & sbi->cluster_mask) >> sb->s_blocksize_bits;
78559 +       sector_t vblock = vbo >> sb->s_blocksize_bits;
78560 +       sector_t blen, block;
78561 +       CLST lcn, clen, vcn, vcn_next;
78562 +       size_t idx;
78563 +       struct buffer_head *bh;
78564 +       bool ok;
78566 +       *ret = MINUS_ONE_T;
78568 +       if (vblock >= eblock)
78569 +               return 0;
78571 +       from &= nbits - 1;
78572 +       vcn = vbo >> sbi->cluster_bits;
78574 +       down_read(lock);
78575 +       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
78576 +       up_read(lock);
78578 +next_run:
78579 +       if (!ok) {
78580 +               int err;
78581 +               const struct INDEX_NAMES *name = &s_index_names[indx->type];
78583 +               down_write(lock);
78584 +               err = attr_load_runs_vcn(ni, ATTR_BITMAP, name->name,
78585 +                                        name->name_len, run, vcn);
78586 +               up_write(lock);
78587 +               if (err)
78588 +                       return err;
78589 +               down_read(lock);
78590 +               ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
78591 +               up_read(lock);
78592 +               if (!ok)
78593 +                       return -EINVAL;
78594 +       }
78596 +       blen = (sector_t)clen * sbi->blocks_per_cluster;
78597 +       block = (sector_t)lcn * sbi->blocks_per_cluster;
78599 +       for (; blk < blen; blk++, from = 0) {
78600 +               bh = ntfs_bread(sb, block + blk);
78601 +               if (!bh)
78602 +                       return -EIO;
78604 +               vbo = (u64)vblock << sb->s_blocksize_bits;
78605 +               if (vbo >= valid_size) {
78606 +                       memset(bh->b_data, 0, blocksize);
78607 +               } else if (vbo + blocksize > valid_size) {
78608 +                       u32 voff = valid_size & sbi->block_mask;
78610 +                       memset(bh->b_data + voff, 0, blocksize - voff);
78611 +               }
78613 +               if (vbo + blocksize > data_size)
78614 +                       nbits = 8 * (data_size - vbo);
78616 +               ok = nbits > from ? (*fn)((ulong *)bh->b_data, from, nbits, ret)
78617 +                                 : false;
78618 +               put_bh(bh);
78620 +               if (ok) {
78621 +                       *ret += 8 * vbo;
78622 +                       return 0;
78623 +               }
78625 +               if (++vblock >= eblock) {
78626 +                       *ret = MINUS_ONE_T;
78627 +                       return 0;
78628 +               }
78629 +       }
78630 +       blk = 0;
78631 +       vcn_next = vcn + clen;
78632 +       down_read(lock);
78633 +       ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) && vcn == vcn_next;
78634 +       if (!ok)
78635 +               vcn = vcn_next;
78636 +       up_read(lock);
78637 +       goto next_run;
78640 +static bool scan_for_free(const ulong *buf, u32 bit, u32 bits, size_t *ret)
78642 +       size_t pos = find_next_zero_bit(buf, bits, bit);
78644 +       if (pos >= bits)
78645 +               return false;
78646 +       *ret = pos;
78647 +       return true;
78651 + * indx_find_free
78652 + *
78653 + * looks for free bit
78654 + * returns -1 if no free bits
78655 + */
78656 +static int indx_find_free(struct ntfs_index *indx, struct ntfs_inode *ni,
78657 +                         size_t *bit, struct ATTRIB **bitmap)
78659 +       struct ATTRIB *b;
78660 +       struct ATTR_LIST_ENTRY *le = NULL;
78661 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
78662 +       int err;
78664 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
78665 +                        NULL, NULL);
78667 +       if (!b)
78668 +               return -ENOENT;
78670 +       *bitmap = b;
78671 +       *bit = MINUS_ONE_T;
78673 +       if (!b->non_res) {
78674 +               u32 nbits = 8 * le32_to_cpu(b->res.data_size);
78675 +               size_t pos = find_next_zero_bit(resident_data(b), nbits, 0);
78677 +               if (pos < nbits)
78678 +                       *bit = pos;
78679 +       } else {
78680 +               err = scan_nres_bitmap(ni, b, indx, 0, &scan_for_free, bit);
78682 +               if (err)
78683 +                       return err;
78684 +       }
78686 +       return 0;
78689 +static bool scan_for_used(const ulong *buf, u32 bit, u32 bits, size_t *ret)
78691 +       size_t pos = find_next_bit(buf, bits, bit);
78693 +       if (pos >= bits)
78694 +               return false;
78695 +       *ret = pos;
78696 +       return true;
78700 + * indx_used_bit
78701 + *
78702 + * looks for used bit
78703 + * returns MINUS_ONE_T if no used bits
78704 + */
78705 +int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit)
78707 +       struct ATTRIB *b;
78708 +       struct ATTR_LIST_ENTRY *le = NULL;
78709 +       size_t from = *bit;
78710 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
78711 +       int err;
78713 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
78714 +                        NULL, NULL);
78716 +       if (!b)
78717 +               return -ENOENT;
78719 +       *bit = MINUS_ONE_T;
78721 +       if (!b->non_res) {
78722 +               u32 nbits = le32_to_cpu(b->res.data_size) * 8;
78723 +               size_t pos = find_next_bit(resident_data(b), nbits, from);
78725 +               if (pos < nbits)
78726 +                       *bit = pos;
78727 +       } else {
78728 +               err = scan_nres_bitmap(ni, b, indx, from, &scan_for_used, bit);
78729 +               if (err)
78730 +                       return err;
78731 +       }
78733 +       return 0;
78737 + * hdr_find_split
78738 + *
78739 + * finds a point at which the index allocation buffer would like to
78740 + * be split.
78741 + * NOTE: This function should never return 'END' entry NULL returns on error
78742 + */
78743 +static const struct NTFS_DE *hdr_find_split(const struct INDEX_HDR *hdr)
78745 +       size_t o;
78746 +       const struct NTFS_DE *e = hdr_first_de(hdr);
78747 +       u32 used_2 = le32_to_cpu(hdr->used) >> 1;
78748 +       u16 esize = le16_to_cpu(e->size);
78750 +       if (!e || de_is_last(e))
78751 +               return NULL;
78753 +       for (o = le32_to_cpu(hdr->de_off) + esize; o < used_2; o += esize) {
78754 +               const struct NTFS_DE *p = e;
78756 +               e = Add2Ptr(hdr, o);
78758 +               /* We must not return END entry */
78759 +               if (de_is_last(e))
78760 +                       return p;
78762 +               esize = le16_to_cpu(e->size);
78763 +       }
78765 +       return e;
78769 + * hdr_insert_head
78770 + *
78771 + * inserts some entries at the beginning of the buffer.
78772 + * It is used to insert entries into a newly-created buffer.
78773 + */
78774 +static const struct NTFS_DE *hdr_insert_head(struct INDEX_HDR *hdr,
78775 +                                            const void *ins, u32 ins_bytes)
78777 +       u32 to_move;
78778 +       struct NTFS_DE *e = hdr_first_de(hdr);
78779 +       u32 used = le32_to_cpu(hdr->used);
78781 +       if (!e)
78782 +               return NULL;
78784 +       /* Now we just make room for the inserted entries and jam it in. */
78785 +       to_move = used - le32_to_cpu(hdr->de_off);
78786 +       memmove(Add2Ptr(e, ins_bytes), e, to_move);
78787 +       memcpy(e, ins, ins_bytes);
78788 +       hdr->used = cpu_to_le32(used + ins_bytes);
78790 +       return e;
78793 +void fnd_clear(struct ntfs_fnd *fnd)
78795 +       int i;
78797 +       for (i = 0; i < fnd->level; i++) {
78798 +               struct indx_node *n = fnd->nodes[i];
78800 +               if (!n)
78801 +                       continue;
78803 +               put_indx_node(n);
78804 +               fnd->nodes[i] = NULL;
78805 +       }
78806 +       fnd->level = 0;
78807 +       fnd->root_de = NULL;
78810 +static int fnd_push(struct ntfs_fnd *fnd, struct indx_node *n,
78811 +                   struct NTFS_DE *e)
78813 +       int i;
78815 +       i = fnd->level;
78816 +       if (i < 0 || i >= ARRAY_SIZE(fnd->nodes))
78817 +               return -EINVAL;
78818 +       fnd->nodes[i] = n;
78819 +       fnd->de[i] = e;
78820 +       fnd->level += 1;
78821 +       return 0;
78824 +static struct indx_node *fnd_pop(struct ntfs_fnd *fnd)
78826 +       struct indx_node *n;
78827 +       int i = fnd->level;
78829 +       i -= 1;
78830 +       n = fnd->nodes[i];
78831 +       fnd->nodes[i] = NULL;
78832 +       fnd->level = i;
78834 +       return n;
78837 +static bool fnd_is_empty(struct ntfs_fnd *fnd)
78839 +       if (!fnd->level)
78840 +               return !fnd->root_de;
78842 +       return !fnd->de[fnd->level - 1];
78846 + * hdr_find_e
78847 + *
78848 + * locates an entry the index buffer.
78849 + * If no matching entry is found, it returns the first entry which is greater
78850 + * than the desired entry If the search key is greater than all the entries the
78851 + * buffer, it returns the 'end' entry. This function does a binary search of the
78852 + * current index buffer, for the first entry that is <= to the search value
78853 + * Returns NULL if error
78854 + */
78855 +static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
78856 +                                 const struct INDEX_HDR *hdr, const void *key,
78857 +                                 size_t key_len, const void *ctx, int *diff)
78859 +       struct NTFS_DE *e;
78860 +       NTFS_CMP_FUNC cmp = indx->cmp;
78861 +       u32 e_size, e_key_len;
78862 +       u32 end = le32_to_cpu(hdr->used);
78863 +       u32 off = le32_to_cpu(hdr->de_off);
78865 +#ifdef NTFS3_INDEX_BINARY_SEARCH
78866 +       int max_idx = 0, fnd, min_idx;
78867 +       int nslots = 64;
78868 +       u16 *offs;
78870 +       if (end > 0x10000)
78871 +               goto next;
78873 +       offs = ntfs_malloc(sizeof(u16) * nslots);
78874 +       if (!offs)
78875 +               goto next;
78877 +       /* use binary search algorithm */
78878 +next1:
78879 +       if (off + sizeof(struct NTFS_DE) > end) {
78880 +               e = NULL;
78881 +               goto out1;
78882 +       }
78883 +       e = Add2Ptr(hdr, off);
78884 +       e_size = le16_to_cpu(e->size);
78886 +       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end) {
78887 +               e = NULL;
78888 +               goto out1;
78889 +       }
78891 +       if (max_idx >= nslots) {
78892 +               u16 *ptr;
78893 +               int new_slots = QuadAlign(2 * nslots);
78895 +               ptr = ntfs_malloc(sizeof(u16) * new_slots);
78896 +               if (ptr)
78897 +                       memcpy(ptr, offs, sizeof(u16) * max_idx);
78898 +               ntfs_free(offs);
78899 +               offs = ptr;
78900 +               nslots = new_slots;
78901 +               if (!ptr)
78902 +                       goto next;
78903 +       }
78905 +       /* Store entry table */
78906 +       offs[max_idx] = off;
78908 +       if (!de_is_last(e)) {
78909 +               off += e_size;
78910 +               max_idx += 1;
78911 +               goto next1;
78912 +       }
78914 +       /*
78915 +        * Table of pointers is created
78916 +        * Use binary search to find entry that is <= to the search value
78917 +        */
78918 +       fnd = -1;
78919 +       min_idx = 0;
78921 +       while (min_idx <= max_idx) {
78922 +               int mid_idx = min_idx + ((max_idx - min_idx) >> 1);
78923 +               int diff2;
78925 +               e = Add2Ptr(hdr, offs[mid_idx]);
78927 +               e_key_len = le16_to_cpu(e->key_size);
78929 +               diff2 = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
78931 +               if (!diff2) {
78932 +                       *diff = 0;
78933 +                       goto out1;
78934 +               }
78936 +               if (diff2 < 0) {
78937 +                       max_idx = mid_idx - 1;
78938 +                       fnd = mid_idx;
78939 +                       if (!fnd)
78940 +                               break;
78941 +               } else {
78942 +                       min_idx = mid_idx + 1;
78943 +               }
78944 +       }
78946 +       if (fnd == -1) {
78947 +               e = NULL;
78948 +               goto out1;
78949 +       }
78951 +       *diff = -1;
78952 +       e = Add2Ptr(hdr, offs[fnd]);
78954 +out1:
78955 +       ntfs_free(offs);
78957 +       return e;
78958 +#endif
78960 +next:
78961 +       /*
78962 +        * Entries index are sorted
78963 +        * Enumerate all entries until we find entry that is <= to the search value
78964 +        */
78965 +       if (off + sizeof(struct NTFS_DE) > end)
78966 +               return NULL;
78968 +       e = Add2Ptr(hdr, off);
78969 +       e_size = le16_to_cpu(e->size);
78971 +       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
78972 +               return NULL;
78974 +       off += e_size;
78976 +       e_key_len = le16_to_cpu(e->key_size);
78978 +       *diff = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
78979 +       if (!*diff)
78980 +               return e;
78982 +       if (*diff <= 0)
78983 +               return e;
78985 +       if (de_is_last(e)) {
78986 +               *diff = 1;
78987 +               return e;
78988 +       }
78989 +       goto next;
78993 + * hdr_insert_de
78994 + *
78995 + * inserts an index entry into the buffer.
78996 + * 'before' should be a pointer previously returned from hdr_find_e
78997 + */
78998 +static struct NTFS_DE *hdr_insert_de(const struct ntfs_index *indx,
78999 +                                    struct INDEX_HDR *hdr,
79000 +                                    const struct NTFS_DE *de,
79001 +                                    struct NTFS_DE *before, const void *ctx)
79003 +       int diff;
79004 +       size_t off = PtrOffset(hdr, before);
79005 +       u32 used = le32_to_cpu(hdr->used);
79006 +       u32 total = le32_to_cpu(hdr->total);
79007 +       u16 de_size = le16_to_cpu(de->size);
79009 +       /* First, check to see if there's enough room */
79010 +       if (used + de_size > total)
79011 +               return NULL;
79013 +       /* We know there's enough space, so we know we'll succeed. */
79014 +       if (before) {
79015 +               /* Check that before is inside Index */
79016 +               if (off >= used || off < le32_to_cpu(hdr->de_off) ||
79017 +                   off + le16_to_cpu(before->size) > total) {
79018 +                       return NULL;
79019 +               }
79020 +               goto ok;
79021 +       }
79022 +       /* No insert point is applied. Get it manually */
79023 +       before = hdr_find_e(indx, hdr, de + 1, le16_to_cpu(de->key_size), ctx,
79024 +                           &diff);
79025 +       if (!before)
79026 +               return NULL;
79027 +       off = PtrOffset(hdr, before);
79029 +ok:
79030 +       /* Now we just make room for the entry and jam it in. */
79031 +       memmove(Add2Ptr(before, de_size), before, used - off);
79033 +       hdr->used = cpu_to_le32(used + de_size);
79034 +       memcpy(before, de, de_size);
79036 +       return before;
79040 + * hdr_delete_de
79041 + *
79042 + * removes an entry from the index buffer
79043 + */
79044 +static inline struct NTFS_DE *hdr_delete_de(struct INDEX_HDR *hdr,
79045 +                                           struct NTFS_DE *re)
79047 +       u32 used = le32_to_cpu(hdr->used);
79048 +       u16 esize = le16_to_cpu(re->size);
79049 +       u32 off = PtrOffset(hdr, re);
79050 +       int bytes = used - (off + esize);
79052 +       if (off >= used || esize < sizeof(struct NTFS_DE) ||
79053 +           bytes < sizeof(struct NTFS_DE))
79054 +               return NULL;
79056 +       hdr->used = cpu_to_le32(used - esize);
79057 +       memmove(re, Add2Ptr(re, esize), bytes);
79059 +       return re;
79062 +void indx_clear(struct ntfs_index *indx)
79064 +       run_close(&indx->alloc_run);
79065 +       run_close(&indx->bitmap_run);
79068 +int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
79069 +             const struct ATTRIB *attr, enum index_mutex_classed type)
79071 +       u32 t32;
79072 +       const struct INDEX_ROOT *root = resident_data(attr);
79074 +       /* Check root fields */
79075 +       if (!root->index_block_clst)
79076 +               return -EINVAL;
79078 +       indx->type = type;
79079 +       indx->idx2vbn_bits = __ffs(root->index_block_clst);
79081 +       t32 = le32_to_cpu(root->index_block_size);
79082 +       indx->index_bits = blksize_bits(t32);
79084 +       /* Check index record size */
79085 +       if (t32 < sbi->cluster_size) {
79086 +               /* index record is smaller than a cluster, use 512 blocks */
79087 +               if (t32 != root->index_block_clst * SECTOR_SIZE)
79088 +                       return -EINVAL;
79090 +               /* Check alignment to a cluster */
79091 +               if ((sbi->cluster_size >> SECTOR_SHIFT) &
79092 +                   (root->index_block_clst - 1)) {
79093 +                       return -EINVAL;
79094 +               }
79096 +               indx->vbn2vbo_bits = SECTOR_SHIFT;
79097 +       } else {
79098 +               /* index record must be a multiple of cluster size */
79099 +               if (t32 != root->index_block_clst << sbi->cluster_bits)
79100 +                       return -EINVAL;
79102 +               indx->vbn2vbo_bits = sbi->cluster_bits;
79103 +       }
79105 +       init_rwsem(&indx->run_lock);
79107 +       indx->cmp = get_cmp_func(root);
79108 +       return indx->cmp ? 0 : -EINVAL;
79111 +static struct indx_node *indx_new(struct ntfs_index *indx,
79112 +                                 struct ntfs_inode *ni, CLST vbn,
79113 +                                 const __le64 *sub_vbn)
79115 +       int err;
79116 +       struct NTFS_DE *e;
79117 +       struct indx_node *r;
79118 +       struct INDEX_HDR *hdr;
79119 +       struct INDEX_BUFFER *index;
79120 +       u64 vbo = (u64)vbn << indx->vbn2vbo_bits;
79121 +       u32 bytes = 1u << indx->index_bits;
79122 +       u16 fn;
79123 +       u32 eo;
79125 +       r = ntfs_zalloc(sizeof(struct indx_node));
79126 +       if (!r)
79127 +               return ERR_PTR(-ENOMEM);
79129 +       index = ntfs_zalloc(bytes);
79130 +       if (!index) {
79131 +               ntfs_free(r);
79132 +               return ERR_PTR(-ENOMEM);
79133 +       }
79135 +       err = ntfs_get_bh(ni->mi.sbi, &indx->alloc_run, vbo, bytes, &r->nb);
79137 +       if (err) {
79138 +               ntfs_free(index);
79139 +               ntfs_free(r);
79140 +               return ERR_PTR(err);
79141 +       }
79143 +       /* Create header */
79144 +       index->rhdr.sign = NTFS_INDX_SIGNATURE;
79145 +       index->rhdr.fix_off = cpu_to_le16(sizeof(struct INDEX_BUFFER)); // 0x28
79146 +       fn = (bytes >> SECTOR_SHIFT) + 1; // 9
79147 +       index->rhdr.fix_num = cpu_to_le16(fn);
79148 +       index->vbn = cpu_to_le64(vbn);
79149 +       hdr = &index->ihdr;
79150 +       eo = QuadAlign(sizeof(struct INDEX_BUFFER) + fn * sizeof(short));
79151 +       hdr->de_off = cpu_to_le32(eo);
79153 +       e = Add2Ptr(hdr, eo);
79155 +       if (sub_vbn) {
79156 +               e->flags = NTFS_IE_LAST | NTFS_IE_HAS_SUBNODES;
79157 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64));
79158 +               hdr->used =
79159 +                       cpu_to_le32(eo + sizeof(struct NTFS_DE) + sizeof(u64));
79160 +               de_set_vbn_le(e, *sub_vbn);
79161 +               hdr->flags = 1;
79162 +       } else {
79163 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
79164 +               hdr->used = cpu_to_le32(eo + sizeof(struct NTFS_DE));
79165 +               e->flags = NTFS_IE_LAST;
79166 +       }
79168 +       hdr->total = cpu_to_le32(bytes - offsetof(struct INDEX_BUFFER, ihdr));
79170 +       r->index = index;
79171 +       return r;
79174 +struct INDEX_ROOT *indx_get_root(struct ntfs_index *indx, struct ntfs_inode *ni,
79175 +                                struct ATTRIB **attr, struct mft_inode **mi)
79177 +       struct ATTR_LIST_ENTRY *le = NULL;
79178 +       struct ATTRIB *a;
79179 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
79181 +       a = ni_find_attr(ni, NULL, &le, ATTR_ROOT, in->name, in->name_len, NULL,
79182 +                        mi);
79183 +       if (!a)
79184 +               return NULL;
79186 +       if (attr)
79187 +               *attr = a;
79189 +       return resident_data_ex(a, sizeof(struct INDEX_ROOT));
79192 +static int indx_write(struct ntfs_index *indx, struct ntfs_inode *ni,
79193 +                     struct indx_node *node, int sync)
79195 +       struct INDEX_BUFFER *ib = node->index;
79197 +       return ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &node->nb, sync);
79201 + * if ntfs_readdir calls this function
79202 + * inode is shared locked and no ni_lock
79203 + * use rw_semaphore for read/write access to alloc_run
79204 + */
79205 +int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
79206 +             struct indx_node **node)
79208 +       int err;
79209 +       struct INDEX_BUFFER *ib;
79210 +       struct runs_tree *run = &indx->alloc_run;
79211 +       struct rw_semaphore *lock = &indx->run_lock;
79212 +       u64 vbo = (u64)vbn << indx->vbn2vbo_bits;
79213 +       u32 bytes = 1u << indx->index_bits;
79214 +       struct indx_node *in = *node;
79215 +       const struct INDEX_NAMES *name;
79217 +       if (!in) {
79218 +               in = ntfs_zalloc(sizeof(struct indx_node));
79219 +               if (!in)
79220 +                       return -ENOMEM;
79221 +       } else {
79222 +               nb_put(&in->nb);
79223 +       }
79225 +       ib = in->index;
79226 +       if (!ib) {
79227 +               ib = ntfs_malloc(bytes);
79228 +               if (!ib) {
79229 +                       err = -ENOMEM;
79230 +                       goto out;
79231 +               }
79232 +       }
79234 +       down_read(lock);
79235 +       err = ntfs_read_bh(ni->mi.sbi, run, vbo, &ib->rhdr, bytes, &in->nb);
79236 +       up_read(lock);
79237 +       if (!err)
79238 +               goto ok;
79240 +       if (err == -E_NTFS_FIXUP)
79241 +               goto ok;
79243 +       if (err != -ENOENT)
79244 +               goto out;
79246 +       name = &s_index_names[indx->type];
79247 +       down_write(lock);
79248 +       err = attr_load_runs_range(ni, ATTR_ALLOC, name->name, name->name_len,
79249 +                                  run, vbo, vbo + bytes);
79250 +       up_write(lock);
79251 +       if (err)
79252 +               goto out;
79254 +       down_read(lock);
79255 +       err = ntfs_read_bh(ni->mi.sbi, run, vbo, &ib->rhdr, bytes, &in->nb);
79256 +       up_read(lock);
79257 +       if (err == -E_NTFS_FIXUP)
79258 +               goto ok;
79260 +       if (err)
79261 +               goto out;
79263 +ok:
79264 +       if (err == -E_NTFS_FIXUP) {
79265 +               ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &in->nb, 0);
79266 +               err = 0;
79267 +       }
79269 +       in->index = ib;
79270 +       *node = in;
79272 +out:
79273 +       if (ib != in->index)
79274 +               ntfs_free(ib);
79276 +       if (*node != in) {
79277 +               nb_put(&in->nb);
79278 +               ntfs_free(in);
79279 +       }
79281 +       return err;
79285 + * indx_find
79286 + *
79287 + * scans NTFS directory for given entry
79288 + */
79289 +int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
79290 +             const struct INDEX_ROOT *root, const void *key, size_t key_len,
79291 +             const void *ctx, int *diff, struct NTFS_DE **entry,
79292 +             struct ntfs_fnd *fnd)
79294 +       int err;
79295 +       struct NTFS_DE *e;
79296 +       const struct INDEX_HDR *hdr;
79297 +       struct indx_node *node;
79299 +       if (!root)
79300 +               root = indx_get_root(&ni->dir, ni, NULL, NULL);
79302 +       if (!root) {
79303 +               err = -EINVAL;
79304 +               goto out;
79305 +       }
79307 +       hdr = &root->ihdr;
79309 +       /* Check cache */
79310 +       e = fnd->level ? fnd->de[fnd->level - 1] : fnd->root_de;
79311 +       if (e && !de_is_last(e) &&
79312 +           !(*indx->cmp)(key, key_len, e + 1, le16_to_cpu(e->key_size), ctx)) {
79313 +               *entry = e;
79314 +               *diff = 0;
79315 +               return 0;
79316 +       }
79318 +       /* Soft finder reset */
79319 +       fnd_clear(fnd);
79321 +       /* Lookup entry that is <= to the search value */
79322 +       e = hdr_find_e(indx, hdr, key, key_len, ctx, diff);
79323 +       if (!e)
79324 +               return -EINVAL;
79326 +       if (fnd)
79327 +               fnd->root_de = e;
79329 +       err = 0;
79331 +       for (;;) {
79332 +               node = NULL;
79333 +               if (*diff >= 0 || !de_has_vcn_ex(e)) {
79334 +                       *entry = e;
79335 +                       goto out;
79336 +               }
79338 +               /* Read next level. */
79339 +               err = indx_read(indx, ni, de_get_vbn(e), &node);
79340 +               if (err)
79341 +                       goto out;
79343 +               /* Lookup entry that is <= to the search value */
79344 +               e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx,
79345 +                              diff);
79346 +               if (!e) {
79347 +                       err = -EINVAL;
79348 +                       put_indx_node(node);
79349 +                       goto out;
79350 +               }
79352 +               fnd_push(fnd, node, e);
79353 +       }
79355 +out:
79356 +       return err;
79359 +int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,
79360 +                  const struct INDEX_ROOT *root, struct NTFS_DE **entry,
79361 +                  struct ntfs_fnd *fnd)
79363 +       int err;
79364 +       struct indx_node *n = NULL;
79365 +       struct NTFS_DE *e;
79366 +       size_t iter = 0;
79367 +       int level = fnd->level;
79369 +       if (!*entry) {
79370 +               /* Start find */
79371 +               e = hdr_first_de(&root->ihdr);
79372 +               if (!e)
79373 +                       return 0;
79374 +               fnd_clear(fnd);
79375 +               fnd->root_de = e;
79376 +       } else if (!level) {
79377 +               if (de_is_last(fnd->root_de)) {
79378 +                       *entry = NULL;
79379 +                       return 0;
79380 +               }
79382 +               e = hdr_next_de(&root->ihdr, fnd->root_de);
79383 +               if (!e)
79384 +                       return -EINVAL;
79385 +               fnd->root_de = e;
79386 +       } else {
79387 +               n = fnd->nodes[level - 1];
79388 +               e = fnd->de[level - 1];
79390 +               if (de_is_last(e))
79391 +                       goto pop_level;
79393 +               e = hdr_next_de(&n->index->ihdr, e);
79394 +               if (!e)
79395 +                       return -EINVAL;
79397 +               fnd->de[level - 1] = e;
79398 +       }
79400 +       /* Just to avoid tree cycle */
79401 +next_iter:
79402 +       if (iter++ >= 1000)
79403 +               return -EINVAL;
79405 +       while (de_has_vcn_ex(e)) {
79406 +               if (le16_to_cpu(e->size) <
79407 +                   sizeof(struct NTFS_DE) + sizeof(u64)) {
79408 +                       if (n) {
79409 +                               fnd_pop(fnd);
79410 +                               ntfs_free(n);
79411 +                       }
79412 +                       return -EINVAL;
79413 +               }
79415 +               /* Read next level */
79416 +               err = indx_read(indx, ni, de_get_vbn(e), &n);
79417 +               if (err)
79418 +                       return err;
79420 +               /* Try next level */
79421 +               e = hdr_first_de(&n->index->ihdr);
79422 +               if (!e) {
79423 +                       ntfs_free(n);
79424 +                       return -EINVAL;
79425 +               }
79427 +               fnd_push(fnd, n, e);
79428 +       }
79430 +       if (le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) {
79431 +               *entry = e;
79432 +               return 0;
79433 +       }
79435 +pop_level:
79436 +       for (;;) {
79437 +               if (!de_is_last(e))
79438 +                       goto next_iter;
79440 +               /* Pop one level */
79441 +               if (n) {
79442 +                       fnd_pop(fnd);
79443 +                       ntfs_free(n);
79444 +               }
79446 +               level = fnd->level;
79448 +               if (level) {
79449 +                       n = fnd->nodes[level - 1];
79450 +                       e = fnd->de[level - 1];
79451 +               } else if (fnd->root_de) {
79452 +                       n = NULL;
79453 +                       e = fnd->root_de;
79454 +                       fnd->root_de = NULL;
79455 +               } else {
79456 +                       *entry = NULL;
79457 +                       return 0;
79458 +               }
79460 +               if (le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) {
79461 +                       *entry = e;
79462 +                       if (!fnd->root_de)
79463 +                               fnd->root_de = e;
79464 +                       return 0;
79465 +               }
79466 +       }
79469 +int indx_find_raw(struct ntfs_index *indx, struct ntfs_inode *ni,
79470 +                 const struct INDEX_ROOT *root, struct NTFS_DE **entry,
79471 +                 size_t *off, struct ntfs_fnd *fnd)
79473 +       int err;
79474 +       struct indx_node *n = NULL;
79475 +       struct NTFS_DE *e = NULL;
79476 +       struct NTFS_DE *e2;
79477 +       size_t bit;
79478 +       CLST next_used_vbn;
79479 +       CLST next_vbn;
79480 +       u32 record_size = ni->mi.sbi->record_size;
79482 +       /* Use non sorted algorithm */
79483 +       if (!*entry) {
79484 +               /* This is the first call */
79485 +               e = hdr_first_de(&root->ihdr);
79486 +               if (!e)
79487 +                       return 0;
79488 +               fnd_clear(fnd);
79489 +               fnd->root_de = e;
79491 +               /* The first call with setup of initial element */
79492 +               if (*off >= record_size) {
79493 +                       next_vbn = (((*off - record_size) >> indx->index_bits))
79494 +                                  << indx->idx2vbn_bits;
79495 +                       /* jump inside cycle 'for'*/
79496 +                       goto next;
79497 +               }
79499 +               /* Start enumeration from root */
79500 +               *off = 0;
79501 +       } else if (!fnd->root_de)
79502 +               return -EINVAL;
79504 +       for (;;) {
79505 +               /* Check if current entry can be used */
79506 +               if (e && le16_to_cpu(e->size) > sizeof(struct NTFS_DE))
79507 +                       goto ok;
79509 +               if (!fnd->level) {
79510 +                       /* Continue to enumerate root */
79511 +                       if (!de_is_last(fnd->root_de)) {
79512 +                               e = hdr_next_de(&root->ihdr, fnd->root_de);
79513 +                               if (!e)
79514 +                                       return -EINVAL;
79515 +                               fnd->root_de = e;
79516 +                               continue;
79517 +                       }
79519 +                       /* Start to enumerate indexes from 0 */
79520 +                       next_vbn = 0;
79521 +               } else {
79522 +                       /* Continue to enumerate indexes */
79523 +                       e2 = fnd->de[fnd->level - 1];
79525 +                       n = fnd->nodes[fnd->level - 1];
79527 +                       if (!de_is_last(e2)) {
79528 +                               e = hdr_next_de(&n->index->ihdr, e2);
79529 +                               if (!e)
79530 +                                       return -EINVAL;
79531 +                               fnd->de[fnd->level - 1] = e;
79532 +                               continue;
79533 +                       }
79535 +                       /* Continue with next index */
79536 +                       next_vbn = le64_to_cpu(n->index->vbn) +
79537 +                                  root->index_block_clst;
79538 +               }
79540 +next:
79541 +               /* Release current index */
79542 +               if (n) {
79543 +                       fnd_pop(fnd);
79544 +                       put_indx_node(n);
79545 +                       n = NULL;
79546 +               }
79548 +               /* Skip all free indexes */
79549 +               bit = next_vbn >> indx->idx2vbn_bits;
79550 +               err = indx_used_bit(indx, ni, &bit);
79551 +               if (err == -ENOENT || bit == MINUS_ONE_T) {
79552 +                       /* No used indexes */
79553 +                       *entry = NULL;
79554 +                       return 0;
79555 +               }
79557 +               next_used_vbn = bit << indx->idx2vbn_bits;
79559 +               /* Read buffer into memory */
79560 +               err = indx_read(indx, ni, next_used_vbn, &n);
79561 +               if (err)
79562 +                       return err;
79564 +               e = hdr_first_de(&n->index->ihdr);
79565 +               fnd_push(fnd, n, e);
79566 +               if (!e)
79567 +                       return -EINVAL;
79568 +       }
79570 +ok:
79571 +       /* return offset to restore enumerator if necessary */
79572 +       if (!n) {
79573 +               /* 'e' points in root */
79574 +               *off = PtrOffset(&root->ihdr, e);
79575 +       } else {
79576 +               /* 'e' points in index */
79577 +               *off = (le64_to_cpu(n->index->vbn) << indx->vbn2vbo_bits) +
79578 +                      record_size + PtrOffset(&n->index->ihdr, e);
79579 +       }
79581 +       *entry = e;
79582 +       return 0;
79586 + * indx_create_allocate
79587 + *
79588 + * create "Allocation + Bitmap" attributes
79589 + */
79590 +static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
79591 +                               CLST *vbn)
79593 +       int err = -ENOMEM;
79594 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
79595 +       struct ATTRIB *bitmap;
79596 +       struct ATTRIB *alloc;
79597 +       u32 data_size = 1u << indx->index_bits;
79598 +       u32 alloc_size = ntfs_up_cluster(sbi, data_size);
79599 +       CLST len = alloc_size >> sbi->cluster_bits;
79600 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
79601 +       CLST alen;
79602 +       struct runs_tree run;
79604 +       run_init(&run);
79606 +       err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, 0, &alen, 0,
79607 +                                    NULL);
79608 +       if (err)
79609 +               goto out;
79611 +       err = ni_insert_nonresident(ni, ATTR_ALLOC, in->name, in->name_len,
79612 +                                   &run, 0, len, 0, &alloc, NULL);
79613 +       if (err)
79614 +               goto out1;
79616 +       alloc->nres.valid_size = alloc->nres.data_size = cpu_to_le64(data_size);
79618 +       err = ni_insert_resident(ni, bitmap_size(1), ATTR_BITMAP, in->name,
79619 +                                in->name_len, &bitmap, NULL);
79620 +       if (err)
79621 +               goto out2;
79623 +       if (in->name == I30_NAME) {
79624 +               ni->vfs_inode.i_size = data_size;
79625 +               inode_set_bytes(&ni->vfs_inode, alloc_size);
79626 +       }
79628 +       memcpy(&indx->alloc_run, &run, sizeof(run));
79630 +       *vbn = 0;
79632 +       return 0;
79634 +out2:
79635 +       mi_remove_attr(&ni->mi, alloc);
79637 +out1:
79638 +       run_deallocate(sbi, &run, false);
79640 +out:
79641 +       return err;
79645 + * indx_add_allocate
79646 + *
79647 + * add clusters to index
79648 + */
79649 +static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
79650 +                            CLST *vbn)
79652 +       int err;
79653 +       size_t bit;
79654 +       u64 data_size;
79655 +       u64 bmp_size, bmp_size_v;
79656 +       struct ATTRIB *bmp, *alloc;
79657 +       struct mft_inode *mi;
79658 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
79660 +       err = indx_find_free(indx, ni, &bit, &bmp);
79661 +       if (err)
79662 +               goto out1;
79664 +       if (bit != MINUS_ONE_T) {
79665 +               bmp = NULL;
79666 +       } else {
79667 +               if (bmp->non_res) {
79668 +                       bmp_size = le64_to_cpu(bmp->nres.data_size);
79669 +                       bmp_size_v = le64_to_cpu(bmp->nres.valid_size);
79670 +               } else {
79671 +                       bmp_size = bmp_size_v = le32_to_cpu(bmp->res.data_size);
79672 +               }
79674 +               bit = bmp_size << 3;
79675 +       }
79677 +       data_size = (u64)(bit + 1) << indx->index_bits;
79679 +       if (bmp) {
79680 +               /* Increase bitmap */
79681 +               err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
79682 +                                   &indx->bitmap_run, bitmap_size(bit + 1),
79683 +                                   NULL, true, NULL);
79684 +               if (err)
79685 +                       goto out1;
79686 +       }
79688 +       alloc = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, in->name, in->name_len,
79689 +                            NULL, &mi);
79690 +       if (!alloc) {
79691 +               if (bmp)
79692 +                       goto out2;
79693 +               goto out1;
79694 +       }
79696 +       /* Increase allocation */
79697 +       err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
79698 +                           &indx->alloc_run, data_size, &data_size, true,
79699 +                           NULL);
79700 +       if (err) {
79701 +               if (bmp)
79702 +                       goto out2;
79703 +               goto out1;
79704 +       }
79706 +       *vbn = bit << indx->idx2vbn_bits;
79708 +       return 0;
79710 +out2:
79711 +       /* Ops (no space?) */
79712 +       attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
79713 +                     &indx->bitmap_run, bmp_size, &bmp_size_v, false, NULL);
79715 +out1:
79716 +       return err;
79720 + * indx_insert_into_root
79721 + *
79722 + * attempts to insert an entry into the index root
79723 + * If necessary, it will twiddle the index b-tree.
79724 + */
79725 +static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
79726 +                                const struct NTFS_DE *new_de,
79727 +                                struct NTFS_DE *root_de, const void *ctx,
79728 +                                struct ntfs_fnd *fnd)
79730 +       int err = 0;
79731 +       struct NTFS_DE *e, *e0, *re;
79732 +       struct mft_inode *mi;
79733 +       struct ATTRIB *attr;
79734 +       struct MFT_REC *rec;
79735 +       struct INDEX_HDR *hdr;
79736 +       struct indx_node *n;
79737 +       CLST new_vbn;
79738 +       __le64 *sub_vbn, t_vbn;
79739 +       u16 new_de_size;
79740 +       u32 hdr_used, hdr_total, asize, used, to_move;
79741 +       u32 root_size, new_root_size;
79742 +       struct ntfs_sb_info *sbi;
79743 +       int ds_root;
79744 +       struct INDEX_ROOT *root, *a_root = NULL;
79746 +       /* Get the record this root placed in */
79747 +       root = indx_get_root(indx, ni, &attr, &mi);
79748 +       if (!root)
79749 +               goto out;
79751 +       /*
79752 +        * Try easy case:
79753 +        * hdr_insert_de will succeed if there's room the root for the new entry.
79754 +        */
79755 +       hdr = &root->ihdr;
79756 +       sbi = ni->mi.sbi;
79757 +       rec = mi->mrec;
79758 +       used = le32_to_cpu(rec->used);
79759 +       new_de_size = le16_to_cpu(new_de->size);
79760 +       hdr_used = le32_to_cpu(hdr->used);
79761 +       hdr_total = le32_to_cpu(hdr->total);
79762 +       asize = le32_to_cpu(attr->size);
79763 +       root_size = le32_to_cpu(attr->res.data_size);
79765 +       ds_root = new_de_size + hdr_used - hdr_total;
79767 +       if (used + ds_root < sbi->max_bytes_per_attr) {
79768 +               /* make a room for new elements */
79769 +               mi_resize_attr(mi, attr, ds_root);
79770 +               hdr->total = cpu_to_le32(hdr_total + ds_root);
79771 +               e = hdr_insert_de(indx, hdr, new_de, root_de, ctx);
79772 +               WARN_ON(!e);
79773 +               fnd_clear(fnd);
79774 +               fnd->root_de = e;
79776 +               return 0;
79777 +       }
79779 +       /* Make a copy of root attribute to restore if error */
79780 +       a_root = ntfs_memdup(attr, asize);
79781 +       if (!a_root) {
79782 +               err = -ENOMEM;
79783 +               goto out;
79784 +       }
79786 +       /* copy all the non-end entries from the index root to the new buffer.*/
79787 +       to_move = 0;
79788 +       e0 = hdr_first_de(hdr);
79790 +       /* Calculate the size to copy */
79791 +       for (e = e0;; e = hdr_next_de(hdr, e)) {
79792 +               if (!e) {
79793 +                       err = -EINVAL;
79794 +                       goto out;
79795 +               }
79797 +               if (de_is_last(e))
79798 +                       break;
79799 +               to_move += le16_to_cpu(e->size);
79800 +       }
79802 +       n = NULL;
79803 +       if (!to_move) {
79804 +               re = NULL;
79805 +       } else {
79806 +               re = ntfs_memdup(e0, to_move);
79807 +               if (!re) {
79808 +                       err = -ENOMEM;
79809 +                       goto out;
79810 +               }
79811 +       }
79813 +       sub_vbn = NULL;
79814 +       if (de_has_vcn(e)) {
79815 +               t_vbn = de_get_vbn_le(e);
79816 +               sub_vbn = &t_vbn;
79817 +       }
79819 +       new_root_size = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE) +
79820 +                       sizeof(u64);
79821 +       ds_root = new_root_size - root_size;
79823 +       if (ds_root > 0 && used + ds_root > sbi->max_bytes_per_attr) {
79824 +               /* make root external */
79825 +               err = -EOPNOTSUPP;
79826 +               goto out;
79827 +       }
79829 +       if (ds_root)
79830 +               mi_resize_attr(mi, attr, ds_root);
79832 +       /* Fill first entry (vcn will be set later) */
79833 +       e = (struct NTFS_DE *)(root + 1);
79834 +       memset(e, 0, sizeof(struct NTFS_DE));
79835 +       e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64));
79836 +       e->flags = NTFS_IE_HAS_SUBNODES | NTFS_IE_LAST;
79838 +       hdr->flags = 1;
79839 +       hdr->used = hdr->total =
79840 +               cpu_to_le32(new_root_size - offsetof(struct INDEX_ROOT, ihdr));
79842 +       fnd->root_de = hdr_first_de(hdr);
79843 +       mi->dirty = true;
79845 +       /* Create alloc and bitmap attributes (if not) */
79846 +       err = run_is_empty(&indx->alloc_run)
79847 +                     ? indx_create_allocate(indx, ni, &new_vbn)
79848 +                     : indx_add_allocate(indx, ni, &new_vbn);
79850 +       /* layout of record may be changed, so rescan root */
79851 +       root = indx_get_root(indx, ni, &attr, &mi);
79852 +       if (!root) {
79853 +               /* bug? */
79854 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
79855 +               err = -EINVAL;
79856 +               goto out1;
79857 +       }
79859 +       if (err) {
79860 +               /* restore root */
79861 +               if (mi_resize_attr(mi, attr, -ds_root))
79862 +                       memcpy(attr, a_root, asize);
79863 +               else {
79864 +                       /* bug? */
79865 +                       ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
79866 +               }
79867 +               goto out1;
79868 +       }
79870 +       e = (struct NTFS_DE *)(root + 1);
79871 +       *(__le64 *)(e + 1) = cpu_to_le64(new_vbn);
79872 +       mi->dirty = true;
79874 +       /* now we can create/format the new buffer and copy the entries into */
79875 +       n = indx_new(indx, ni, new_vbn, sub_vbn);
79876 +       if (IS_ERR(n)) {
79877 +               err = PTR_ERR(n);
79878 +               goto out1;
79879 +       }
79881 +       hdr = &n->index->ihdr;
79882 +       hdr_used = le32_to_cpu(hdr->used);
79883 +       hdr_total = le32_to_cpu(hdr->total);
79885 +       /* Copy root entries into new buffer */
79886 +       hdr_insert_head(hdr, re, to_move);
79888 +       /* Update bitmap attribute */
79889 +       indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits);
79891 +       /* Check if we can insert new entry new index buffer */
79892 +       if (hdr_used + new_de_size > hdr_total) {
79893 +               /*
79894 +                * This occurs if mft record is the same or bigger than index
79895 +                * buffer. Move all root new index and have no space to add
79896 +                * new entry classic case when mft record is 1K and index
79897 +                * buffer 4K the problem should not occurs
79898 +                */
79899 +               ntfs_free(re);
79900 +               indx_write(indx, ni, n, 0);
79902 +               put_indx_node(n);
79903 +               fnd_clear(fnd);
79904 +               err = indx_insert_entry(indx, ni, new_de, ctx, fnd);
79905 +               goto out;
79906 +       }
79908 +       /*
79909 +        * Now root is a parent for new index buffer
79910 +        * Insert NewEntry a new buffer
79911 +        */
79912 +       e = hdr_insert_de(indx, hdr, new_de, NULL, ctx);
79913 +       if (!e) {
79914 +               err = -EINVAL;
79915 +               goto out1;
79916 +       }
79917 +       fnd_push(fnd, n, e);
79919 +       /* Just write updates index into disk */
79920 +       indx_write(indx, ni, n, 0);
79922 +       n = NULL;
79924 +out1:
79925 +       ntfs_free(re);
79926 +       if (n)
79927 +               put_indx_node(n);
79929 +out:
79930 +       ntfs_free(a_root);
79931 +       return err;
79935 + * indx_insert_into_buffer
79936 + *
79937 + * attempts to insert an entry into an Index Allocation Buffer.
79938 + * If necessary, it will split the buffer.
79939 + */
79940 +static int
79941 +indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
79942 +                       struct INDEX_ROOT *root, const struct NTFS_DE *new_de,
79943 +                       const void *ctx, int level, struct ntfs_fnd *fnd)
79945 +       int err;
79946 +       const struct NTFS_DE *sp;
79947 +       struct NTFS_DE *e, *de_t, *up_e = NULL;
79948 +       struct indx_node *n2 = NULL;
79949 +       struct indx_node *n1 = fnd->nodes[level];
79950 +       struct INDEX_HDR *hdr1 = &n1->index->ihdr;
79951 +       struct INDEX_HDR *hdr2;
79952 +       u32 to_copy, used;
79953 +       CLST new_vbn;
79954 +       __le64 t_vbn, *sub_vbn;
79955 +       u16 sp_size;
79957 +       /* Try the most easy case */
79958 +       e = fnd->level - 1 == level ? fnd->de[level] : NULL;
79959 +       e = hdr_insert_de(indx, hdr1, new_de, e, ctx);
79960 +       fnd->de[level] = e;
79961 +       if (e) {
79962 +               /* Just write updated index into disk */
79963 +               indx_write(indx, ni, n1, 0);
79964 +               return 0;
79965 +       }
79967 +       /*
79968 +        * No space to insert into buffer. Split it.
79969 +        * To split we:
79970 +        *  - Save split point ('cause index buffers will be changed)
79971 +        * - Allocate NewBuffer and copy all entries <= sp into new buffer
79972 +        * - Remove all entries (sp including) from TargetBuffer
79973 +        * - Insert NewEntry into left or right buffer (depending on sp <=>
79974 +        *     NewEntry)
79975 +        * - Insert sp into parent buffer (or root)
79976 +        * - Make sp a parent for new buffer
79977 +        */
79978 +       sp = hdr_find_split(hdr1);
79979 +       if (!sp)
79980 +               return -EINVAL;
79982 +       sp_size = le16_to_cpu(sp->size);
79983 +       up_e = ntfs_malloc(sp_size + sizeof(u64));
79984 +       if (!up_e)
79985 +               return -ENOMEM;
79986 +       memcpy(up_e, sp, sp_size);
79988 +       if (!hdr1->flags) {
79989 +               up_e->flags |= NTFS_IE_HAS_SUBNODES;
79990 +               up_e->size = cpu_to_le16(sp_size + sizeof(u64));
79991 +               sub_vbn = NULL;
79992 +       } else {
79993 +               t_vbn = de_get_vbn_le(up_e);
79994 +               sub_vbn = &t_vbn;
79995 +       }
79997 +       /* Allocate on disk a new index allocation buffer. */
79998 +       err = indx_add_allocate(indx, ni, &new_vbn);
79999 +       if (err)
80000 +               goto out;
80002 +       /* Allocate and format memory a new index buffer */
80003 +       n2 = indx_new(indx, ni, new_vbn, sub_vbn);
80004 +       if (IS_ERR(n2)) {
80005 +               err = PTR_ERR(n2);
80006 +               goto out;
80007 +       }
80009 +       hdr2 = &n2->index->ihdr;
80011 +       /* Make sp a parent for new buffer */
80012 +       de_set_vbn(up_e, new_vbn);
80014 +       /* copy all the entries <= sp into the new buffer. */
80015 +       de_t = hdr_first_de(hdr1);
80016 +       to_copy = PtrOffset(de_t, sp);
80017 +       hdr_insert_head(hdr2, de_t, to_copy);
80019 +       /* remove all entries (sp including) from hdr1 */
80020 +       used = le32_to_cpu(hdr1->used) - to_copy - sp_size;
80021 +       memmove(de_t, Add2Ptr(sp, sp_size), used - le32_to_cpu(hdr1->de_off));
80022 +       hdr1->used = cpu_to_le32(used);
80024 +       /* Insert new entry into left or right buffer (depending on sp <=> new_de) */
80025 +       hdr_insert_de(indx,
80026 +                     (*indx->cmp)(new_de + 1, le16_to_cpu(new_de->key_size),
80027 +                                  up_e + 1, le16_to_cpu(up_e->key_size),
80028 +                                  ctx) < 0
80029 +                             ? hdr2
80030 +                             : hdr1,
80031 +                     new_de, NULL, ctx);
80033 +       indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits);
80035 +       indx_write(indx, ni, n1, 0);
80036 +       indx_write(indx, ni, n2, 0);
80038 +       put_indx_node(n2);
80040 +       /*
80041 +        * we've finished splitting everybody, so we are ready to
80042 +        * insert the promoted entry into the parent.
80043 +        */
80044 +       if (!level) {
80045 +               /* Insert in root */
80046 +               err = indx_insert_into_root(indx, ni, up_e, NULL, ctx, fnd);
80047 +               if (err)
80048 +                       goto out;
80049 +       } else {
80050 +               /*
80051 +                * The target buffer's parent is another index buffer
80052 +                * TODO: Remove recursion
80053 +                */
80054 +               err = indx_insert_into_buffer(indx, ni, root, up_e, ctx,
80055 +                                             level - 1, fnd);
80056 +               if (err)
80057 +                       goto out;
80058 +       }
80060 +out:
80061 +       ntfs_free(up_e);
80063 +       return err;
80067 + * indx_insert_entry
80068 + *
80069 + * inserts new entry into index
80070 + */
80071 +int indx_insert_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
80072 +                     const struct NTFS_DE *new_de, const void *ctx,
80073 +                     struct ntfs_fnd *fnd)
80075 +       int err;
80076 +       int diff;
80077 +       struct NTFS_DE *e;
80078 +       struct ntfs_fnd *fnd_a = NULL;
80079 +       struct INDEX_ROOT *root;
80081 +       if (!fnd) {
80082 +               fnd_a = fnd_get();
80083 +               if (!fnd_a) {
80084 +                       err = -ENOMEM;
80085 +                       goto out1;
80086 +               }
80087 +               fnd = fnd_a;
80088 +       }
80090 +       root = indx_get_root(indx, ni, NULL, NULL);
80091 +       if (!root) {
80092 +               err = -EINVAL;
80093 +               goto out;
80094 +       }
80096 +       if (fnd_is_empty(fnd)) {
80097 +               /* Find the spot the tree where we want to insert the new entry. */
80098 +               err = indx_find(indx, ni, root, new_de + 1,
80099 +                               le16_to_cpu(new_de->key_size), ctx, &diff, &e,
80100 +                               fnd);
80101 +               if (err)
80102 +                       goto out;
80104 +               if (!diff) {
80105 +                       err = -EEXIST;
80106 +                       goto out;
80107 +               }
80108 +       }
80110 +       if (!fnd->level) {
80111 +               /* The root is also a leaf, so we'll insert the new entry into it. */
80112 +               err = indx_insert_into_root(indx, ni, new_de, fnd->root_de, ctx,
80113 +                                           fnd);
80114 +               if (err)
80115 +                       goto out;
80116 +       } else {
80117 +               /* found a leaf buffer, so we'll insert the new entry into it.*/
80118 +               err = indx_insert_into_buffer(indx, ni, root, new_de, ctx,
80119 +                                             fnd->level - 1, fnd);
80120 +               if (err)
80121 +                       goto out;
80122 +       }
80124 +out:
80125 +       fnd_put(fnd_a);
80126 +out1:
80127 +       return err;
80131 + * indx_find_buffer
80132 + *
80133 + * locates a buffer the tree.
80134 + */
80135 +static struct indx_node *indx_find_buffer(struct ntfs_index *indx,
80136 +                                         struct ntfs_inode *ni,
80137 +                                         const struct INDEX_ROOT *root,
80138 +                                         __le64 vbn, struct indx_node *n)
80140 +       int err;
80141 +       const struct NTFS_DE *e;
80142 +       struct indx_node *r;
80143 +       const struct INDEX_HDR *hdr = n ? &n->index->ihdr : &root->ihdr;
80145 +       /* Step 1: Scan one level */
80146 +       for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) {
80147 +               if (!e)
80148 +                       return ERR_PTR(-EINVAL);
80150 +               if (de_has_vcn(e) && vbn == de_get_vbn_le(e))
80151 +                       return n;
80153 +               if (de_is_last(e))
80154 +                       break;
80155 +       }
80157 +       /* Step2: Do recursion */
80158 +       e = Add2Ptr(hdr, le32_to_cpu(hdr->de_off));
80159 +       for (;;) {
80160 +               if (de_has_vcn_ex(e)) {
80161 +                       err = indx_read(indx, ni, de_get_vbn(e), &n);
80162 +                       if (err)
80163 +                               return ERR_PTR(err);
80165 +                       r = indx_find_buffer(indx, ni, root, vbn, n);
80166 +                       if (r)
80167 +                               return r;
80168 +               }
80170 +               if (de_is_last(e))
80171 +                       break;
80173 +               e = Add2Ptr(e, le16_to_cpu(e->size));
80174 +       }
80176 +       return NULL;
80180 + * indx_shrink
80181 + *
80182 + * deallocates unused tail indexes
80183 + */
80184 +static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
80185 +                      size_t bit)
80187 +       int err = 0;
80188 +       u64 bpb, new_data;
80189 +       size_t nbits;
80190 +       struct ATTRIB *b;
80191 +       struct ATTR_LIST_ENTRY *le = NULL;
80192 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
80194 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
80195 +                        NULL, NULL);
80197 +       if (!b)
80198 +               return -ENOENT;
80200 +       if (!b->non_res) {
80201 +               unsigned long pos;
80202 +               const unsigned long *bm = resident_data(b);
80204 +               nbits = le32_to_cpu(b->res.data_size) * 8;
80206 +               if (bit >= nbits)
80207 +                       return 0;
80209 +               pos = find_next_bit(bm, nbits, bit);
80210 +               if (pos < nbits)
80211 +                       return 0;
80212 +       } else {
80213 +               size_t used = MINUS_ONE_T;
80215 +               nbits = le64_to_cpu(b->nres.data_size) * 8;
80217 +               if (bit >= nbits)
80218 +                       return 0;
80220 +               err = scan_nres_bitmap(ni, b, indx, bit, &scan_for_used, &used);
80221 +               if (err)
80222 +                       return err;
80224 +               if (used != MINUS_ONE_T)
80225 +                       return 0;
80226 +       }
80228 +       new_data = (u64)bit << indx->index_bits;
80230 +       err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
80231 +                           &indx->alloc_run, new_data, &new_data, false, NULL);
80232 +       if (err)
80233 +               return err;
80235 +       bpb = bitmap_size(bit);
80236 +       if (bpb * 8 == nbits)
80237 +               return 0;
80239 +       err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
80240 +                           &indx->bitmap_run, bpb, &bpb, false, NULL);
80242 +       return err;
80245 +static int indx_free_children(struct ntfs_index *indx, struct ntfs_inode *ni,
80246 +                             const struct NTFS_DE *e, bool trim)
80248 +       int err;
80249 +       struct indx_node *n;
80250 +       struct INDEX_HDR *hdr;
80251 +       CLST vbn = de_get_vbn(e);
80252 +       size_t i;
80254 +       err = indx_read(indx, ni, vbn, &n);
80255 +       if (err)
80256 +               return err;
80258 +       hdr = &n->index->ihdr;
80259 +       /* First, recurse into the children, if any.*/
80260 +       if (hdr_has_subnode(hdr)) {
80261 +               for (e = hdr_first_de(hdr); e; e = hdr_next_de(hdr, e)) {
80262 +                       indx_free_children(indx, ni, e, false);
80263 +                       if (de_is_last(e))
80264 +                               break;
80265 +               }
80266 +       }
80268 +       put_indx_node(n);
80270 +       i = vbn >> indx->idx2vbn_bits;
80271 +       /* We've gotten rid of the children; add this buffer to the free list. */
80272 +       indx_mark_free(indx, ni, i);
80274 +       if (!trim)
80275 +               return 0;
80277 +       /*
80278 +        * If there are no used indexes after current free index
80279 +        * then we can truncate allocation and bitmap
80280 +        * Use bitmap to estimate the case
80281 +        */
80282 +       indx_shrink(indx, ni, i + 1);
80283 +       return 0;
80287 + * indx_get_entry_to_replace
80288 + *
80289 + * finds a replacement entry for a deleted entry
80290 + * always returns a node entry:
80291 + * NTFS_IE_HAS_SUBNODES is set the flags and the size includes the sub_vcn
80292 + */
80293 +static int indx_get_entry_to_replace(struct ntfs_index *indx,
80294 +                                    struct ntfs_inode *ni,
80295 +                                    const struct NTFS_DE *de_next,
80296 +                                    struct NTFS_DE **de_to_replace,
80297 +                                    struct ntfs_fnd *fnd)
80299 +       int err;
80300 +       int level = -1;
80301 +       CLST vbn;
80302 +       struct NTFS_DE *e, *te, *re;
80303 +       struct indx_node *n;
80304 +       struct INDEX_BUFFER *ib;
80306 +       *de_to_replace = NULL;
80308 +       /* Find first leaf entry down from de_next */
80309 +       vbn = de_get_vbn(de_next);
80310 +       for (;;) {
80311 +               n = NULL;
80312 +               err = indx_read(indx, ni, vbn, &n);
80313 +               if (err)
80314 +                       goto out;
80316 +               e = hdr_first_de(&n->index->ihdr);
80317 +               fnd_push(fnd, n, e);
80319 +               if (!de_is_last(e)) {
80320 +                       /*
80321 +                        * This buffer is non-empty, so its first entry could be used as the
80322 +                        * replacement entry.
80323 +                        */
80324 +                       level = fnd->level - 1;
80325 +               }
80327 +               if (!de_has_vcn(e))
80328 +                       break;
80330 +               /* This buffer is a node. Continue to go down */
80331 +               vbn = de_get_vbn(e);
80332 +       }
80334 +       if (level == -1)
80335 +               goto out;
80337 +       n = fnd->nodes[level];
80338 +       te = hdr_first_de(&n->index->ihdr);
80339 +       /* Copy the candidate entry into the replacement entry buffer. */
80340 +       re = ntfs_malloc(le16_to_cpu(te->size) + sizeof(u64));
80341 +       if (!re) {
80342 +               err = -ENOMEM;
80343 +               goto out;
80344 +       }
80346 +       *de_to_replace = re;
80347 +       memcpy(re, te, le16_to_cpu(te->size));
80349 +       if (!de_has_vcn(re)) {
80350 +               /*
80351 +                * The replacement entry we found doesn't have a sub_vcn. increase its size
80352 +                * to hold one.
80353 +                */
80354 +               le16_add_cpu(&re->size, sizeof(u64));
80355 +               re->flags |= NTFS_IE_HAS_SUBNODES;
80356 +       } else {
80357 +               /*
80358 +                * The replacement entry we found was a node entry, which means that all
80359 +                * its child buffers are empty. Return them to the free pool.
80360 +                */
80361 +               indx_free_children(indx, ni, te, true);
80362 +       }
80364 +       /*
80365 +        * Expunge the replacement entry from its former location,
80366 +        * and then write that buffer.
80367 +        */
80368 +       ib = n->index;
80369 +       e = hdr_delete_de(&ib->ihdr, te);
80371 +       fnd->de[level] = e;
80372 +       indx_write(indx, ni, n, 0);
80374 +       /* Check to see if this action created an empty leaf. */
80375 +       if (ib_is_leaf(ib) && ib_is_empty(ib))
80376 +               return 0;
80378 +out:
80379 +       fnd_clear(fnd);
80380 +       return err;
80384 + * indx_delete_entry
80385 + *
80386 + * deletes an entry from the index.
80387 + */
80388 +int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
80389 +                     const void *key, u32 key_len, const void *ctx)
80391 +       int err, diff;
80392 +       struct INDEX_ROOT *root;
80393 +       struct INDEX_HDR *hdr;
80394 +       struct ntfs_fnd *fnd, *fnd2;
80395 +       struct INDEX_BUFFER *ib;
80396 +       struct NTFS_DE *e, *re, *next, *prev, *me;
80397 +       struct indx_node *n, *n2d = NULL;
80398 +       __le64 sub_vbn;
80399 +       int level, level2;
80400 +       struct ATTRIB *attr;
80401 +       struct mft_inode *mi;
80402 +       u32 e_size, root_size, new_root_size;
80403 +       size_t trim_bit;
80404 +       const struct INDEX_NAMES *in;
80406 +       fnd = fnd_get();
80407 +       if (!fnd) {
80408 +               err = -ENOMEM;
80409 +               goto out2;
80410 +       }
80412 +       fnd2 = fnd_get();
80413 +       if (!fnd2) {
80414 +               err = -ENOMEM;
80415 +               goto out1;
80416 +       }
80418 +       root = indx_get_root(indx, ni, &attr, &mi);
80419 +       if (!root) {
80420 +               err = -EINVAL;
80421 +               goto out;
80422 +       }
80424 +       /* Locate the entry to remove. */
80425 +       err = indx_find(indx, ni, root, key, key_len, ctx, &diff, &e, fnd);
80426 +       if (err)
80427 +               goto out;
80429 +       if (!e || diff) {
80430 +               err = -ENOENT;
80431 +               goto out;
80432 +       }
80434 +       level = fnd->level;
80436 +       if (level) {
80437 +               n = fnd->nodes[level - 1];
80438 +               e = fnd->de[level - 1];
80439 +               ib = n->index;
80440 +               hdr = &ib->ihdr;
80441 +       } else {
80442 +               hdr = &root->ihdr;
80443 +               e = fnd->root_de;
80444 +               n = NULL;
80445 +       }
80447 +       e_size = le16_to_cpu(e->size);
80449 +       if (!de_has_vcn_ex(e)) {
80450 +               /* The entry to delete is a leaf, so we can just rip it out */
80451 +               hdr_delete_de(hdr, e);
80453 +               if (!level) {
80454 +                       hdr->total = hdr->used;
80456 +                       /* Shrink resident root attribute */
80457 +                       mi_resize_attr(mi, attr, 0 - e_size);
80458 +                       goto out;
80459 +               }
80461 +               indx_write(indx, ni, n, 0);
80463 +               /*
80464 +                * Check to see if removing that entry made
80465 +                * the leaf empty.
80466 +                */
80467 +               if (ib_is_leaf(ib) && ib_is_empty(ib)) {
80468 +                       fnd_pop(fnd);
80469 +                       fnd_push(fnd2, n, e);
80470 +               }
80471 +       } else {
80472 +               /*
80473 +                * The entry we wish to delete is a node buffer, so we
80474 +                * have to find a replacement for it.
80475 +                */
80476 +               next = de_get_next(e);
80478 +               err = indx_get_entry_to_replace(indx, ni, next, &re, fnd2);
80479 +               if (err)
80480 +                       goto out;
80482 +               if (re) {
80483 +                       de_set_vbn_le(re, de_get_vbn_le(e));
80484 +                       hdr_delete_de(hdr, e);
80486 +                       err = level ? indx_insert_into_buffer(indx, ni, root,
80487 +                                                             re, ctx,
80488 +                                                             fnd->level - 1,
80489 +                                                             fnd)
80490 +                                   : indx_insert_into_root(indx, ni, re, e,
80491 +                                                           ctx, fnd);
80492 +                       ntfs_free(re);
80494 +                       if (err)
80495 +                               goto out;
80496 +               } else {
80497 +                       /*
80498 +                        * There is no replacement for the current entry.
80499 +                        * This means that the subtree rooted at its node is empty,
80500 +                        * and can be deleted, which turn means that the node can
80501 +                        * just inherit the deleted entry sub_vcn
80502 +                        */
80503 +                       indx_free_children(indx, ni, next, true);
80505 +                       de_set_vbn_le(next, de_get_vbn_le(e));
80506 +                       hdr_delete_de(hdr, e);
80507 +                       if (level) {
80508 +                               indx_write(indx, ni, n, 0);
80509 +                       } else {
80510 +                               hdr->total = hdr->used;
80512 +                               /* Shrink resident root attribute */
80513 +                               mi_resize_attr(mi, attr, 0 - e_size);
80514 +                       }
80515 +               }
80516 +       }
80518 +       /* Delete a branch of tree */
80519 +       if (!fnd2 || !fnd2->level)
80520 +               goto out;
80522 +       /* Reinit root 'cause it can be changed */
80523 +       root = indx_get_root(indx, ni, &attr, &mi);
80524 +       if (!root) {
80525 +               err = -EINVAL;
80526 +               goto out;
80527 +       }
80529 +       n2d = NULL;
80530 +       sub_vbn = fnd2->nodes[0]->index->vbn;
80531 +       level2 = 0;
80532 +       level = fnd->level;
80534 +       hdr = level ? &fnd->nodes[level - 1]->index->ihdr : &root->ihdr;
80536 +       /* Scan current level */
80537 +       for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) {
80538 +               if (!e) {
80539 +                       err = -EINVAL;
80540 +                       goto out;
80541 +               }
80543 +               if (de_has_vcn(e) && sub_vbn == de_get_vbn_le(e))
80544 +                       break;
80546 +               if (de_is_last(e)) {
80547 +                       e = NULL;
80548 +                       break;
80549 +               }
80550 +       }
80552 +       if (!e) {
80553 +               /* Do slow search from root */
80554 +               struct indx_node *in;
80556 +               fnd_clear(fnd);
80558 +               in = indx_find_buffer(indx, ni, root, sub_vbn, NULL);
80559 +               if (IS_ERR(in)) {
80560 +                       err = PTR_ERR(in);
80561 +                       goto out;
80562 +               }
80564 +               if (in)
80565 +                       fnd_push(fnd, in, NULL);
80566 +       }
80568 +       /* Merge fnd2 -> fnd */
80569 +       for (level = 0; level < fnd2->level; level++) {
80570 +               fnd_push(fnd, fnd2->nodes[level], fnd2->de[level]);
80571 +               fnd2->nodes[level] = NULL;
80572 +       }
80573 +       fnd2->level = 0;
80575 +       hdr = NULL;
80576 +       for (level = fnd->level; level; level--) {
80577 +               struct indx_node *in = fnd->nodes[level - 1];
80579 +               ib = in->index;
80580 +               if (ib_is_empty(ib)) {
80581 +                       sub_vbn = ib->vbn;
80582 +               } else {
80583 +                       hdr = &ib->ihdr;
80584 +                       n2d = in;
80585 +                       level2 = level;
80586 +                       break;
80587 +               }
80588 +       }
80590 +       if (!hdr)
80591 +               hdr = &root->ihdr;
80593 +       e = hdr_first_de(hdr);
80594 +       if (!e) {
80595 +               err = -EINVAL;
80596 +               goto out;
80597 +       }
80599 +       if (hdr != &root->ihdr || !de_is_last(e)) {
80600 +               prev = NULL;
80601 +               while (!de_is_last(e)) {
80602 +                       if (de_has_vcn(e) && sub_vbn == de_get_vbn_le(e))
80603 +                               break;
80604 +                       prev = e;
80605 +                       e = hdr_next_de(hdr, e);
80606 +                       if (!e) {
80607 +                               err = -EINVAL;
80608 +                               goto out;
80609 +                       }
80610 +               }
80612 +               if (sub_vbn != de_get_vbn_le(e)) {
80613 +                       /*
80614 +                        * Didn't find the parent entry, although this buffer is the parent trail.
80615 +                        * Something is corrupt.
80616 +                        */
80617 +                       err = -EINVAL;
80618 +                       goto out;
80619 +               }
80621 +               if (de_is_last(e)) {
80622 +                       /*
80623 +                        * Since we can't remove the end entry, we'll remove its
80624 +                        * predecessor instead. This means we have to transfer the
80625 +                        * predecessor's sub_vcn to the end entry.
80626 +                        * Note: that this index block is not empty, so the
80627 +                        * predecessor must exist
80628 +                        */
80629 +                       if (!prev) {
80630 +                               err = -EINVAL;
80631 +                               goto out;
80632 +                       }
80634 +                       if (de_has_vcn(prev)) {
80635 +                               de_set_vbn_le(e, de_get_vbn_le(prev));
80636 +                       } else if (de_has_vcn(e)) {
80637 +                               le16_sub_cpu(&e->size, sizeof(u64));
80638 +                               e->flags &= ~NTFS_IE_HAS_SUBNODES;
80639 +                               le32_sub_cpu(&hdr->used, sizeof(u64));
80640 +                       }
80641 +                       e = prev;
80642 +               }
80644 +               /*
80645 +                * Copy the current entry into a temporary buffer (stripping off its
80646 +                * down-pointer, if any) and delete it from the current buffer or root,
80647 +                * as appropriate.
80648 +                */
80649 +               e_size = le16_to_cpu(e->size);
80650 +               me = ntfs_memdup(e, e_size);
80651 +               if (!me) {
80652 +                       err = -ENOMEM;
80653 +                       goto out;
80654 +               }
80656 +               if (de_has_vcn(me)) {
80657 +                       me->flags &= ~NTFS_IE_HAS_SUBNODES;
80658 +                       le16_sub_cpu(&me->size, sizeof(u64));
80659 +               }
80661 +               hdr_delete_de(hdr, e);
80663 +               if (hdr == &root->ihdr) {
80664 +                       level = 0;
80665 +                       hdr->total = hdr->used;
80667 +                       /* Shrink resident root attribute */
80668 +                       mi_resize_attr(mi, attr, 0 - e_size);
80669 +               } else {
80670 +                       indx_write(indx, ni, n2d, 0);
80671 +                       level = level2;
80672 +               }
80674 +               /* Mark unused buffers as free */
80675 +               trim_bit = -1;
80676 +               for (; level < fnd->level; level++) {
80677 +                       ib = fnd->nodes[level]->index;
80678 +                       if (ib_is_empty(ib)) {
80679 +                               size_t k = le64_to_cpu(ib->vbn) >>
80680 +                                          indx->idx2vbn_bits;
80682 +                               indx_mark_free(indx, ni, k);
80683 +                               if (k < trim_bit)
80684 +                                       trim_bit = k;
80685 +                       }
80686 +               }
80688 +               fnd_clear(fnd);
80689 +               /*fnd->root_de = NULL;*/
80691 +               /*
80692 +                * Re-insert the entry into the tree.
80693 +                * Find the spot the tree where we want to insert the new entry.
80694 +                */
80695 +               err = indx_insert_entry(indx, ni, me, ctx, fnd);
80696 +               ntfs_free(me);
80697 +               if (err)
80698 +                       goto out;
80700 +               if (trim_bit != -1)
80701 +                       indx_shrink(indx, ni, trim_bit);
80702 +       } else {
80703 +               /*
80704 +                * This tree needs to be collapsed down to an empty root.
80705 +                * Recreate the index root as an empty leaf and free all the bits the
80706 +                * index allocation bitmap.
80707 +                */
80708 +               fnd_clear(fnd);
80709 +               fnd_clear(fnd2);
80711 +               in = &s_index_names[indx->type];
80713 +               err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
80714 +                                   &indx->alloc_run, 0, NULL, false, NULL);
80715 +               err = ni_remove_attr(ni, ATTR_ALLOC, in->name, in->name_len,
80716 +                                    false, NULL);
80717 +               run_close(&indx->alloc_run);
80719 +               err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
80720 +                                   &indx->bitmap_run, 0, NULL, false, NULL);
80721 +               err = ni_remove_attr(ni, ATTR_BITMAP, in->name, in->name_len,
80722 +                                    false, NULL);
80723 +               run_close(&indx->bitmap_run);
80725 +               root = indx_get_root(indx, ni, &attr, &mi);
80726 +               if (!root) {
80727 +                       err = -EINVAL;
80728 +                       goto out;
80729 +               }
80731 +               root_size = le32_to_cpu(attr->res.data_size);
80732 +               new_root_size =
80733 +                       sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
80735 +               if (new_root_size != root_size &&
80736 +                   !mi_resize_attr(mi, attr, new_root_size - root_size)) {
80737 +                       err = -EINVAL;
80738 +                       goto out;
80739 +               }
80741 +               /* Fill first entry */
80742 +               e = (struct NTFS_DE *)(root + 1);
80743 +               e->ref.low = 0;
80744 +               e->ref.high = 0;
80745 +               e->ref.seq = 0;
80746 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
80747 +               e->flags = NTFS_IE_LAST; // 0x02
80748 +               e->key_size = 0;
80749 +               e->res = 0;
80751 +               hdr = &root->ihdr;
80752 +               hdr->flags = 0;
80753 +               hdr->used = hdr->total = cpu_to_le32(
80754 +                       new_root_size - offsetof(struct INDEX_ROOT, ihdr));
80755 +               mi->dirty = true;
80756 +       }
80758 +out:
80759 +       fnd_put(fnd2);
80760 +out1:
80761 +       fnd_put(fnd);
80762 +out2:
80763 +       return err;
80766 +int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
80767 +                   const struct ATTR_FILE_NAME *fname,
80768 +                   const struct NTFS_DUP_INFO *dup, int sync)
80770 +       int err, diff;
80771 +       struct NTFS_DE *e = NULL;
80772 +       struct ATTR_FILE_NAME *e_fname;
80773 +       struct ntfs_fnd *fnd;
80774 +       struct INDEX_ROOT *root;
80775 +       struct mft_inode *mi;
80776 +       struct ntfs_index *indx = &ni->dir;
80778 +       fnd = fnd_get();
80779 +       if (!fnd) {
80780 +               err = -ENOMEM;
80781 +               goto out1;
80782 +       }
80784 +       root = indx_get_root(indx, ni, NULL, &mi);
80785 +       if (!root) {
80786 +               err = -EINVAL;
80787 +               goto out;
80788 +       }
80790 +       /* Find entries tree and on disk */
80791 +       err = indx_find(indx, ni, root, fname, fname_full_size(fname), sbi,
80792 +                       &diff, &e, fnd);
80793 +       if (err)
80794 +               goto out;
80796 +       if (!e) {
80797 +               err = -EINVAL;
80798 +               goto out;
80799 +       }
80801 +       if (diff) {
80802 +               err = -EINVAL;
80803 +               goto out;
80804 +       }
80806 +       e_fname = (struct ATTR_FILE_NAME *)(e + 1);
80808 +       if (!memcmp(&e_fname->dup, dup, sizeof(*dup))) {
80809 +               /* nothing to update in index! Try to avoid this call */
80810 +               goto out;
80811 +       }
80813 +       memcpy(&e_fname->dup, dup, sizeof(*dup));
80815 +       if (fnd->level) {
80816 +               err = indx_write(indx, ni, fnd->nodes[fnd->level - 1], sync);
80817 +       } else if (sync) {
80818 +               mi->dirty = true;
80819 +               err = mi_write(mi, 1);
80820 +       } else {
80821 +               mi->dirty = true;
80822 +               mark_inode_dirty(&ni->vfs_inode);
80823 +       }
80825 +out:
80826 +       fnd_put(fnd);
80828 +out1:
80829 +       return err;
80831 diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
80832 new file mode 100644
80833 index 000000000000..9e836c192ddf
80834 --- /dev/null
80835 +++ b/fs/ntfs3/inode.c
80836 @@ -0,0 +1,2033 @@
80837 +// SPDX-License-Identifier: GPL-2.0
80839 + *
80840 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
80841 + *
80842 + */
80844 +#include <linux/blkdev.h>
80845 +#include <linux/buffer_head.h>
80846 +#include <linux/fs.h>
80847 +#include <linux/iversion.h>
80848 +#include <linux/mpage.h>
80849 +#include <linux/namei.h>
80850 +#include <linux/nls.h>
80851 +#include <linux/uio.h>
80852 +#include <linux/version.h>
80853 +#include <linux/writeback.h>
80855 +#include "debug.h"
80856 +#include "ntfs.h"
80857 +#include "ntfs_fs.h"
80860 + * ntfs_read_mft
80861 + *
80862 + * reads record and parses MFT
80863 + */
80864 +static struct inode *ntfs_read_mft(struct inode *inode,
80865 +                                  const struct cpu_str *name,
80866 +                                  const struct MFT_REF *ref)
80868 +       int err = 0;
80869 +       struct ntfs_inode *ni = ntfs_i(inode);
80870 +       struct super_block *sb = inode->i_sb;
80871 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
80872 +       mode_t mode = 0;
80873 +       struct ATTR_STD_INFO5 *std5 = NULL;
80874 +       struct ATTR_LIST_ENTRY *le;
80875 +       struct ATTRIB *attr;
80876 +       bool is_match = false;
80877 +       bool is_root = false;
80878 +       bool is_dir;
80879 +       unsigned long ino = inode->i_ino;
80880 +       u32 rp_fa = 0, asize, t32;
80881 +       u16 roff, rsize, names = 0;
80882 +       const struct ATTR_FILE_NAME *fname = NULL;
80883 +       const struct INDEX_ROOT *root;
80884 +       struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
80885 +       u64 t64;
80886 +       struct MFT_REC *rec;
80887 +       struct runs_tree *run;
80889 +       inode->i_op = NULL;
80891 +       err = mi_init(&ni->mi, sbi, ino);
80892 +       if (err)
80893 +               goto out;
80895 +       if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
80896 +               t64 = sbi->mft.lbo >> sbi->cluster_bits;
80897 +               t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
80898 +               sbi->mft.ni = ni;
80899 +               init_rwsem(&ni->file.run_lock);
80901 +               if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
80902 +                       err = -ENOMEM;
80903 +                       goto out;
80904 +               }
80905 +       }
80907 +       err = mi_read(&ni->mi, ino == MFT_REC_MFT);
80909 +       if (err)
80910 +               goto out;
80912 +       rec = ni->mi.mrec;
80914 +       if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
80915 +               ;
80916 +       } else if (ref->seq != rec->seq) {
80917 +               err = -EINVAL;
80918 +               ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
80919 +                        le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
80920 +               goto out;
80921 +       } else if (!is_rec_inuse(rec)) {
80922 +               err = -EINVAL;
80923 +               ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
80924 +               goto out;
80925 +       }
80927 +       if (le32_to_cpu(rec->total) != sbi->record_size) {
80928 +               // bad inode?
80929 +               err = -EINVAL;
80930 +               goto out;
80931 +       }
80933 +       if (!is_rec_base(rec))
80934 +               goto Ok;
80936 +       /* record should contain $I30 root */
80937 +       is_dir = rec->flags & RECORD_FLAG_DIR;
80939 +       inode->i_generation = le16_to_cpu(rec->seq);
80941 +       /* Enumerate all struct Attributes MFT */
80942 +       le = NULL;
80943 +       attr = NULL;
80945 +       /*
80946 +        * to reduce tab pressure use goto instead of
80947 +        * while( (attr = ni_enum_attr_ex(ni, attr, &le, NULL) ))
80948 +        */
80949 +next_attr:
80950 +       run = NULL;
80951 +       err = -EINVAL;
80952 +       attr = ni_enum_attr_ex(ni, attr, &le, NULL);
80953 +       if (!attr)
80954 +               goto end_enum;
80956 +       if (le && le->vcn) {
80957 +               /* This is non primary attribute segment. Ignore if not MFT */
80958 +               if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
80959 +                       goto next_attr;
80961 +               run = &ni->file.run;
80962 +               asize = le32_to_cpu(attr->size);
80963 +               goto attr_unpack_run;
80964 +       }
80966 +       roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
80967 +       rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
80968 +       asize = le32_to_cpu(attr->size);
80970 +       switch (attr->type) {
80971 +       case ATTR_STD:
80972 +               if (attr->non_res ||
80973 +                   asize < sizeof(struct ATTR_STD_INFO) + roff ||
80974 +                   rsize < sizeof(struct ATTR_STD_INFO))
80975 +                       goto out;
80977 +               if (std5)
80978 +                       goto next_attr;
80980 +               std5 = Add2Ptr(attr, roff);
80982 +#ifdef STATX_BTIME
80983 +               nt2kernel(std5->cr_time, &ni->i_crtime);
80984 +#endif
80985 +               nt2kernel(std5->a_time, &inode->i_atime);
80986 +               nt2kernel(std5->c_time, &inode->i_ctime);
80987 +               nt2kernel(std5->m_time, &inode->i_mtime);
80989 +               ni->std_fa = std5->fa;
80991 +               if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
80992 +                   rsize >= sizeof(struct ATTR_STD_INFO5))
80993 +                       ni->std_security_id = std5->security_id;
80994 +               goto next_attr;
80996 +       case ATTR_LIST:
80997 +               if (attr->name_len || le || ino == MFT_REC_LOG)
80998 +                       goto out;
81000 +               err = ntfs_load_attr_list(ni, attr);
81001 +               if (err)
81002 +                       goto out;
81004 +               le = NULL;
81005 +               attr = NULL;
81006 +               goto next_attr;
81008 +       case ATTR_NAME:
81009 +               if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
81010 +                   rsize < SIZEOF_ATTRIBUTE_FILENAME)
81011 +                       goto out;
81013 +               fname = Add2Ptr(attr, roff);
81014 +               if (fname->type == FILE_NAME_DOS)
81015 +                       goto next_attr;
81017 +               names += 1;
81018 +               if (name && name->len == fname->name_len &&
81019 +                   !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
81020 +                                       NULL, false))
81021 +                       is_match = true;
81023 +               goto next_attr;
81025 +       case ATTR_DATA:
81026 +               if (is_dir) {
81027 +                       /* ignore data attribute in dir record */
81028 +                       goto next_attr;
81029 +               }
81031 +               if (ino == MFT_REC_BADCLUST && !attr->non_res)
81032 +                       goto next_attr;
81034 +               if (attr->name_len &&
81035 +                   ((ino != MFT_REC_BADCLUST || !attr->non_res ||
81036 +                     attr->name_len != ARRAY_SIZE(BAD_NAME) ||
81037 +                     memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
81038 +                    (ino != MFT_REC_SECURE || !attr->non_res ||
81039 +                     attr->name_len != ARRAY_SIZE(SDS_NAME) ||
81040 +                     memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
81041 +                       /* file contains stream attribute. ignore it */
81042 +                       goto next_attr;
81043 +               }
81045 +               if (is_attr_sparsed(attr))
81046 +                       ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
81047 +               else
81048 +                       ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
81050 +               if (is_attr_compressed(attr))
81051 +                       ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
81052 +               else
81053 +                       ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;
81055 +               if (is_attr_encrypted(attr))
81056 +                       ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
81057 +               else
81058 +                       ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;
81060 +               if (!attr->non_res) {
81061 +                       ni->i_valid = inode->i_size = rsize;
81062 +                       inode_set_bytes(inode, rsize);
81063 +                       t32 = asize;
81064 +               } else {
81065 +                       t32 = le16_to_cpu(attr->nres.run_off);
81066 +               }
81068 +               mode = S_IFREG | (0777 & sbi->options.fs_fmask_inv);
81070 +               if (!attr->non_res) {
81071 +                       ni->ni_flags |= NI_FLAG_RESIDENT;
81072 +                       goto next_attr;
81073 +               }
81075 +               inode_set_bytes(inode, attr_ondisk_size(attr));
81077 +               ni->i_valid = le64_to_cpu(attr->nres.valid_size);
81078 +               inode->i_size = le64_to_cpu(attr->nres.data_size);
81079 +               if (!attr->nres.alloc_size)
81080 +                       goto next_attr;
81082 +               run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run
81083 +                                           : &ni->file.run;
81084 +               break;
81086 +       case ATTR_ROOT:
81087 +               if (attr->non_res)
81088 +                       goto out;
81090 +               root = Add2Ptr(attr, roff);
81091 +               is_root = true;
81093 +               if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
81094 +                   memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
81095 +                       goto next_attr;
81097 +               if (root->type != ATTR_NAME ||
81098 +                   root->rule != NTFS_COLLATION_TYPE_FILENAME)
81099 +                       goto out;
81101 +               if (!is_dir)
81102 +                       goto next_attr;
81104 +               ni->ni_flags |= NI_FLAG_DIR;
81106 +               err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
81107 +               if (err)
81108 +                       goto out;
81110 +               mode = sb->s_root
81111 +                              ? (S_IFDIR | (0777 & sbi->options.fs_dmask_inv))
81112 +                              : (S_IFDIR | 0777);
81113 +               goto next_attr;
81115 +       case ATTR_ALLOC:
81116 +               if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
81117 +                   memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
81118 +                       goto next_attr;
81120 +               inode->i_size = le64_to_cpu(attr->nres.data_size);
81121 +               ni->i_valid = le64_to_cpu(attr->nres.valid_size);
81122 +               inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));
81124 +               run = &ni->dir.alloc_run;
81125 +               break;
81127 +       case ATTR_BITMAP:
81128 +               if (ino == MFT_REC_MFT) {
81129 +                       if (!attr->non_res)
81130 +                               goto out;
81131 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
81132 +                       /* 0x20000000 = 2^32 / 8 */
81133 +                       if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
81134 +                               goto out;
81135 +#endif
81136 +                       run = &sbi->mft.bitmap.run;
81137 +                       break;
81138 +               } else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
81139 +                          !memcmp(attr_name(attr), I30_NAME,
81140 +                                  sizeof(I30_NAME)) &&
81141 +                          attr->non_res) {
81142 +                       run = &ni->dir.bitmap_run;
81143 +                       break;
81144 +               }
81145 +               goto next_attr;
81147 +       case ATTR_REPARSE:
81148 +               if (attr->name_len)
81149 +                       goto next_attr;
81151 +               rp_fa = ni_parse_reparse(ni, attr, &rp);
81152 +               switch (rp_fa) {
81153 +               case REPARSE_LINK:
81154 +                       if (!attr->non_res) {
81155 +                               inode->i_size = rsize;
81156 +                               inode_set_bytes(inode, rsize);
81157 +                               t32 = asize;
81158 +                       } else {
81159 +                               inode->i_size =
81160 +                                       le64_to_cpu(attr->nres.data_size);
81161 +                               t32 = le16_to_cpu(attr->nres.run_off);
81162 +                       }
81164 +                       /* Looks like normal symlink */
81165 +                       ni->i_valid = inode->i_size;
81167 +                       /* Clear directory bit */
81168 +                       if (ni->ni_flags & NI_FLAG_DIR) {
81169 +                               indx_clear(&ni->dir);
81170 +                               memset(&ni->dir, 0, sizeof(ni->dir));
81171 +                               ni->ni_flags &= ~NI_FLAG_DIR;
81172 +                       } else {
81173 +                               run_close(&ni->file.run);
81174 +                       }
81175 +                       mode = S_IFLNK | 0777;
81176 +                       is_dir = false;
81177 +                       if (attr->non_res) {
81178 +                               run = &ni->file.run;
81179 +                               goto attr_unpack_run; // double break
81180 +                       }
81181 +                       break;
81183 +               case REPARSE_COMPRESSED:
81184 +                       break;
81186 +               case REPARSE_DEDUPLICATED:
81187 +                       break;
81188 +               }
81189 +               goto next_attr;
81191 +       case ATTR_EA_INFO:
81192 +               if (!attr->name_len &&
81193 +                   resident_data_ex(attr, sizeof(struct EA_INFO)))
81194 +                       ni->ni_flags |= NI_FLAG_EA;
81195 +               goto next_attr;
81197 +       default:
81198 +               goto next_attr;
81199 +       }
81201 +attr_unpack_run:
81202 +       roff = le16_to_cpu(attr->nres.run_off);
81204 +       t64 = le64_to_cpu(attr->nres.svcn);
81205 +       err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
81206 +                           t64, Add2Ptr(attr, roff), asize - roff);
81207 +       if (err < 0)
81208 +               goto out;
81209 +       err = 0;
81210 +       goto next_attr;
81212 +end_enum:
81214 +       if (!std5)
81215 +               goto out;
81217 +       if (!is_match && name) {
81218 +               /* reuse rec as buffer for ascii name */
81219 +               err = -ENOENT;
81220 +               goto out;
81221 +       }
81223 +       if (std5->fa & FILE_ATTRIBUTE_READONLY)
81224 +               mode &= ~0222;
81226 +       /* Setup 'uid' and 'gid' */
81227 +       inode->i_uid = sbi->options.fs_uid;
81228 +       inode->i_gid = sbi->options.fs_gid;
81230 +       if (!names) {
81231 +               err = -EINVAL;
81232 +               goto out;
81233 +       }
81235 +       if (S_ISDIR(mode)) {
81236 +               ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
81238 +               /*
81239 +                * dot and dot-dot should be included in count but was not
81240 +                * included in enumeration.
81241 +                * Usually a hard links to directories are disabled
81242 +                */
81243 +               set_nlink(inode, 1);
81244 +               inode->i_op = &ntfs_dir_inode_operations;
81245 +               inode->i_fop = &ntfs_dir_operations;
81246 +               ni->i_valid = 0;
81247 +       } else if (S_ISLNK(mode)) {
81248 +               ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
81249 +               inode->i_op = &ntfs_link_inode_operations;
81250 +               inode->i_fop = NULL;
81251 +               inode_nohighmem(inode); // ??
81252 +               set_nlink(inode, names);
81253 +       } else if (S_ISREG(mode)) {
81254 +               ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
81256 +               set_nlink(inode, names);
81258 +               inode->i_op = &ntfs_file_inode_operations;
81259 +               inode->i_fop = &ntfs_file_operations;
81260 +               inode->i_mapping->a_ops =
81261 +                       is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
81263 +               if (ino != MFT_REC_MFT)
81264 +                       init_rwsem(&ni->file.run_lock);
81265 +       } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
81266 +                  fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
81267 +               /* Records in $Extend are not a files or general directories */
81268 +       } else {
81269 +               err = -EINVAL;
81270 +               goto out;
81271 +       }
81273 +       if ((sbi->options.sys_immutable &&
81274 +            (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
81275 +           !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
81276 +               inode->i_flags |= S_IMMUTABLE;
81277 +       } else {
81278 +               inode->i_flags &= ~S_IMMUTABLE;
81279 +       }
81281 +       inode->i_mode = mode;
81282 +       if (!(ni->ni_flags & NI_FLAG_EA)) {
81283 +               /* if no xattr then no security (stored in xattr) */
81284 +               inode->i_flags |= S_NOSEC;
81285 +       }
81287 +Ok:
81288 +       if (ino == MFT_REC_MFT && !sb->s_root)
81289 +               sbi->mft.ni = NULL;
81291 +       unlock_new_inode(inode);
81293 +       return inode;
81295 +out:
81296 +       if (ino == MFT_REC_MFT && !sb->s_root)
81297 +               sbi->mft.ni = NULL;
81299 +       iget_failed(inode);
81300 +       return ERR_PTR(err);
81303 +/* returns 1 if match */
81304 +static int ntfs_test_inode(struct inode *inode, void *data)
81306 +       struct MFT_REF *ref = data;
81308 +       return ino_get(ref) == inode->i_ino;
81311 +static int ntfs_set_inode(struct inode *inode, void *data)
81313 +       const struct MFT_REF *ref = data;
81315 +       inode->i_ino = ino_get(ref);
81316 +       return 0;
81319 +struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
81320 +                        const struct cpu_str *name)
81322 +       struct inode *inode;
81324 +       inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
81325 +                            (void *)ref);
81326 +       if (unlikely(!inode))
81327 +               return ERR_PTR(-ENOMEM);
81329 +       /* If this is a freshly allocated inode, need to read it now. */
81330 +       if (inode->i_state & I_NEW)
81331 +               inode = ntfs_read_mft(inode, name, ref);
81332 +       else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
81333 +               /* inode overlaps? */
81334 +               make_bad_inode(inode);
81335 +       }
81337 +       return inode;
81340 +enum get_block_ctx {
81341 +       GET_BLOCK_GENERAL = 0,
81342 +       GET_BLOCK_WRITE_BEGIN = 1,
81343 +       GET_BLOCK_DIRECT_IO_R = 2,
81344 +       GET_BLOCK_DIRECT_IO_W = 3,
81345 +       GET_BLOCK_BMAP = 4,
81348 +static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
81349 +                                      struct buffer_head *bh, int create,
81350 +                                      enum get_block_ctx ctx)
81352 +       struct super_block *sb = inode->i_sb;
81353 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
81354 +       struct ntfs_inode *ni = ntfs_i(inode);
81355 +       struct page *page = bh->b_page;
81356 +       u8 cluster_bits = sbi->cluster_bits;
81357 +       u32 block_size = sb->s_blocksize;
81358 +       u64 bytes, lbo, valid;
81359 +       u32 off;
81360 +       int err;
81361 +       CLST vcn, lcn, len;
81362 +       bool new;
81364 +       /*clear previous state*/
81365 +       clear_buffer_new(bh);
81366 +       clear_buffer_uptodate(bh);
81368 +       /* direct write uses 'create=0'*/
81369 +       if (!create && vbo >= ni->i_valid) {
81370 +               /* out of valid */
81371 +               return 0;
81372 +       }
81374 +       if (vbo >= inode->i_size) {
81375 +               /* out of size */
81376 +               return 0;
81377 +       }
81379 +       if (is_resident(ni)) {
81380 +               ni_lock(ni);
81381 +               err = attr_data_read_resident(ni, page);
81382 +               ni_unlock(ni);
81384 +               if (!err)
81385 +                       set_buffer_uptodate(bh);
81386 +               bh->b_size = block_size;
81387 +               return err;
81388 +       }
81390 +       vcn = vbo >> cluster_bits;
81391 +       off = vbo & sbi->cluster_mask;
81392 +       new = false;
81394 +       err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
81395 +       if (err)
81396 +               goto out;
81398 +       if (!len)
81399 +               return 0;
81401 +       bytes = ((u64)len << cluster_bits) - off;
81403 +       if (lcn == SPARSE_LCN) {
81404 +               if (!create) {
81405 +                       if (bh->b_size > bytes)
81406 +                               bh->b_size = bytes;
81408 +                       return 0;
81409 +               }
81410 +               WARN_ON(1);
81411 +       }
81413 +       if (new) {
81414 +               set_buffer_new(bh);
81415 +               if ((len << cluster_bits) > block_size)
81416 +                       ntfs_sparse_cluster(inode, page, vcn, len);
81417 +       }
81419 +       lbo = ((u64)lcn << cluster_bits) + off;
81421 +       set_buffer_mapped(bh);
81422 +       bh->b_bdev = sb->s_bdev;
81423 +       bh->b_blocknr = lbo >> sb->s_blocksize_bits;
81425 +       valid = ni->i_valid;
81427 +       if (ctx == GET_BLOCK_DIRECT_IO_W) {
81428 +               /*ntfs_direct_IO will update ni->i_valid */
81429 +               if (vbo >= valid)
81430 +                       set_buffer_new(bh);
81431 +       } else if (create) {
81432 +               /*normal write*/
81433 +               if (vbo >= valid) {
81434 +                       set_buffer_new(bh);
81435 +                       if (bytes > bh->b_size)
81436 +                               bytes = bh->b_size;
81437 +                       ni->i_valid = vbo + bytes;
81438 +                       mark_inode_dirty(inode);
81439 +               }
81440 +       } else if (valid >= inode->i_size) {
81441 +               /* normal read of normal file*/
81442 +       } else if (vbo >= valid) {
81443 +               /* read out of valid data*/
81444 +               /* should never be here 'cause already checked */
81445 +               clear_buffer_mapped(bh);
81446 +       } else if (vbo + bytes <= valid) {
81447 +               /* normal read */
81448 +       } else if (vbo + block_size <= valid) {
81449 +               /* normal short read */
81450 +               bytes = block_size;
81451 +       } else {
81452 +               /*
81453 +                * read across valid size: vbo < valid && valid < vbo + block_size
81454 +                */
81455 +               u32 voff = valid - vbo;
81457 +               bh->b_size = bytes = block_size;
81458 +               off = vbo & (PAGE_SIZE - 1);
81459 +               set_bh_page(bh, page, off);
81460 +               ll_rw_block(REQ_OP_READ, 0, 1, &bh);
81461 +               wait_on_buffer(bh);
81462 +               /* Uhhuh. Read error. Complain and punt. */
81463 +               if (!buffer_uptodate(bh)) {
81464 +                       err = -EIO;
81465 +                       goto out;
81466 +               }
81467 +               zero_user_segment(page, off + voff, off + block_size);
81468 +       }
81470 +       if (bh->b_size > bytes)
81471 +               bh->b_size = bytes;
81473 +#ifndef __LP64__
81474 +       if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
81475 +               static_assert(sizeof(size_t) < sizeof(loff_t));
81476 +               if (bytes > 0x40000000u)
81477 +                       bh->b_size = 0x40000000u;
81478 +       }
81479 +#endif
81481 +       return 0;
81483 +out:
81484 +       return err;
81487 +int ntfs_get_block(struct inode *inode, sector_t vbn,
81488 +                  struct buffer_head *bh_result, int create)
81490 +       return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
81491 +                                 bh_result, create, GET_BLOCK_GENERAL);
81494 +static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
81495 +                              struct buffer_head *bh_result, int create)
81497 +       return ntfs_get_block_vbo(inode,
81498 +                                 (u64)vsn << inode->i_sb->s_blocksize_bits,
81499 +                                 bh_result, create, GET_BLOCK_BMAP);
81502 +static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
81504 +       return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
81507 +static int ntfs_readpage(struct file *file, struct page *page)
81509 +       int err;
81510 +       struct address_space *mapping = page->mapping;
81511 +       struct inode *inode = mapping->host;
81512 +       struct ntfs_inode *ni = ntfs_i(inode);
81514 +       if (is_resident(ni)) {
81515 +               ni_lock(ni);
81516 +               err = attr_data_read_resident(ni, page);
81517 +               ni_unlock(ni);
81518 +               if (err != E_NTFS_NONRESIDENT) {
81519 +                       unlock_page(page);
81520 +                       return err;
81521 +               }
81522 +       }
81524 +       if (is_compressed(ni)) {
81525 +               ni_lock(ni);
81526 +               err = ni_readpage_cmpr(ni, page);
81527 +               ni_unlock(ni);
81528 +               return err;
81529 +       }
81531 +       /* normal + sparse files */
81532 +       return mpage_readpage(page, ntfs_get_block);
81535 +static void ntfs_readahead(struct readahead_control *rac)
81537 +       struct address_space *mapping = rac->mapping;
81538 +       struct inode *inode = mapping->host;
81539 +       struct ntfs_inode *ni = ntfs_i(inode);
81540 +       u64 valid;
81541 +       loff_t pos;
81543 +       if (is_resident(ni)) {
81544 +               /* no readahead for resident */
81545 +               return;
81546 +       }
81548 +       if (is_compressed(ni)) {
81549 +               /* no readahead for compressed */
81550 +               return;
81551 +       }
81553 +       valid = ni->i_valid;
81554 +       pos = readahead_pos(rac);
81556 +       if (valid < i_size_read(inode) && pos <= valid &&
81557 +           valid < pos + readahead_length(rac)) {
81558 +               /* range cross 'valid'. read it page by page */
81559 +               return;
81560 +       }
81562 +       mpage_readahead(rac, ntfs_get_block);
81565 +static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
81566 +                                     struct buffer_head *bh_result, int create)
81568 +       return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
81569 +                                 bh_result, create, GET_BLOCK_DIRECT_IO_R);
81572 +static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
81573 +                                     struct buffer_head *bh_result, int create)
81575 +       return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
81576 +                                 bh_result, create, GET_BLOCK_DIRECT_IO_W);
81579 +static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
81581 +       struct file *file = iocb->ki_filp;
81582 +       struct address_space *mapping = file->f_mapping;
81583 +       struct inode *inode = mapping->host;
81584 +       struct ntfs_inode *ni = ntfs_i(inode);
81585 +       size_t count = iov_iter_count(iter);
81586 +       loff_t vbo = iocb->ki_pos;
81587 +       loff_t end = vbo + count;
81588 +       int wr = iov_iter_rw(iter) & WRITE;
81589 +       const struct iovec *iov = iter->iov;
81590 +       unsigned long nr_segs = iter->nr_segs;
81591 +       loff_t valid;
81592 +       ssize_t ret;
81594 +       if (is_resident(ni)) {
81595 +               /*switch to buffered write*/
81596 +               ret = 0;
81597 +               goto out;
81598 +       }
81600 +       ret = blockdev_direct_IO(iocb, inode, iter,
81601 +                                wr ? ntfs_get_block_direct_IO_W
81602 +                                   : ntfs_get_block_direct_IO_R);
81603 +       valid = ni->i_valid;
81604 +       if (wr) {
81605 +               if (ret <= 0)
81606 +                       goto out;
81608 +               vbo += ret;
81609 +               if (vbo > valid && !S_ISBLK(inode->i_mode)) {
81610 +                       ni->i_valid = vbo;
81611 +                       mark_inode_dirty(inode);
81612 +               }
81613 +       } else if (vbo < valid && valid < end) {
81614 +               /* fix page */
81615 +               unsigned long uaddr = ~0ul;
81616 +               struct page *page;
81617 +               long i, npages;
81618 +               size_t dvbo = valid - vbo;
81619 +               size_t off = 0;
81621 +               /*Find user address*/
81622 +               for (i = 0; i < nr_segs; i++) {
81623 +                       if (off <= dvbo && dvbo < off + iov[i].iov_len) {
81624 +                               uaddr = (unsigned long)iov[i].iov_base + dvbo -
81625 +                                       off;
81626 +                               break;
81627 +                       }
81628 +                       off += iov[i].iov_len;
81629 +               }
81631 +               if (uaddr == ~0ul)
81632 +                       goto fix_error;
81634 +               npages = get_user_pages_unlocked(uaddr, 1, &page, FOLL_WRITE);
81636 +               if (npages <= 0)
81637 +                       goto fix_error;
81639 +               zero_user_segment(page, valid & (PAGE_SIZE - 1), PAGE_SIZE);
81640 +               put_page(page);
81641 +       }
81643 +out:
81644 +       return ret;
81645 +fix_error:
81646 +       ntfs_inode_warn(inode, "file garbage at 0x%llx", valid);
81647 +       goto out;
81650 +int ntfs_set_size(struct inode *inode, u64 new_size)
81652 +       struct super_block *sb = inode->i_sb;
81653 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
81654 +       struct ntfs_inode *ni = ntfs_i(inode);
81655 +       int err;
81657 +       /* Check for maximum file size */
81658 +       if (is_sparsed(ni) || is_compressed(ni)) {
81659 +               if (new_size > sbi->maxbytes_sparse) {
81660 +                       err = -EFBIG;
81661 +                       goto out;
81662 +               }
81663 +       } else if (new_size > sbi->maxbytes) {
81664 +               err = -EFBIG;
81665 +               goto out;
81666 +       }
81668 +       ni_lock(ni);
81669 +       down_write(&ni->file.run_lock);
81671 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
81672 +                           &ni->i_valid, true, NULL);
81674 +       up_write(&ni->file.run_lock);
81675 +       ni_unlock(ni);
81677 +       mark_inode_dirty(inode);
81679 +out:
81680 +       return err;
81683 +static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
81685 +       struct address_space *mapping = page->mapping;
81686 +       struct inode *inode = mapping->host;
81687 +       struct ntfs_inode *ni = ntfs_i(inode);
81688 +       int err;
81690 +       if (is_resident(ni)) {
81691 +               ni_lock(ni);
81692 +               err = attr_data_write_resident(ni, page);
81693 +               ni_unlock(ni);
81694 +               if (err != E_NTFS_NONRESIDENT) {
81695 +                       unlock_page(page);
81696 +                       return err;
81697 +               }
81698 +       }
81700 +       return block_write_full_page(page, ntfs_get_block, wbc);
81703 +static int ntfs_writepages(struct address_space *mapping,
81704 +                          struct writeback_control *wbc)
81706 +       struct inode *inode = mapping->host;
81707 +       struct ntfs_inode *ni = ntfs_i(inode);
81708 +       /* redirect call to 'ntfs_writepage' for resident files*/
81709 +       get_block_t *get_block = is_resident(ni) ? NULL : &ntfs_get_block;
81711 +       return mpage_writepages(mapping, wbc, get_block);
81714 +static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
81715 +                                     struct buffer_head *bh_result, int create)
81717 +       return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
81718 +                                 bh_result, create, GET_BLOCK_WRITE_BEGIN);
81721 +static int ntfs_write_begin(struct file *file, struct address_space *mapping,
81722 +                           loff_t pos, u32 len, u32 flags, struct page **pagep,
81723 +                           void **fsdata)
81725 +       int err;
81726 +       struct inode *inode = mapping->host;
81727 +       struct ntfs_inode *ni = ntfs_i(inode);
81729 +       *pagep = NULL;
81730 +       if (is_resident(ni)) {
81731 +               struct page *page = grab_cache_page_write_begin(
81732 +                       mapping, pos >> PAGE_SHIFT, flags);
81734 +               if (!page) {
81735 +                       err = -ENOMEM;
81736 +                       goto out;
81737 +               }
81739 +               ni_lock(ni);
81740 +               err = attr_data_read_resident(ni, page);
81741 +               ni_unlock(ni);
81743 +               if (!err) {
81744 +                       *pagep = page;
81745 +                       goto out;
81746 +               }
81747 +               unlock_page(page);
81748 +               put_page(page);
81750 +               if (err != E_NTFS_NONRESIDENT)
81751 +                       goto out;
81752 +       }
81754 +       err = block_write_begin(mapping, pos, len, flags, pagep,
81755 +                               ntfs_get_block_write_begin);
81757 +out:
81758 +       return err;
81761 +/* address_space_operations::write_end */
81762 +static int ntfs_write_end(struct file *file, struct address_space *mapping,
81763 +                         loff_t pos, u32 len, u32 copied, struct page *page,
81764 +                         void *fsdata)
81767 +       struct inode *inode = mapping->host;
81768 +       struct ntfs_inode *ni = ntfs_i(inode);
81769 +       u64 valid = ni->i_valid;
81770 +       bool dirty = false;
81771 +       int err;
81773 +       if (is_resident(ni)) {
81774 +               ni_lock(ni);
81775 +               err = attr_data_write_resident(ni, page);
81776 +               ni_unlock(ni);
81777 +               if (!err) {
81778 +                       dirty = true;
81779 +                       /* clear any buffers in page*/
81780 +                       if (page_has_buffers(page)) {
81781 +                               struct buffer_head *head, *bh;
81783 +                               bh = head = page_buffers(page);
81784 +                               do {
81785 +                                       clear_buffer_dirty(bh);
81786 +                                       clear_buffer_mapped(bh);
81787 +                                       set_buffer_uptodate(bh);
81788 +                               } while (head != (bh = bh->b_this_page));
81789 +                       }
81790 +                       SetPageUptodate(page);
81791 +                       err = copied;
81792 +               }
81793 +               unlock_page(page);
81794 +               put_page(page);
81795 +       } else {
81796 +               err = generic_write_end(file, mapping, pos, len, copied, page,
81797 +                                       fsdata);
81798 +       }
81800 +       if (err >= 0) {
81801 +               if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
81802 +                       inode->i_ctime = inode->i_mtime = current_time(inode);
81803 +                       ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
81804 +                       dirty = true;
81805 +               }
81807 +               if (valid != ni->i_valid) {
81808 +                       /* ni->i_valid is changed in ntfs_get_block_vbo */
81809 +                       dirty = true;
81810 +               }
81812 +               if (dirty)
81813 +                       mark_inode_dirty(inode);
81814 +       }
81816 +       return err;
81819 +int reset_log_file(struct inode *inode)
81821 +       int err;
81822 +       loff_t pos = 0;
81823 +       u32 log_size = inode->i_size;
81824 +       struct address_space *mapping = inode->i_mapping;
81826 +       for (;;) {
81827 +               u32 len;
81828 +               void *kaddr;
81829 +               struct page *page;
81831 +               len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
81833 +               err = block_write_begin(mapping, pos, len, 0, &page,
81834 +                                       ntfs_get_block_write_begin);
81835 +               if (err)
81836 +                       goto out;
81838 +               kaddr = kmap_atomic(page);
81839 +               memset(kaddr, -1, len);
81840 +               kunmap_atomic(kaddr);
81841 +               flush_dcache_page(page);
81843 +               err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
81844 +               if (err < 0)
81845 +                       goto out;
81846 +               pos += len;
81848 +               if (pos >= log_size)
81849 +                       break;
81850 +               balance_dirty_pages_ratelimited(mapping);
81851 +       }
81852 +out:
81853 +       mark_inode_dirty_sync(inode);
81855 +       return err;
81858 +int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
81860 +       return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
81863 +int ntfs_sync_inode(struct inode *inode)
81865 +       return _ni_write_inode(inode, 1);
81869 + * helper function for ntfs_flush_inodes.  This writes both the inode
81870 + * and the file data blocks, waiting for in flight data blocks before
81871 + * the start of the call.  It does not wait for any io started
81872 + * during the call
81873 + */
81874 +static int writeback_inode(struct inode *inode)
81876 +       int ret = sync_inode_metadata(inode, 0);
81878 +       if (!ret)
81879 +               ret = filemap_fdatawrite(inode->i_mapping);
81880 +       return ret;
81884 + * write data and metadata corresponding to i1 and i2.  The io is
81885 + * started but we do not wait for any of it to finish.
81886 + *
81887 + * filemap_flush is used for the block device, so if there is a dirty
81888 + * page for a block already in flight, we will not wait and start the
81889 + * io over again
81890 + */
81891 +int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
81892 +                     struct inode *i2)
81894 +       int ret = 0;
81896 +       if (i1)
81897 +               ret = writeback_inode(i1);
81898 +       if (!ret && i2)
81899 +               ret = writeback_inode(i2);
81900 +       if (!ret)
81901 +               ret = filemap_flush(sb->s_bdev->bd_inode->i_mapping);
81902 +       return ret;
81905 +int inode_write_data(struct inode *inode, const void *data, size_t bytes)
81907 +       pgoff_t idx;
81909 +       /* Write non resident data */
81910 +       for (idx = 0; bytes; idx++) {
81911 +               size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
81912 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
81914 +               if (IS_ERR(page))
81915 +                       return PTR_ERR(page);
81917 +               lock_page(page);
81918 +               WARN_ON(!PageUptodate(page));
81919 +               ClearPageUptodate(page);
81921 +               memcpy(page_address(page), data, op);
81923 +               flush_dcache_page(page);
81924 +               SetPageUptodate(page);
81925 +               unlock_page(page);
81927 +               ntfs_unmap_page(page);
81929 +               bytes -= op;
81930 +               data = Add2Ptr(data, PAGE_SIZE);
81931 +       }
81932 +       return 0;
81936 + * number of bytes to for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
81937 + * for unicode string of 'uni_len' length
81938 + */
81939 +static inline u32 ntfs_reparse_bytes(u32 uni_len)
81941 +       /* header + unicode string + decorated unicode string */
81942 +       return sizeof(short) * (2 * uni_len + 4) +
81943 +              offsetof(struct REPARSE_DATA_BUFFER,
81944 +                       SymbolicLinkReparseBuffer.PathBuffer);
81947 +static struct REPARSE_DATA_BUFFER *
81948 +ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
81949 +                          u32 size, u16 *nsize)
81951 +       int i, err;
81952 +       struct REPARSE_DATA_BUFFER *rp;
81953 +       __le16 *rp_name;
81954 +       typeof(rp->SymbolicLinkReparseBuffer) *rs;
81956 +       rp = ntfs_zalloc(ntfs_reparse_bytes(2 * size + 2));
81957 +       if (!rp)
81958 +               return ERR_PTR(-ENOMEM);
81960 +       rs = &rp->SymbolicLinkReparseBuffer;
81961 +       rp_name = rs->PathBuffer;
81963 +       /* Convert link name to utf16 */
81964 +       err = ntfs_nls_to_utf16(sbi, symname, size,
81965 +                               (struct cpu_str *)(rp_name - 1), 2 * size,
81966 +                               UTF16_LITTLE_ENDIAN);
81967 +       if (err < 0)
81968 +               goto out;
81970 +       /* err = the length of unicode name of symlink */
81971 +       *nsize = ntfs_reparse_bytes(err);
81973 +       if (*nsize > sbi->reparse.max_size) {
81974 +               err = -EFBIG;
81975 +               goto out;
81976 +       }
81978 +       /* translate linux '/' into windows '\' */
81979 +       for (i = 0; i < err; i++) {
81980 +               if (rp_name[i] == cpu_to_le16('/'))
81981 +                       rp_name[i] = cpu_to_le16('\\');
81982 +       }
81984 +       rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
81985 +       rp->ReparseDataLength =
81986 +               cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
81987 +                                             SymbolicLinkReparseBuffer));
81989 +       /* PrintName + SubstituteName */
81990 +       rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
81991 +       rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
81992 +       rs->PrintNameLength = rs->SubstituteNameOffset;
81994 +       /*
81995 +        * TODO: use relative path if possible to allow windows to parse this path
81996 +        * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE)
81997 +        */
81998 +       rs->Flags = 0;
82000 +       memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
82002 +       /* decorate SubstituteName */
82003 +       rp_name += err;
82004 +       rp_name[0] = cpu_to_le16('\\');
82005 +       rp_name[1] = cpu_to_le16('?');
82006 +       rp_name[2] = cpu_to_le16('?');
82007 +       rp_name[3] = cpu_to_le16('\\');
82009 +       return rp;
82010 +out:
82011 +       ntfs_free(rp);
82012 +       return ERR_PTR(err);
82015 +struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
82016 +                               struct inode *dir, struct dentry *dentry,
82017 +                               const struct cpu_str *uni, umode_t mode,
82018 +                               dev_t dev, const char *symname, u32 size,
82019 +                               int excl, struct ntfs_fnd *fnd)
82021 +       int err;
82022 +       struct super_block *sb = dir->i_sb;
82023 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
82024 +       const struct qstr *name = &dentry->d_name;
82025 +       CLST ino = 0;
82026 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
82027 +       struct ntfs_inode *ni = NULL;
82028 +       struct inode *inode = NULL;
82029 +       struct ATTRIB *attr;
82030 +       struct ATTR_STD_INFO5 *std5;
82031 +       struct ATTR_FILE_NAME *fname;
82032 +       struct MFT_REC *rec;
82033 +       u32 asize, dsize, sd_size;
82034 +       enum FILE_ATTRIBUTE fa;
82035 +       __le32 security_id = SECURITY_ID_INVALID;
82036 +       CLST vcn;
82037 +       const void *sd;
82038 +       u16 t16, nsize = 0, aid = 0;
82039 +       struct INDEX_ROOT *root, *dir_root;
82040 +       struct NTFS_DE *e, *new_de = NULL;
82041 +       struct REPARSE_DATA_BUFFER *rp = NULL;
82042 +       bool is_dir = S_ISDIR(mode);
82043 +       bool is_link = S_ISLNK(mode);
82044 +       bool rp_inserted = false;
82045 +       bool is_sp = S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
82046 +                    S_ISSOCK(mode);
82048 +       if (is_sp)
82049 +               return ERR_PTR(-EOPNOTSUPP);
82051 +       dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
82052 +       if (!dir_root)
82053 +               return ERR_PTR(-EINVAL);
82055 +       if (is_dir) {
82056 +               /* use parent's directory attributes */
82057 +               fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
82058 +                    FILE_ATTRIBUTE_ARCHIVE;
82059 +               /*
82060 +                * By default child directory inherits parent attributes
82061 +                * root directory is hidden + system
82062 +                * Make an exception for children in root
82063 +                */
82064 +               if (dir->i_ino == MFT_REC_ROOT)
82065 +                       fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
82066 +       } else if (is_link) {
82067 +               /* It is a good idea for a link to have the same type (file/dir) as its target */
82068 +               fa = FILE_ATTRIBUTE_REPARSE_POINT;
82070 +               /*
82071 +                * linux: there are dir/file/symlink and so on
82072 +                * NTFS: symlinks are "dir + reparse" or "file + reparse"
82073 +                * It is a good idea to create:
82074 +                * dir + reparse if 'symname' points to directory
82075 +                * or
82076 +                * file + reparse if 'symname' points to file
82077 +                * Unfortunately kern_path hangs if symname contains 'dir'
82078 +                */
82080 +               /*
82081 +                *      struct path path;
82082 +                *
82083 +                *      if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
82084 +                *              struct inode *target = d_inode(path.dentry);
82085 +                *
82086 +                *              if (S_ISDIR(target->i_mode))
82087 +                *                      fa |= FILE_ATTRIBUTE_DIRECTORY;
82088 +                *              // if ( target->i_sb == sb ){
82089 +                *              //      use relative path?
82090 +                *              // }
82091 +                *              path_put(&path);
82092 +                *      }
82093 +                */
82094 +       } else if (sbi->options.sparse) {
82095 +               /* sparse regular file, because of the 'sparse' mount option */
82096 +               fa = FILE_ATTRIBUTE_SPARSE_FILE | FILE_ATTRIBUTE_ARCHIVE;
82097 +       } else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
82098 +               /* compressed regular file, if parent is compressed */
82099 +               fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
82100 +       } else {
82101 +               /* regular file, default attributes */
82102 +               fa = FILE_ATTRIBUTE_ARCHIVE;
82103 +       }
82105 +       if (!(mode & 0222))
82106 +               fa |= FILE_ATTRIBUTE_READONLY;
82108 +       /* allocate PATH_MAX bytes */
82109 +       new_de = __getname();
82110 +       if (!new_de) {
82111 +               err = -ENOMEM;
82112 +               goto out1;
82113 +       }
82115 +       /* Mark rw ntfs as dirty. It will be cleared at umount. */
82116 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
82118 +       /* Step 1: allocate and fill new mft record */
82119 +       err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
82120 +       if (err)
82121 +               goto out2;
82123 +       ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
82124 +       if (IS_ERR(ni)) {
82125 +               err = PTR_ERR(ni);
82126 +               ni = NULL;
82127 +               goto out3;
82128 +       }
82129 +       inode = &ni->vfs_inode;
82131 +       inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime =
82132 +               current_time(inode);
82134 +       rec = ni->mi.mrec;
82135 +       rec->hard_links = cpu_to_le16(1);
82136 +       attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
82138 +       /* Get default security id */
82139 +       sd = s_default_security;
82140 +       sd_size = sizeof(s_default_security);
82142 +       if (is_ntfs3(sbi)) {
82143 +               security_id = dir_ni->std_security_id;
82144 +               if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
82145 +                       security_id = sbi->security.def_security_id;
82147 +                       if (security_id == SECURITY_ID_INVALID &&
82148 +                           !ntfs_insert_security(sbi, sd, sd_size,
82149 +                                                 &security_id, NULL))
82150 +                               sbi->security.def_security_id = security_id;
82151 +               }
82152 +       }
82154 +       /* Insert standard info */
82155 +       std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
82157 +       if (security_id == SECURITY_ID_INVALID) {
82158 +               dsize = sizeof(struct ATTR_STD_INFO);
82159 +       } else {
82160 +               dsize = sizeof(struct ATTR_STD_INFO5);
82161 +               std5->security_id = security_id;
82162 +               ni->std_security_id = security_id;
82163 +       }
82164 +       asize = SIZEOF_RESIDENT + dsize;
82166 +       attr->type = ATTR_STD;
82167 +       attr->size = cpu_to_le32(asize);
82168 +       attr->id = cpu_to_le16(aid++);
82169 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
82170 +       attr->res.data_size = cpu_to_le32(dsize);
82172 +       std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
82173 +               kernel2nt(&inode->i_atime);
82175 +       ni->std_fa = fa;
82176 +       std5->fa = fa;
82178 +       attr = Add2Ptr(attr, asize);
82180 +       /* Insert file name */
82181 +       err = fill_name_de(sbi, new_de, name, uni);
82182 +       if (err)
82183 +               goto out4;
82185 +       mi_get_ref(&ni->mi, &new_de->ref);
82187 +       fname = (struct ATTR_FILE_NAME *)(new_de + 1);
82188 +       mi_get_ref(&dir_ni->mi, &fname->home);
82189 +       fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
82190 +               fname->dup.a_time = std5->cr_time;
82191 +       fname->dup.alloc_size = fname->dup.data_size = 0;
82192 +       fname->dup.fa = std5->fa;
82193 +       fname->dup.ea_size = fname->dup.reparse = 0;
82195 +       dsize = le16_to_cpu(new_de->key_size);
82196 +       asize = QuadAlign(SIZEOF_RESIDENT + dsize);
82198 +       attr->type = ATTR_NAME;
82199 +       attr->size = cpu_to_le32(asize);
82200 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
82201 +       attr->res.flags = RESIDENT_FLAG_INDEXED;
82202 +       attr->id = cpu_to_le16(aid++);
82203 +       attr->res.data_size = cpu_to_le32(dsize);
82204 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);
82206 +       attr = Add2Ptr(attr, asize);
82208 +       if (security_id == SECURITY_ID_INVALID) {
82209 +               /* Insert security attribute */
82210 +               asize = SIZEOF_RESIDENT + QuadAlign(sd_size);
82212 +               attr->type = ATTR_SECURE;
82213 +               attr->size = cpu_to_le32(asize);
82214 +               attr->id = cpu_to_le16(aid++);
82215 +               attr->res.data_off = SIZEOF_RESIDENT_LE;
82216 +               attr->res.data_size = cpu_to_le32(sd_size);
82217 +               memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);
82219 +               attr = Add2Ptr(attr, asize);
82220 +       }
82222 +       if (fa & FILE_ATTRIBUTE_DIRECTORY) {
82223 +               /*
82224 +                * regular directory or symlink to directory
82225 +                * Create root attribute
82226 +                */
82227 +               dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
82228 +               asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
82230 +               attr->type = ATTR_ROOT;
82231 +               attr->size = cpu_to_le32(asize);
82232 +               attr->id = cpu_to_le16(aid++);
82234 +               attr->name_len = ARRAY_SIZE(I30_NAME);
82235 +               attr->name_off = SIZEOF_RESIDENT_LE;
82236 +               attr->res.data_off =
82237 +                       cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
82238 +               attr->res.data_size = cpu_to_le32(dsize);
82239 +               memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
82240 +                      sizeof(I30_NAME));
82242 +               root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
82243 +               memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
82244 +               root->ihdr.de_off =
82245 +                       cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
82246 +               root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
82247 +                                             sizeof(struct NTFS_DE));
82248 +               root->ihdr.total = root->ihdr.used;
82250 +               e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
82251 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
82252 +               e->flags = NTFS_IE_LAST;
82253 +       } else if (is_link) {
82254 +               /*
82255 +                * symlink to file
82256 +                * Create empty resident data attribute
82257 +                */
82258 +               asize = SIZEOF_RESIDENT;
82260 +               /* insert empty ATTR_DATA */
82261 +               attr->type = ATTR_DATA;
82262 +               attr->size = cpu_to_le32(SIZEOF_RESIDENT);
82263 +               attr->id = cpu_to_le16(aid++);
82264 +               attr->name_off = SIZEOF_RESIDENT_LE;
82265 +               attr->res.data_off = SIZEOF_RESIDENT_LE;
82266 +       } else {
82267 +               /*
82268 +                * regular file
82269 +                */
82270 +               attr->type = ATTR_DATA;
82271 +               attr->id = cpu_to_le16(aid++);
82272 +               /* Create empty non resident data attribute */
82273 +               attr->non_res = 1;
82274 +               attr->nres.evcn = cpu_to_le64(-1ll);
82275 +               if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
82276 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
82277 +                       attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
82278 +                       attr->flags = ATTR_FLAG_SPARSED;
82279 +                       asize = SIZEOF_NONRESIDENT_EX + 8;
82280 +               } else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
82281 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
82282 +                       attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
82283 +                       attr->flags = ATTR_FLAG_COMPRESSED;
82284 +                       attr->nres.c_unit = COMPRESSION_UNIT;
82285 +                       asize = SIZEOF_NONRESIDENT_EX + 8;
82286 +               } else {
82287 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
82288 +                       attr->name_off = SIZEOF_NONRESIDENT_LE;
82289 +                       asize = SIZEOF_NONRESIDENT + 8;
82290 +               }
82291 +               attr->nres.run_off = attr->name_off;
82292 +       }
82294 +       if (is_dir) {
82295 +               ni->ni_flags |= NI_FLAG_DIR;
82296 +               err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
82297 +               if (err)
82298 +                       goto out4;
82299 +       } else if (is_link) {
82300 +               rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);
82302 +               if (IS_ERR(rp)) {
82303 +                       err = PTR_ERR(rp);
82304 +                       rp = NULL;
82305 +                       goto out4;
82306 +               }
82308 +               /*
82309 +                * Insert ATTR_REPARSE
82310 +                */
82311 +               attr = Add2Ptr(attr, asize);
82312 +               attr->type = ATTR_REPARSE;
82313 +               attr->id = cpu_to_le16(aid++);
82315 +               /* resident or non resident? */
82316 +               asize = QuadAlign(SIZEOF_RESIDENT + nsize);
82317 +               t16 = PtrOffset(rec, attr);
82319 +               if (asize + t16 + 8 > sbi->record_size) {
82320 +                       CLST alen;
82321 +                       CLST clst = bytes_to_cluster(sbi, nsize);
82323 +                       /* bytes available for packed runs */
82324 +                       t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
82326 +                       attr->non_res = 1;
82327 +                       attr->nres.evcn = cpu_to_le64(clst - 1);
82328 +                       attr->name_off = SIZEOF_NONRESIDENT_LE;
82329 +                       attr->nres.run_off = attr->name_off;
82330 +                       attr->nres.data_size = cpu_to_le64(nsize);
82331 +                       attr->nres.valid_size = attr->nres.data_size;
82332 +                       attr->nres.alloc_size =
82333 +                               cpu_to_le64(ntfs_up_cluster(sbi, nsize));
82335 +                       err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
82336 +                                                    clst, NULL, 0, &alen, 0,
82337 +                                                    NULL);
82338 +                       if (err)
82339 +                               goto out5;
82341 +                       err = run_pack(&ni->file.run, 0, clst,
82342 +                                      Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
82343 +                                      &vcn);
82344 +                       if (err < 0)
82345 +                               goto out5;
82347 +                       if (vcn != clst) {
82348 +                               err = -EINVAL;
82349 +                               goto out5;
82350 +                       }
82352 +                       asize = SIZEOF_NONRESIDENT + QuadAlign(err);
82353 +                       inode->i_size = nsize;
82354 +               } else {
82355 +                       attr->res.data_off = SIZEOF_RESIDENT_LE;
82356 +                       attr->res.data_size = cpu_to_le32(nsize);
82357 +                       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
82358 +                       inode->i_size = nsize;
82359 +                       nsize = 0;
82360 +               }
82362 +               attr->size = cpu_to_le32(asize);
82364 +               err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
82365 +                                         &new_de->ref);
82366 +               if (err)
82367 +                       goto out5;
82369 +               rp_inserted = true;
82370 +       }
82372 +       attr = Add2Ptr(attr, asize);
82373 +       attr->type = ATTR_END;
82375 +       rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
82376 +       rec->next_attr_id = cpu_to_le16(aid);
82378 +       /* Step 2: Add new name in index */
82379 +       err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd);
82380 +       if (err)
82381 +               goto out6;
82383 +       /* Update current directory record */
82384 +       mark_inode_dirty(dir);
82386 +       /* Fill vfs inode fields */
82387 +       inode->i_uid = sbi->options.uid ? sbi->options.fs_uid : current_fsuid();
82388 +       inode->i_gid = sbi->options.gid          ? sbi->options.fs_gid
82389 +                      : (dir->i_mode & S_ISGID) ? dir->i_gid
82390 +                                                : current_fsgid();
82391 +       inode->i_generation = le16_to_cpu(rec->seq);
82393 +       dir->i_mtime = dir->i_ctime = inode->i_atime;
82395 +       if (is_dir) {
82396 +               if (dir->i_mode & S_ISGID)
82397 +                       mode |= S_ISGID;
82398 +               inode->i_op = &ntfs_dir_inode_operations;
82399 +               inode->i_fop = &ntfs_dir_operations;
82400 +       } else if (is_link) {
82401 +               inode->i_op = &ntfs_link_inode_operations;
82402 +               inode->i_fop = NULL;
82403 +               inode->i_mapping->a_ops = &ntfs_aops;
82404 +       } else {
82405 +               inode->i_op = &ntfs_file_inode_operations;
82406 +               inode->i_fop = &ntfs_file_operations;
82407 +               inode->i_mapping->a_ops =
82408 +                       is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
82409 +               init_rwsem(&ni->file.run_lock);
82410 +       }
82412 +       inode->i_mode = mode;
82414 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
82415 +       if (!is_link && (sb->s_flags & SB_POSIXACL)) {
82416 +               err = ntfs_init_acl(mnt_userns, inode, dir);
82417 +               if (err)
82418 +                       goto out6;
82419 +       } else
82420 +#endif
82421 +       {
82422 +               inode->i_flags |= S_NOSEC;
82423 +       }
82425 +       /* Write non resident data */
82426 +       if (nsize) {
82427 +               err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize);
82428 +               if (err)
82429 +                       goto out7;
82430 +       }
82432 +       /* call 'd_instantiate' after inode->i_op is set but before finish_open */
82433 +       d_instantiate(dentry, inode);
82435 +       mark_inode_dirty(inode);
82436 +       mark_inode_dirty(dir);
82438 +       /* normal exit */
82439 +       goto out2;
82441 +out7:
82443 +       /* undo 'indx_insert_entry' */
82444 +       indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
82445 +                         le16_to_cpu(new_de->key_size), sbi);
82446 +out6:
82447 +       if (rp_inserted)
82448 +               ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
82450 +out5:
82451 +       if (is_dir || run_is_empty(&ni->file.run))
82452 +               goto out4;
82454 +       run_deallocate(sbi, &ni->file.run, false);
82456 +out4:
82457 +       clear_rec_inuse(rec);
82458 +       clear_nlink(inode);
82459 +       ni->mi.dirty = false;
82460 +       discard_new_inode(inode);
82461 +out3:
82462 +       ntfs_mark_rec_free(sbi, ino);
82464 +out2:
82465 +       __putname(new_de);
82466 +       ntfs_free(rp);
82468 +out1:
82469 +       if (err)
82470 +               return ERR_PTR(err);
82472 +       unlock_new_inode(inode);
82474 +       return inode;
82477 +int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
82479 +       int err;
82480 +       struct inode *dir = d_inode(dentry->d_parent);
82481 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
82482 +       struct ntfs_inode *ni = ntfs_i(inode);
82483 +       struct super_block *sb = inode->i_sb;
82484 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
82485 +       const struct qstr *name = &dentry->d_name;
82486 +       struct NTFS_DE *new_de = NULL;
82487 +       struct ATTR_FILE_NAME *fname;
82488 +       struct ATTRIB *attr;
82489 +       u16 key_size;
82490 +       struct INDEX_ROOT *dir_root;
82492 +       dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
82493 +       if (!dir_root)
82494 +               return -EINVAL;
82496 +       /* allocate PATH_MAX bytes */
82497 +       new_de = __getname();
82498 +       if (!new_de)
82499 +               return -ENOMEM;
82501 +       /* Mark rw ntfs as dirty. It will be cleared at umount. */
82502 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);
82504 +       // Insert file name
82505 +       err = fill_name_de(sbi, new_de, name, NULL);
82506 +       if (err)
82507 +               goto out;
82509 +       key_size = le16_to_cpu(new_de->key_size);
82510 +       err = ni_insert_resident(ni, key_size, ATTR_NAME, NULL, 0, &attr, NULL);
82511 +       if (err)
82512 +               goto out;
82514 +       mi_get_ref(&ni->mi, &new_de->ref);
82516 +       fname = (struct ATTR_FILE_NAME *)(new_de + 1);
82517 +       mi_get_ref(&dir_ni->mi, &fname->home);
82518 +       fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
82519 +               fname->dup.a_time = kernel2nt(&inode->i_ctime);
82520 +       fname->dup.alloc_size = fname->dup.data_size = 0;
82521 +       fname->dup.fa = ni->std_fa;
82522 +       fname->dup.ea_size = fname->dup.reparse = 0;
82524 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, key_size);
82526 +       err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, NULL);
82527 +       if (err)
82528 +               goto out;
82530 +       le16_add_cpu(&ni->mi.mrec->hard_links, 1);
82531 +       ni->mi.dirty = true;
82533 +out:
82534 +       __putname(new_de);
82535 +       return err;
82539 + * ntfs_unlink_inode
82540 + *
82541 + * inode_operations::unlink
82542 + * inode_operations::rmdir
82543 + */
82544 +int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
82546 +       int err;
82547 +       struct super_block *sb = dir->i_sb;
82548 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
82549 +       struct inode *inode = d_inode(dentry);
82550 +       struct ntfs_inode *ni = ntfs_i(inode);
82551 +       const struct qstr *name = &dentry->d_name;
82552 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
82553 +       struct ntfs_index *indx = &dir_ni->dir;
82554 +       struct cpu_str *uni = NULL;
82555 +       struct ATTR_FILE_NAME *fname;
82556 +       u8 name_type;
82557 +       struct ATTR_LIST_ENTRY *le;
82558 +       struct MFT_REF ref;
82559 +       bool is_dir = S_ISDIR(inode->i_mode);
82560 +       struct INDEX_ROOT *dir_root;
82562 +       dir_root = indx_get_root(indx, dir_ni, NULL, NULL);
82563 +       if (!dir_root)
82564 +               return -EINVAL;
82566 +       ni_lock(ni);
82568 +       if (is_dir && !dir_is_empty(inode)) {
82569 +               err = -ENOTEMPTY;
82570 +               goto out1;
82571 +       }
82573 +       if (ntfs_is_meta_file(sbi, inode->i_ino)) {
82574 +               err = -EINVAL;
82575 +               goto out1;
82576 +       }
82578 +       /* allocate PATH_MAX bytes */
82579 +       uni = __getname();
82580 +       if (!uni) {
82581 +               err = -ENOMEM;
82582 +               goto out1;
82583 +       }
82585 +       /* Convert input string to unicode */
82586 +       err = ntfs_nls_to_utf16(sbi, name->name, name->len, uni, NTFS_NAME_LEN,
82587 +                               UTF16_HOST_ENDIAN);
82588 +       if (err < 0)
82589 +               goto out2;
82591 +       /*mark rw ntfs as dirty. it will be cleared at umount*/
82592 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
82594 +       /* find name in record */
82595 +       mi_get_ref(&dir_ni->mi, &ref);
82597 +       le = NULL;
82598 +       fname = ni_fname_name(ni, uni, &ref, &le);
82599 +       if (!fname) {
82600 +               err = -ENOENT;
82601 +               goto out3;
82602 +       }
82604 +       name_type = paired_name(fname->type);
82606 +       err = indx_delete_entry(indx, dir_ni, fname, fname_full_size(fname),
82607 +                               sbi);
82608 +       if (err)
82609 +               goto out3;
82611 +       /* Then remove name from mft */
82612 +       ni_remove_attr_le(ni, attr_from_name(fname), le);
82614 +       le16_add_cpu(&ni->mi.mrec->hard_links, -1);
82615 +       ni->mi.dirty = true;
82617 +       if (name_type != FILE_NAME_POSIX) {
82618 +               /* Now we should delete name by type */
82619 +               fname = ni_fname_type(ni, name_type, &le);
82620 +               if (fname) {
82621 +                       err = indx_delete_entry(indx, dir_ni, fname,
82622 +                                               fname_full_size(fname), sbi);
82623 +                       if (err)
82624 +                               goto out3;
82626 +                       ni_remove_attr_le(ni, attr_from_name(fname), le);
82628 +                       le16_add_cpu(&ni->mi.mrec->hard_links, -1);
82629 +               }
82630 +       }
82631 +out3:
82632 +       switch (err) {
82633 +       case 0:
82634 +               drop_nlink(inode); /* fallthrough */
82635 +       case -ENOTEMPTY:
82636 +       case -ENOSPC:
82637 +       case -EROFS:
82638 +               break;
82639 +       default:
82640 +               make_bad_inode(inode);
82641 +       }
82643 +       dir->i_mtime = dir->i_ctime = current_time(dir);
82644 +       mark_inode_dirty(dir);
82645 +       inode->i_ctime = dir->i_ctime;
82646 +       if (inode->i_nlink)
82647 +               mark_inode_dirty(inode);
82649 +out2:
82650 +       __putname(uni);
82651 +out1:
82652 +       ni_unlock(ni);
82653 +       return err;
82656 +void ntfs_evict_inode(struct inode *inode)
82658 +       truncate_inode_pages_final(&inode->i_data);
82660 +       if (inode->i_nlink)
82661 +               _ni_write_inode(inode, inode_needs_sync(inode));
82663 +       invalidate_inode_buffers(inode);
82664 +       clear_inode(inode);
82666 +       ni_clear(ntfs_i(inode));
82669 +static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
82670 +                                     int buflen)
82672 +       int i, err = 0;
82673 +       struct ntfs_inode *ni = ntfs_i(inode);
82674 +       struct super_block *sb = inode->i_sb;
82675 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
82676 +       u64 i_size = inode->i_size;
82677 +       u16 nlen = 0;
82678 +       void *to_free = NULL;
82679 +       struct REPARSE_DATA_BUFFER *rp;
82680 +       struct le_str *uni;
82681 +       struct ATTRIB *attr;
82683 +       /* Reparse data present. Try to parse it */
82684 +       static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
82685 +       static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
82687 +       *buffer = 0;
82689 +       /* Read into a temporary buffer */
82690 +       if (i_size > sbi->reparse.max_size || i_size <= sizeof(u32)) {
82691 +               err = -EINVAL;
82692 +               goto out;
82693 +       }
82695 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
82696 +       if (!attr) {
82697 +               err = -EINVAL;
82698 +               goto out;
82699 +       }
82701 +       if (!attr->non_res) {
82702 +               rp = resident_data_ex(attr, i_size);
82703 +               if (!rp) {
82704 +                       err = -EINVAL;
82705 +                       goto out;
82706 +               }
82707 +       } else {
82708 +               rp = ntfs_malloc(i_size);
82709 +               if (!rp) {
82710 +                       err = -ENOMEM;
82711 +                       goto out;
82712 +               }
82713 +               to_free = rp;
82714 +               err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, i_size, NULL);
82715 +               if (err)
82716 +                       goto out;
82717 +       }
82719 +       err = -EINVAL;
82721 +       /* Microsoft Tag */
82722 +       switch (rp->ReparseTag) {
82723 +       case IO_REPARSE_TAG_MOUNT_POINT:
82724 +               /* Mount points and junctions */
82725 +               /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
82726 +               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
82727 +                                      MountPointReparseBuffer.PathBuffer))
82728 +                       goto out;
82729 +               uni = Add2Ptr(rp,
82730 +                             offsetof(struct REPARSE_DATA_BUFFER,
82731 +                                      MountPointReparseBuffer.PathBuffer) +
82732 +                                     le16_to_cpu(rp->MountPointReparseBuffer
82733 +                                                         .PrintNameOffset) -
82734 +                                     2);
82735 +               nlen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
82736 +               break;
82738 +       case IO_REPARSE_TAG_SYMLINK:
82739 +               /* FolderSymbolicLink */
82740 +               /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
82741 +               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
82742 +                                      SymbolicLinkReparseBuffer.PathBuffer))
82743 +                       goto out;
82744 +               uni = Add2Ptr(rp,
82745 +                             offsetof(struct REPARSE_DATA_BUFFER,
82746 +                                      SymbolicLinkReparseBuffer.PathBuffer) +
82747 +                                     le16_to_cpu(rp->SymbolicLinkReparseBuffer
82748 +                                                         .PrintNameOffset) -
82749 +                                     2);
82750 +               nlen = le16_to_cpu(
82751 +                       rp->SymbolicLinkReparseBuffer.PrintNameLength);
82752 +               break;
82754 +       case IO_REPARSE_TAG_CLOUD:
82755 +       case IO_REPARSE_TAG_CLOUD_1:
82756 +       case IO_REPARSE_TAG_CLOUD_2:
82757 +       case IO_REPARSE_TAG_CLOUD_3:
82758 +       case IO_REPARSE_TAG_CLOUD_4:
82759 +       case IO_REPARSE_TAG_CLOUD_5:
82760 +       case IO_REPARSE_TAG_CLOUD_6:
82761 +       case IO_REPARSE_TAG_CLOUD_7:
82762 +       case IO_REPARSE_TAG_CLOUD_8:
82763 +       case IO_REPARSE_TAG_CLOUD_9:
82764 +       case IO_REPARSE_TAG_CLOUD_A:
82765 +       case IO_REPARSE_TAG_CLOUD_B:
82766 +       case IO_REPARSE_TAG_CLOUD_C:
82767 +       case IO_REPARSE_TAG_CLOUD_D:
82768 +       case IO_REPARSE_TAG_CLOUD_E:
82769 +       case IO_REPARSE_TAG_CLOUD_F:
82770 +               err = sizeof("OneDrive") - 1;
82771 +               if (err > buflen)
82772 +                       err = buflen;
82773 +               memcpy(buffer, "OneDrive", err);
82774 +               goto out;
82776 +       default:
82777 +               if (IsReparseTagMicrosoft(rp->ReparseTag)) {
82778 +                       /* unknown Microsoft Tag */
82779 +                       goto out;
82780 +               }
82781 +               if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
82782 +                   i_size <= sizeof(struct REPARSE_POINT)) {
82783 +                       goto out;
82784 +               }
82786 +               /* Users tag */
82787 +               uni = Add2Ptr(rp, sizeof(struct REPARSE_POINT) - 2);
82788 +               nlen = le16_to_cpu(rp->ReparseDataLength) -
82789 +                      sizeof(struct REPARSE_POINT);
82790 +       }
82792 +       /* Convert nlen from bytes to UNICODE chars */
82793 +       nlen >>= 1;
82795 +       /* Check that name is available */
82796 +       if (!nlen || &uni->name[nlen] > (__le16 *)Add2Ptr(rp, i_size))
82797 +               goto out;
82799 +       /* If name is already zero terminated then truncate it now */
82800 +       if (!uni->name[nlen - 1])
82801 +               nlen -= 1;
82802 +       uni->len = nlen;
82804 +       err = ntfs_utf16_to_nls(sbi, uni, buffer, buflen);
82806 +       if (err < 0)
82807 +               goto out;
82809 +       /* translate windows '\' into linux '/' */
82810 +       for (i = 0; i < err; i++) {
82811 +               if (buffer[i] == '\\')
82812 +                       buffer[i] = '/';
82813 +       }
82815 +       /* Always set last zero */
82816 +       buffer[err] = 0;
82817 +out:
82818 +       ntfs_free(to_free);
82819 +       return err;
82822 +static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
82823 +                                struct delayed_call *done)
82825 +       int err;
82826 +       char *ret;
82828 +       if (!de)
82829 +               return ERR_PTR(-ECHILD);
82831 +       ret = kmalloc(PAGE_SIZE, GFP_NOFS);
82832 +       if (!ret)
82833 +               return ERR_PTR(-ENOMEM);
82835 +       err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE);
82836 +       if (err < 0) {
82837 +               kfree(ret);
82838 +               return ERR_PTR(err);
82839 +       }
82841 +       set_delayed_call(done, kfree_link, ret);
82843 +       return ret;
82846 +const struct inode_operations ntfs_link_inode_operations = {
82847 +       .get_link = ntfs_get_link,
82848 +       .setattr = ntfs3_setattr,
82849 +       .listxattr = ntfs_listxattr,
82850 +       .permission = ntfs_permission,
82851 +       .get_acl = ntfs_get_acl,
82852 +       .set_acl = ntfs_set_acl,
82855 +const struct address_space_operations ntfs_aops = {
82856 +       .readpage = ntfs_readpage,
82857 +       .readahead = ntfs_readahead,
82858 +       .writepage = ntfs_writepage,
82859 +       .writepages = ntfs_writepages,
82860 +       .write_begin = ntfs_write_begin,
82861 +       .write_end = ntfs_write_end,
82862 +       .direct_IO = ntfs_direct_IO,
82863 +       .bmap = ntfs_bmap,
82866 +const struct address_space_operations ntfs_aops_cmpr = {
82867 +       .readpage = ntfs_readpage,
82868 +       .readahead = ntfs_readahead,
82870 diff --git a/fs/ntfs3/lib/decompress_common.c b/fs/ntfs3/lib/decompress_common.c
82871 new file mode 100644
82872 index 000000000000..83c9e93aea77
82873 --- /dev/null
82874 +++ b/fs/ntfs3/lib/decompress_common.c
82875 @@ -0,0 +1,332 @@
82876 +// SPDX-License-Identifier: GPL-2.0-or-later
82878 + * decompress_common.c - Code shared by the XPRESS and LZX decompressors
82879 + *
82880 + * Copyright (C) 2015 Eric Biggers
82881 + *
82882 + * This program is free software: you can redistribute it and/or modify it under
82883 + * the terms of the GNU General Public License as published by the Free Software
82884 + * Foundation, either version 2 of the License, or (at your option) any later
82885 + * version.
82886 + *
82887 + * This program is distributed in the hope that it will be useful, but WITHOUT
82888 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
82889 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
82890 + * details.
82891 + *
82892 + * You should have received a copy of the GNU General Public License along with
82893 + * this program.  If not, see <http://www.gnu.org/licenses/>.
82894 + */
82896 +#include "decompress_common.h"
82899 + * make_huffman_decode_table() -
82900 + *
82901 + * Build a decoding table for a canonical prefix code, or "Huffman code".
82902 + *
82903 + * This is an internal function, not part of the library API!
82904 + *
82905 + * This takes as input the length of the codeword for each symbol in the
82906 + * alphabet and produces as output a table that can be used for fast
82907 + * decoding of prefix-encoded symbols using read_huffsym().
82908 + *
82909 + * Strictly speaking, a canonical prefix code might not be a Huffman
82910 + * code.  But this algorithm will work either way; and in fact, since
82911 + * Huffman codes are defined in terms of symbol frequencies, there is no
82912 + * way for the decompressor to know whether the code is a true Huffman
82913 + * code or not until all symbols have been decoded.
82914 + *
82915 + * Because the prefix code is assumed to be "canonical", it can be
82916 + * reconstructed directly from the codeword lengths.  A prefix code is
82917 + * canonical if and only if a longer codeword never lexicographically
82918 + * precedes a shorter codeword, and the lexicographic ordering of
82919 + * codewords of the same length is the same as the lexicographic ordering
82920 + * of the corresponding symbols.  Consequently, we can sort the symbols
82921 + * primarily by codeword length and secondarily by symbol value, then
82922 + * reconstruct the prefix code by generating codewords lexicographically
82923 + * in that order.
82924 + *
82925 + * This function does not, however, generate the prefix code explicitly.
82926 + * Instead, it directly builds a table for decoding symbols using the
82927 + * code.  The basic idea is this: given the next 'max_codeword_len' bits
82928 + * in the input, we can look up the decoded symbol by indexing a table
82929 + * containing 2**max_codeword_len entries.  A codeword with length
82930 + * 'max_codeword_len' will have exactly one entry in this table, whereas
82931 + * a codeword shorter than 'max_codeword_len' will have multiple entries
82932 + * in this table.  Precisely, a codeword of length n will be represented
82933 + * by 2**(max_codeword_len - n) entries in this table.  The 0-based index
82934 + * of each such entry will contain the corresponding codeword as a prefix
82935 + * when zero-padded on the left to 'max_codeword_len' binary digits.
82936 + *
82937 + * That's the basic idea, but we implement two optimizations regarding
82938 + * the format of the decode table itself:
82939 + *
82940 + * - For many compression formats, the maximum codeword length is too
82941 + *   long for it to be efficient to build the full decoding table
82942 + *   whenever a new prefix code is used.  Instead, we can build the table
82943 + *   using only 2**table_bits entries, where 'table_bits' is some number
82944 + *   less than or equal to 'max_codeword_len'.  Then, only codewords of
82945 + *   length 'table_bits' and shorter can be directly looked up.  For
82946 + *   longer codewords, the direct lookup instead produces the root of a
82947 + *   binary tree.  Using this tree, the decoder can do traditional
82948 + *   bit-by-bit decoding of the remainder of the codeword.  Child nodes
82949 + *   are allocated in extra entries at the end of the table; leaf nodes
82950 + *   contain symbols.  Note that the long-codeword case is, in general,
82951 + *   not performance critical, since in Huffman codes the most frequently
82952 + *   used symbols are assigned the shortest codeword lengths.
82953 + *
82954 + * - When we decode a symbol using a direct lookup of the table, we still
82955 + *   need to know its length so that the bitstream can be advanced by the
82956 + *   appropriate number of bits.  The simple solution is to simply retain
82957 + *   the 'lens' array and use the decoded symbol as an index into it.
82958 + *   However, this requires two separate array accesses in the fast path.
82959 + *   The optimization is to store the length directly in the decode
82960 + *   table.  We use the bottom 11 bits for the symbol and the top 5 bits
82961 + *   for the length.  In addition, to combine this optimization with the
82962 + *   previous one, we introduce a special case where the top 2 bits of
82963 + *   the length are both set if the entry is actually the root of a
82964 + *   binary tree.
82965 + *
82966 + * @decode_table:
82967 + *     The array in which to create the decoding table.  This must have
82968 + *     a length of at least ((2**table_bits) + 2 * num_syms) entries.
82969 + *
82970 + * @num_syms:
82971 + *     The number of symbols in the alphabet; also, the length of the
82972 + *     'lens' array.  Must be less than or equal to 2048.
82973 + *
82974 + * @table_bits:
82975 + *     The order of the decode table size, as explained above.  Must be
82976 + *     less than or equal to 13.
82977 + *
82978 + * @lens:
82979 + *     An array of length @num_syms, indexable by symbol, that gives the
82980 + *     length of the codeword, in bits, for that symbol.  The length can
82981 + *     be 0, which means that the symbol does not have a codeword
82982 + *     assigned.
82983 + *
82984 + * @max_codeword_len:
82985 + *     The longest codeword length allowed in the compression format.
82986 + *     All entries in 'lens' must be less than or equal to this value.
82987 + *     This must be less than or equal to 23.
82988 + *
82989 + * @working_space
82990 + *     A temporary array of length '2 * (max_codeword_len + 1) +
82991 + *     num_syms'.
82992 + *
82993 + * Returns 0 on success, or -1 if the lengths do not form a valid prefix
82994 + * code.
82995 + */
82996 +int make_huffman_decode_table(u16 decode_table[], const u32 num_syms,
82997 +                             const u32 table_bits, const u8 lens[],
82998 +                             const u32 max_codeword_len,
82999 +                             u16 working_space[])
83001 +       const u32 table_num_entries = 1 << table_bits;
83002 +       u16 * const len_counts = &working_space[0];
83003 +       u16 * const offsets = &working_space[1 * (max_codeword_len + 1)];
83004 +       u16 * const sorted_syms = &working_space[2 * (max_codeword_len + 1)];
83005 +       int left;
83006 +       void *decode_table_ptr;
83007 +       u32 sym_idx;
83008 +       u32 codeword_len;
83009 +       u32 stores_per_loop;
83010 +       u32 decode_table_pos;
83011 +       u32 len;
83012 +       u32 sym;
83014 +       /* Count how many symbols have each possible codeword length.
83015 +        * Note that a length of 0 indicates the corresponding symbol is not
83016 +        * used in the code and therefore does not have a codeword.
83017 +        */
83018 +       for (len = 0; len <= max_codeword_len; len++)
83019 +               len_counts[len] = 0;
83020 +       for (sym = 0; sym < num_syms; sym++)
83021 +               len_counts[lens[sym]]++;
83023 +       /* We can assume all lengths are <= max_codeword_len, but we
83024 +        * cannot assume they form a valid prefix code.  A codeword of
83025 +        * length n should require a proportion of the codespace equaling
83026 +        * (1/2)^n.  The code is valid if and only if the codespace is
83027 +        * exactly filled by the lengths, by this measure.
83028 +        */
83029 +       left = 1;
83030 +       for (len = 1; len <= max_codeword_len; len++) {
83031 +               left <<= 1;
83032 +               left -= len_counts[len];
83033 +               if (left < 0) {
83034 +                       /* The lengths overflow the codespace; that is, the code
83035 +                        * is over-subscribed.
83036 +                        */
83037 +                       return -1;
83038 +               }
83039 +       }
83041 +       if (left) {
83042 +               /* The lengths do not fill the codespace; that is, they form an
83043 +                * incomplete set.
83044 +                */
83045 +               if (left == (1 << max_codeword_len)) {
83046 +                       /* The code is completely empty.  This is arguably
83047 +                        * invalid, but in fact it is valid in LZX and XPRESS,
83048 +                        * so we must allow it.  By definition, no symbols can
83049 +                        * be decoded with an empty code.  Consequently, we
83050 +                        * technically don't even need to fill in the decode
83051 +                        * table.  However, to avoid accessing uninitialized
83052 +                        * memory if the algorithm nevertheless attempts to
83053 +                        * decode symbols using such a code, we zero out the
83054 +                        * decode table.
83055 +                        */
83056 +                       memset(decode_table, 0,
83057 +                              table_num_entries * sizeof(decode_table[0]));
83058 +                       return 0;
83059 +               }
83060 +               return -1;
83061 +       }
83063 +       /* Sort the symbols primarily by length and secondarily by symbol order.
83064 +        */
83066 +       /* Initialize 'offsets' so that offsets[len] for 1 <= len <=
83067 +        * max_codeword_len is the number of codewords shorter than 'len' bits.
83068 +        */
83069 +       offsets[1] = 0;
83070 +       for (len = 1; len < max_codeword_len; len++)
83071 +               offsets[len + 1] = offsets[len] + len_counts[len];
83073 +       /* Use the 'offsets' array to sort the symbols.  Note that we do not
83074 +        * include symbols that are not used in the code.  Consequently, fewer
83075 +        * than 'num_syms' entries in 'sorted_syms' may be filled.
83076 +        */
83077 +       for (sym = 0; sym < num_syms; sym++)
83078 +               if (lens[sym])
83079 +                       sorted_syms[offsets[lens[sym]]++] = sym;
83081 +       /* Fill entries for codewords with length <= table_bits
83082 +        * --- that is, those short enough for a direct mapping.
83083 +        *
83084 +        * The table will start with entries for the shortest codeword(s), which
83085 +        * have the most entries.  From there, the number of entries per
83086 +        * codeword will decrease.
83087 +        */
83088 +       decode_table_ptr = decode_table;
83089 +       sym_idx = 0;
83090 +       codeword_len = 1;
83091 +       stores_per_loop = (1 << (table_bits - codeword_len));
83092 +       for (; stores_per_loop != 0; codeword_len++, stores_per_loop >>= 1) {
83093 +               u32 end_sym_idx = sym_idx + len_counts[codeword_len];
83095 +               for (; sym_idx < end_sym_idx; sym_idx++) {
83096 +                       u16 entry;
83097 +                       u16 *p;
83098 +                       u32 n;
83100 +                       entry = ((u32)codeword_len << 11) | sorted_syms[sym_idx];
83101 +                       p = (u16 *)decode_table_ptr;
83102 +                       n = stores_per_loop;
83104 +                       do {
83105 +                               *p++ = entry;
83106 +                       } while (--n);
83108 +                       decode_table_ptr = p;
83109 +               }
83110 +       }
83112 +       /* If we've filled in the entire table, we are done.  Otherwise,
83113 +        * there are codewords longer than table_bits for which we must
83114 +        * generate binary trees.
83115 +        */
83116 +       decode_table_pos = (u16 *)decode_table_ptr - decode_table;
83117 +       if (decode_table_pos != table_num_entries) {
83118 +               u32 j;
83119 +               u32 next_free_tree_slot;
83120 +               u32 cur_codeword;
83122 +               /* First, zero out the remaining entries.  This is
83123 +                * necessary so that these entries appear as
83124 +                * "unallocated" in the next part.  Each of these entries
83125 +                * will eventually be filled with the representation of
83126 +                * the root node of a binary tree.
83127 +                */
83128 +               j = decode_table_pos;
83129 +               do {
83130 +                       decode_table[j] = 0;
83131 +               } while (++j != table_num_entries);
83133 +               /* We allocate child nodes starting at the end of the
83134 +                * direct lookup table.  Note that there should be
83135 +                * 2*num_syms extra entries for this purpose, although
83136 +                * fewer than this may actually be needed.
83137 +                */
83138 +               next_free_tree_slot = table_num_entries;
83140 +               /* Iterate through each codeword with length greater than
83141 +                * 'table_bits', primarily in order of codeword length
83142 +                * and secondarily in order of symbol.
83143 +                */
83144 +               for (cur_codeword = decode_table_pos << 1;
83145 +                    codeword_len <= max_codeword_len;
83146 +                    codeword_len++, cur_codeword <<= 1) {
83147 +                       u32 end_sym_idx = sym_idx + len_counts[codeword_len];
83149 +                       for (; sym_idx < end_sym_idx; sym_idx++, cur_codeword++) {
83150 +                               /* 'sorted_sym' is the symbol represented by the
83151 +                                * codeword.
83152 +                                */
83153 +                               u32 sorted_sym = sorted_syms[sym_idx];
83154 +                               u32 extra_bits = codeword_len - table_bits;
83155 +                               u32 node_idx = cur_codeword >> extra_bits;
83157 +                               /* Go through each bit of the current codeword
83158 +                                * beyond the prefix of length @table_bits and
83159 +                                * walk the appropriate binary tree, allocating
83160 +                                * any slots that have not yet been allocated.
83161 +                                *
83162 +                                * Note that the 'pointer' entry to the binary
83163 +                                * tree, which is stored in the direct lookup
83164 +                                * portion of the table, is represented
83165 +                                * identically to other internal (non-leaf)
83166 +                                * nodes of the binary tree; it can be thought
83167 +                                * of as simply the root of the tree.  The
83168 +                                * representation of these internal nodes is
83169 +                                * simply the index of the left child combined
83170 +                                * with the special bits 0xC000 to distingush
83171 +                                * the entry from direct mapping and leaf node
83172 +                                * entries.
83173 +                                */
83174 +                               do {
83175 +                                       /* At least one bit remains in the
83176 +                                        * codeword, but the current node is an
83177 +                                        * unallocated leaf.  Change it to an
83178 +                                        * internal node.
83179 +                                        */
83180 +                                       if (decode_table[node_idx] == 0) {
83181 +                                               decode_table[node_idx] =
83182 +                                                       next_free_tree_slot | 0xC000;
83183 +                                               decode_table[next_free_tree_slot++] = 0;
83184 +                                               decode_table[next_free_tree_slot++] = 0;
83185 +                                       }
83187 +                                       /* Go to the left child if the next bit
83188 +                                        * in the codeword is 0; otherwise go to
83189 +                                        * the right child.
83190 +                                        */
83191 +                                       node_idx = decode_table[node_idx] & 0x3FFF;
83192 +                                       --extra_bits;
83193 +                                       node_idx += (cur_codeword >> extra_bits) & 1;
83194 +                               } while (extra_bits != 0);
83196 +                               /* We've traversed the tree using the entire
83197 +                                * codeword, and we're now at the entry where
83198 +                                * the actual symbol will be stored.  This is
83199 +                                * distinguished from internal nodes by not
83200 +                                * having its high two bits set.
83201 +                                */
83202 +                               decode_table[node_idx] = sorted_sym;
83203 +                       }
83204 +               }
83205 +       }
83206 +       return 0;
83208 diff --git a/fs/ntfs3/lib/decompress_common.h b/fs/ntfs3/lib/decompress_common.h
83209 new file mode 100644
83210 index 000000000000..66297f398403
83211 --- /dev/null
83212 +++ b/fs/ntfs3/lib/decompress_common.h
83213 @@ -0,0 +1,352 @@
83214 +/* SPDX-License-Identifier: GPL-2.0-or-later */
83217 + * decompress_common.h - Code shared by the XPRESS and LZX decompressors
83218 + *
83219 + * Copyright (C) 2015 Eric Biggers
83220 + *
83221 + * This program is free software: you can redistribute it and/or modify it under
83222 + * the terms of the GNU General Public License as published by the Free Software
83223 + * Foundation, either version 2 of the License, or (at your option) any later
83224 + * version.
83225 + *
83226 + * This program is distributed in the hope that it will be useful, but WITHOUT
83227 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
83228 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
83229 + * details.
83230 + *
83231 + * You should have received a copy of the GNU General Public License along with
83232 + * this program.  If not, see <http://www.gnu.org/licenses/>.
83233 + */
83235 +#include <linux/string.h>
83236 +#include <linux/compiler.h>
83237 +#include <linux/types.h>
83238 +#include <linux/slab.h>
83239 +#include <asm/unaligned.h>
83242 +/* "Force inline" macro (not required, but helpful for performance)  */
83243 +#define forceinline __always_inline
83245 +/* Enable whole-word match copying on selected architectures  */
83246 +#if defined(__i386__) || defined(__x86_64__) || defined(__ARM_FEATURE_UNALIGNED)
83247 +#  define FAST_UNALIGNED_ACCESS
83248 +#endif
83250 +/* Size of a machine word  */
83251 +#define WORDBYTES (sizeof(size_t))
83253 +static forceinline void
83254 +copy_unaligned_word(const void *src, void *dst)
83256 +       put_unaligned(get_unaligned((const size_t *)src), (size_t *)dst);
83260 +/* Generate a "word" with platform-dependent size whose bytes all contain the
83261 + * value 'b'.
83262 + */
83263 +static forceinline size_t repeat_byte(u8 b)
83265 +       size_t v;
83267 +       v = b;
83268 +       v |= v << 8;
83269 +       v |= v << 16;
83270 +       v |= v << ((WORDBYTES == 8) ? 32 : 0);
83271 +       return v;
83274 +/* Structure that encapsulates a block of in-memory data being interpreted as a
83275 + * stream of bits, optionally with interwoven literal bytes.  Bits are assumed
83276 + * to be stored in little endian 16-bit coding units, with the bits ordered high
83277 + * to low.
83278 + */
83279 +struct input_bitstream {
83281 +       /* Bits that have been read from the input buffer.  The bits are
83282 +        * left-justified; the next bit is always bit 31.
83283 +        */
83284 +       u32 bitbuf;
83286 +       /* Number of bits currently held in @bitbuf.  */
83287 +       u32 bitsleft;
83289 +       /* Pointer to the next byte to be retrieved from the input buffer.  */
83290 +       const u8 *next;
83292 +       /* Pointer to just past the end of the input buffer.  */
83293 +       const u8 *end;
83296 +/* Initialize a bitstream to read from the specified input buffer.  */
83297 +static forceinline void init_input_bitstream(struct input_bitstream *is,
83298 +                                            const void *buffer, u32 size)
83300 +       is->bitbuf = 0;
83301 +       is->bitsleft = 0;
83302 +       is->next = buffer;
83303 +       is->end = is->next + size;
83306 +/* Ensure the bit buffer variable for the bitstream contains at least @num_bits
83307 + * bits.  Following this, bitstream_peek_bits() and/or bitstream_remove_bits()
83308 + * may be called on the bitstream to peek or remove up to @num_bits bits.  Note
83309 + * that @num_bits must be <= 16.
83310 + */
83311 +static forceinline void bitstream_ensure_bits(struct input_bitstream *is,
83312 +                                             u32 num_bits)
83314 +       if (is->bitsleft < num_bits) {
83315 +               if (is->end - is->next >= 2) {
83316 +                       is->bitbuf |= (u32)get_unaligned_le16(is->next)
83317 +                                       << (16 - is->bitsleft);
83318 +                       is->next += 2;
83319 +               }
83320 +               is->bitsleft += 16;
83321 +       }
83324 +/* Return the next @num_bits bits from the bitstream, without removing them.
83325 + * There must be at least @num_bits remaining in the buffer variable, from a
83326 + * previous call to bitstream_ensure_bits().
83327 + */
83328 +static forceinline u32
83329 +bitstream_peek_bits(const struct input_bitstream *is, const u32 num_bits)
83331 +       return (is->bitbuf >> 1) >> (sizeof(is->bitbuf) * 8 - num_bits - 1);
83334 +/* Remove @num_bits from the bitstream.  There must be at least @num_bits
83335 + * remaining in the buffer variable, from a previous call to
83336 + * bitstream_ensure_bits().
83337 + */
83338 +static forceinline void
83339 +bitstream_remove_bits(struct input_bitstream *is, u32 num_bits)
83341 +       is->bitbuf <<= num_bits;
83342 +       is->bitsleft -= num_bits;
83345 +/* Remove and return @num_bits bits from the bitstream.  There must be at least
83346 + * @num_bits remaining in the buffer variable, from a previous call to
83347 + * bitstream_ensure_bits().
83348 + */
83349 +static forceinline u32
83350 +bitstream_pop_bits(struct input_bitstream *is, u32 num_bits)
83352 +       u32 bits = bitstream_peek_bits(is, num_bits);
83354 +       bitstream_remove_bits(is, num_bits);
83355 +       return bits;
83358 +/* Read and return the next @num_bits bits from the bitstream.  */
83359 +static forceinline u32
83360 +bitstream_read_bits(struct input_bitstream *is, u32 num_bits)
83362 +       bitstream_ensure_bits(is, num_bits);
83363 +       return bitstream_pop_bits(is, num_bits);
83366 +/* Read and return the next literal byte embedded in the bitstream.  */
83367 +static forceinline u8
83368 +bitstream_read_byte(struct input_bitstream *is)
83370 +       if (unlikely(is->end == is->next))
83371 +               return 0;
83372 +       return *is->next++;
83375 +/* Read and return the next 16-bit integer embedded in the bitstream.  */
83376 +static forceinline u16
83377 +bitstream_read_u16(struct input_bitstream *is)
83379 +       u16 v;
83381 +       if (unlikely(is->end - is->next < 2))
83382 +               return 0;
83383 +       v = get_unaligned_le16(is->next);
83384 +       is->next += 2;
83385 +       return v;
83388 +/* Read and return the next 32-bit integer embedded in the bitstream.  */
83389 +static forceinline u32
83390 +bitstream_read_u32(struct input_bitstream *is)
83392 +       u32 v;
83394 +       if (unlikely(is->end - is->next < 4))
83395 +               return 0;
83396 +       v = get_unaligned_le32(is->next);
83397 +       is->next += 4;
83398 +       return v;
83401 +/* Read into @dst_buffer an array of literal bytes embedded in the bitstream.
83402 + * Return either a pointer to the byte past the last written, or NULL if the
83403 + * read overflows the input buffer.
83404 + */
83405 +static forceinline void *bitstream_read_bytes(struct input_bitstream *is,
83406 +                                             void *dst_buffer, size_t count)
83408 +       if ((size_t)(is->end - is->next) < count)
83409 +               return NULL;
83410 +       memcpy(dst_buffer, is->next, count);
83411 +       is->next += count;
83412 +       return (u8 *)dst_buffer + count;
83415 +/* Align the input bitstream on a coding-unit boundary.  */
83416 +static forceinline void bitstream_align(struct input_bitstream *is)
83418 +       is->bitsleft = 0;
83419 +       is->bitbuf = 0;
83422 +extern int make_huffman_decode_table(u16 decode_table[], const u32 num_syms,
83423 +                                    const u32 num_bits, const u8 lens[],
83424 +                                    const u32 max_codeword_len,
83425 +                                    u16 working_space[]);
83428 +/* Reads and returns the next Huffman-encoded symbol from a bitstream.  If the
83429 + * input data is exhausted, the Huffman symbol is decoded as if the missing bits
83430 + * are all zeroes.
83431 + */
83432 +static forceinline u32 read_huffsym(struct input_bitstream *istream,
83433 +                                        const u16 decode_table[],
83434 +                                        u32 table_bits,
83435 +                                        u32 max_codeword_len)
83437 +       u32 entry;
83438 +       u32 key_bits;
83440 +       bitstream_ensure_bits(istream, max_codeword_len);
83442 +       /* Index the decode table by the next table_bits bits of the input.  */
83443 +       key_bits = bitstream_peek_bits(istream, table_bits);
83444 +       entry = decode_table[key_bits];
83445 +       if (entry < 0xC000) {
83446 +               /* Fast case: The decode table directly provided the
83447 +                * symbol and codeword length.  The low 11 bits are the
83448 +                * symbol, and the high 5 bits are the codeword length.
83449 +                */
83450 +               bitstream_remove_bits(istream, entry >> 11);
83451 +               return entry & 0x7FF;
83452 +       }
83453 +       /* Slow case: The codeword for the symbol is longer than
83454 +        * table_bits, so the symbol does not have an entry
83455 +        * directly in the first (1 << table_bits) entries of the
83456 +        * decode table.  Traverse the appropriate binary tree
83457 +        * bit-by-bit to decode the symbol.
83458 +        */
83459 +       bitstream_remove_bits(istream, table_bits);
83460 +       do {
83461 +               key_bits = (entry & 0x3FFF) + bitstream_pop_bits(istream, 1);
83462 +       } while ((entry = decode_table[key_bits]) >= 0xC000);
83463 +       return entry;
83467 + * Copy an LZ77 match at (dst - offset) to dst.
83468 + *
83469 + * The length and offset must be already validated --- that is, (dst - offset)
83470 + * can't underrun the output buffer, and (dst + length) can't overrun the output
83471 + * buffer.  Also, the length cannot be 0.
83472 + *
83473 + * @bufend points to the byte past the end of the output buffer.  This function
83474 + * won't write any data beyond this position.
83475 + *
83476 + * Returns dst + length.
83477 + */
83478 +static forceinline u8 *lz_copy(u8 *dst, u32 length, u32 offset, const u8 *bufend,
83479 +                              u32 min_length)
83481 +       const u8 *src = dst - offset;
83483 +       /*
83484 +        * Try to copy one machine word at a time.  On i386 and x86_64 this is
83485 +        * faster than copying one byte at a time, unless the data is
83486 +        * near-random and all the matches have very short lengths.  Note that
83487 +        * since this requires unaligned memory accesses, it won't necessarily
83488 +        * be faster on every architecture.
83489 +        *
83490 +        * Also note that we might copy more than the length of the match.  For
83491 +        * example, if a word is 8 bytes and the match is of length 5, then
83492 +        * we'll simply copy 8 bytes.  This is okay as long as we don't write
83493 +        * beyond the end of the output buffer, hence the check for (bufend -
83494 +        * end >= WORDBYTES - 1).
83495 +        */
83496 +#ifdef FAST_UNALIGNED_ACCESS
83497 +       u8 * const end = dst + length;
83499 +       if (bufend - end >= (ptrdiff_t)(WORDBYTES - 1)) {
83501 +               if (offset >= WORDBYTES) {
83502 +                       /* The source and destination words don't overlap.  */
83504 +                       /* To improve branch prediction, one iteration of this
83505 +                        * loop is unrolled.  Most matches are short and will
83506 +                        * fail the first check.  But if that check passes, then
83507 +                        * it becomes increasing likely that the match is long
83508 +                        * and we'll need to continue copying.
83509 +                        */
83511 +                       copy_unaligned_word(src, dst);
83512 +                       src += WORDBYTES;
83513 +                       dst += WORDBYTES;
83515 +                       if (dst < end) {
83516 +                               do {
83517 +                                       copy_unaligned_word(src, dst);
83518 +                                       src += WORDBYTES;
83519 +                                       dst += WORDBYTES;
83520 +                               } while (dst < end);
83521 +                       }
83522 +                       return end;
83523 +               } else if (offset == 1) {
83525 +                       /* Offset 1 matches are equivalent to run-length
83526 +                        * encoding of the previous byte.  This case is common
83527 +                        * if the data contains many repeated bytes.
83528 +                        */
83529 +                       size_t v = repeat_byte(*(dst - 1));
83531 +                       do {
83532 +                               put_unaligned(v, (size_t *)dst);
83533 +                               src += WORDBYTES;
83534 +                               dst += WORDBYTES;
83535 +                       } while (dst < end);
83536 +                       return end;
83537 +               }
83538 +               /*
83539 +                * We don't bother with special cases for other 'offset <
83540 +                * WORDBYTES', which are usually rarer than 'offset == 1'.  Extra
83541 +                * checks will just slow things down.  Actually, it's possible
83542 +                * to handle all the 'offset < WORDBYTES' cases using the same
83543 +                * code, but it still becomes more complicated doesn't seem any
83544 +                * faster overall; it definitely slows down the more common
83545 +                * 'offset == 1' case.
83546 +                */
83547 +       }
83548 +#endif /* FAST_UNALIGNED_ACCESS */
83550 +       /* Fall back to a bytewise copy.  */
83552 +       if (min_length >= 2) {
83553 +               *dst++ = *src++;
83554 +               length--;
83555 +       }
83556 +       if (min_length >= 3) {
83557 +               *dst++ = *src++;
83558 +               length--;
83559 +       }
83560 +       do {
83561 +               *dst++ = *src++;
83562 +       } while (--length);
83564 +       return dst;
83566 diff --git a/fs/ntfs3/lib/lib.h b/fs/ntfs3/lib/lib.h
83567 new file mode 100644
83568 index 000000000000..f508fbad2e71
83569 --- /dev/null
83570 +++ b/fs/ntfs3/lib/lib.h
83571 @@ -0,0 +1,26 @@
83572 +/* SPDX-License-Identifier: GPL-2.0-or-later */
83574 + * Adapted for linux kernel by Alexander Mamaev:
83575 + * - remove implementations of get_unaligned_
83576 + * - assume GCC is always defined
83577 + * - ISO C90
83578 + * - linux kernel code style
83579 + */
83582 +/* globals from xpress_decompress.c */
83583 +struct xpress_decompressor *xpress_allocate_decompressor(void);
83584 +void xpress_free_decompressor(struct xpress_decompressor *d);
83585 +int xpress_decompress(struct xpress_decompressor *__restrict d,
83586 +                     const void *__restrict compressed_data,
83587 +                     size_t compressed_size,
83588 +                     void *__restrict uncompressed_data,
83589 +                     size_t uncompressed_size);
83591 +/* globals from lzx_decompress.c */
83592 +struct lzx_decompressor *lzx_allocate_decompressor(void);
83593 +void lzx_free_decompressor(struct lzx_decompressor *d);
83594 +int lzx_decompress(struct lzx_decompressor *__restrict d,
83595 +                  const void *__restrict compressed_data,
83596 +                  size_t compressed_size, void *__restrict uncompressed_data,
83597 +                  size_t uncompressed_size);
83598 diff --git a/fs/ntfs3/lib/lzx_decompress.c b/fs/ntfs3/lib/lzx_decompress.c
83599 new file mode 100644
83600 index 000000000000..77a381a693d1
83601 --- /dev/null
83602 +++ b/fs/ntfs3/lib/lzx_decompress.c
83603 @@ -0,0 +1,683 @@
83604 +// SPDX-License-Identifier: GPL-2.0-or-later
83606 + * lzx_decompress.c - A decompressor for the LZX compression format, which can
83607 + * be used in "System Compressed" files.  This is based on the code from wimlib.
83608 + * This code only supports a window size (dictionary size) of 32768 bytes, since
83609 + * this is the only size used in System Compression.
83610 + *
83611 + * Copyright (C) 2015 Eric Biggers
83612 + *
83613 + * This program is free software: you can redistribute it and/or modify it under
83614 + * the terms of the GNU General Public License as published by the Free Software
83615 + * Foundation, either version 2 of the License, or (at your option) any later
83616 + * version.
83617 + *
83618 + * This program is distributed in the hope that it will be useful, but WITHOUT
83619 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
83620 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
83621 + * details.
83622 + *
83623 + * You should have received a copy of the GNU General Public License along with
83624 + * this program.  If not, see <http://www.gnu.org/licenses/>.
83625 + */
83627 +#include "decompress_common.h"
83628 +#include "lib.h"
83630 +/* Number of literal byte values  */
83631 +#define LZX_NUM_CHARS                  256
83633 +/* The smallest and largest allowed match lengths  */
83634 +#define LZX_MIN_MATCH_LEN              2
83635 +#define LZX_MAX_MATCH_LEN              257
83637 +/* Number of distinct match lengths that can be represented  */
83638 +#define LZX_NUM_LENS                   (LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1)
83640 +/* Number of match lengths for which no length symbol is required  */
83641 +#define LZX_NUM_PRIMARY_LENS           7
83642 +#define LZX_NUM_LEN_HEADERS            (LZX_NUM_PRIMARY_LENS + 1)
83644 +/* Valid values of the 3-bit block type field  */
83645 +#define LZX_BLOCKTYPE_VERBATIM         1
83646 +#define LZX_BLOCKTYPE_ALIGNED          2
83647 +#define LZX_BLOCKTYPE_UNCOMPRESSED     3
83649 +/* Number of offset slots for a window size of 32768  */
83650 +#define LZX_NUM_OFFSET_SLOTS           30
83652 +/* Number of symbols in the main code for a window size of 32768  */
83653 +#define LZX_MAINCODE_NUM_SYMBOLS       \
83654 +       (LZX_NUM_CHARS + (LZX_NUM_OFFSET_SLOTS * LZX_NUM_LEN_HEADERS))
83656 +/* Number of symbols in the length code  */
83657 +#define LZX_LENCODE_NUM_SYMBOLS                (LZX_NUM_LENS - LZX_NUM_PRIMARY_LENS)
83659 +/* Number of symbols in the precode  */
83660 +#define LZX_PRECODE_NUM_SYMBOLS                20
83662 +/* Number of bits in which each precode codeword length is represented  */
83663 +#define LZX_PRECODE_ELEMENT_SIZE       4
83665 +/* Number of low-order bits of each match offset that are entropy-encoded in
83666 + * aligned offset blocks
83667 + */
83668 +#define LZX_NUM_ALIGNED_OFFSET_BITS    3
83670 +/* Number of symbols in the aligned offset code  */
83671 +#define LZX_ALIGNEDCODE_NUM_SYMBOLS    (1 << LZX_NUM_ALIGNED_OFFSET_BITS)
83673 +/* Mask for the match offset bits that are entropy-encoded in aligned offset
83674 + * blocks
83675 + */
83676 +#define LZX_ALIGNED_OFFSET_BITMASK     ((1 << LZX_NUM_ALIGNED_OFFSET_BITS) - 1)
83678 +/* Number of bits in which each aligned offset codeword length is represented  */
83679 +#define LZX_ALIGNEDCODE_ELEMENT_SIZE   3
83681 +/* Maximum lengths (in bits) of the codewords in each Huffman code  */
83682 +#define LZX_MAX_MAIN_CODEWORD_LEN      16
83683 +#define LZX_MAX_LEN_CODEWORD_LEN       16
83684 +#define LZX_MAX_PRE_CODEWORD_LEN       ((1 << LZX_PRECODE_ELEMENT_SIZE) - 1)
83685 +#define LZX_MAX_ALIGNED_CODEWORD_LEN   ((1 << LZX_ALIGNEDCODE_ELEMENT_SIZE) - 1)
83687 +/* The default "filesize" value used in pre/post-processing.  In the LZX format
83688 + * used in cabinet files this value must be given to the decompressor, whereas
83689 + * in the LZX format used in WIM files and system-compressed files this value is
83690 + * fixed at 12000000.
83691 + */
83692 +#define LZX_DEFAULT_FILESIZE           12000000
83694 +/* Assumed block size when the encoded block size begins with a 0 bit.  */
83695 +#define LZX_DEFAULT_BLOCK_SIZE         32768
83697 +/* Number of offsets in the recent (or "repeat") offsets queue.  */
83698 +#define LZX_NUM_RECENT_OFFSETS         3
83700 +/* These values are chosen for fast decompression.  */
83701 +#define LZX_MAINCODE_TABLEBITS         11
83702 +#define LZX_LENCODE_TABLEBITS          10
83703 +#define LZX_PRECODE_TABLEBITS          6
83704 +#define LZX_ALIGNEDCODE_TABLEBITS      7
83706 +#define LZX_READ_LENS_MAX_OVERRUN      50
83708 +/* Mapping: offset slot => first match offset that uses that offset slot.
83709 + */
83710 +static const u32 lzx_offset_slot_base[LZX_NUM_OFFSET_SLOTS + 1] = {
83711 +       0,      1,      2,      3,      4,      /* 0  --- 4  */
83712 +       6,      8,      12,     16,     24,     /* 5  --- 9  */
83713 +       32,     48,     64,     96,     128,    /* 10 --- 14 */
83714 +       192,    256,    384,    512,    768,    /* 15 --- 19 */
83715 +       1024,   1536,   2048,   3072,   4096,   /* 20 --- 24 */
83716 +       6144,   8192,   12288,  16384,  24576,  /* 25 --- 29 */
83717 +       32768,                                  /* extra     */
83720 +/* Mapping: offset slot => how many extra bits must be read and added to the
83721 + * corresponding offset slot base to decode the match offset.
83722 + */
83723 +static const u8 lzx_extra_offset_bits[LZX_NUM_OFFSET_SLOTS] = {
83724 +       0,      0,      0,      0,      1,
83725 +       1,      2,      2,      3,      3,
83726 +       4,      4,      5,      5,      6,
83727 +       6,      7,      7,      8,      8,
83728 +       9,      9,      10,     10,     11,
83729 +       11,     12,     12,     13,     13,
83732 +/* Reusable heap-allocated memory for LZX decompression  */
83733 +struct lzx_decompressor {
83735 +       /* Huffman decoding tables, and arrays that map symbols to codeword
83736 +        * lengths
83737 +        */
83739 +       u16 maincode_decode_table[(1 << LZX_MAINCODE_TABLEBITS) +
83740 +                                       (LZX_MAINCODE_NUM_SYMBOLS * 2)];
83741 +       u8 maincode_lens[LZX_MAINCODE_NUM_SYMBOLS + LZX_READ_LENS_MAX_OVERRUN];
83744 +       u16 lencode_decode_table[(1 << LZX_LENCODE_TABLEBITS) +
83745 +                                       (LZX_LENCODE_NUM_SYMBOLS * 2)];
83746 +       u8 lencode_lens[LZX_LENCODE_NUM_SYMBOLS + LZX_READ_LENS_MAX_OVERRUN];
83749 +       u16 alignedcode_decode_table[(1 << LZX_ALIGNEDCODE_TABLEBITS) +
83750 +                                       (LZX_ALIGNEDCODE_NUM_SYMBOLS * 2)];
83751 +       u8 alignedcode_lens[LZX_ALIGNEDCODE_NUM_SYMBOLS];
83753 +       u16 precode_decode_table[(1 << LZX_PRECODE_TABLEBITS) +
83754 +                                (LZX_PRECODE_NUM_SYMBOLS * 2)];
83755 +       u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
83757 +       /* Temporary space for make_huffman_decode_table()  */
83758 +       u16 working_space[2 * (1 + LZX_MAX_MAIN_CODEWORD_LEN) +
83759 +                         LZX_MAINCODE_NUM_SYMBOLS];
83762 +static void undo_e8_translation(void *target, s32 input_pos)
83764 +       s32 abs_offset, rel_offset;
83766 +       abs_offset = get_unaligned_le32(target);
83767 +       if (abs_offset >= 0) {
83768 +               if (abs_offset < LZX_DEFAULT_FILESIZE) {
83769 +                       /* "good translation" */
83770 +                       rel_offset = abs_offset - input_pos;
83771 +                       put_unaligned_le32(rel_offset, target);
83772 +               }
83773 +       } else {
83774 +               if (abs_offset >= -input_pos) {
83775 +                       /* "compensating translation" */
83776 +                       rel_offset = abs_offset + LZX_DEFAULT_FILESIZE;
83777 +                       put_unaligned_le32(rel_offset, target);
83778 +               }
83779 +       }
83783 + * Undo the 'E8' preprocessing used in LZX.  Before compression, the
83784 + * uncompressed data was preprocessed by changing the targets of suspected x86
83785 + * CALL instructions from relative offsets to absolute offsets.  After
83786 + * match/literal decoding, the decompressor must undo the translation.
83787 + */
83788 +static void lzx_postprocess(u8 *data, u32 size)
83790 +       /*
83791 +        * A worthwhile optimization is to push the end-of-buffer check into the
83792 +        * relatively rare E8 case.  This is possible if we replace the last six
83793 +        * bytes of data with E8 bytes; then we are guaranteed to hit an E8 byte
83794 +        * before reaching end-of-buffer.  In addition, this scheme guarantees
83795 +        * that no translation can begin following an E8 byte in the last 10
83796 +        * bytes because a 4-byte offset containing E8 as its high byte is a
83797 +        * large negative number that is not valid for translation.  That is
83798 +        * exactly what we need.
83799 +        */
83800 +       u8 *tail;
83801 +       u8 saved_bytes[6];
83802 +       u8 *p;
83804 +       if (size <= 10)
83805 +               return;
83807 +       tail = &data[size - 6];
83808 +       memcpy(saved_bytes, tail, 6);
83809 +       memset(tail, 0xE8, 6);
83810 +       p = data;
83811 +       for (;;) {
83812 +               while (*p != 0xE8)
83813 +                       p++;
83814 +               if (p >= tail)
83815 +                       break;
83816 +               undo_e8_translation(p + 1, p - data);
83817 +               p += 5;
83818 +       }
83819 +       memcpy(tail, saved_bytes, 6);
83822 +/* Read a Huffman-encoded symbol using the precode.  */
83823 +static forceinline u32 read_presym(const struct lzx_decompressor *d,
83824 +                                       struct input_bitstream *is)
83826 +       return read_huffsym(is, d->precode_decode_table,
83827 +                           LZX_PRECODE_TABLEBITS, LZX_MAX_PRE_CODEWORD_LEN);
83830 +/* Read a Huffman-encoded symbol using the main code.  */
83831 +static forceinline u32 read_mainsym(const struct lzx_decompressor *d,
83832 +                                        struct input_bitstream *is)
83834 +       return read_huffsym(is, d->maincode_decode_table,
83835 +                           LZX_MAINCODE_TABLEBITS, LZX_MAX_MAIN_CODEWORD_LEN);
83838 +/* Read a Huffman-encoded symbol using the length code.  */
83839 +static forceinline u32 read_lensym(const struct lzx_decompressor *d,
83840 +                                       struct input_bitstream *is)
83842 +       return read_huffsym(is, d->lencode_decode_table,
83843 +                           LZX_LENCODE_TABLEBITS, LZX_MAX_LEN_CODEWORD_LEN);
83846 +/* Read a Huffman-encoded symbol using the aligned offset code.  */
83847 +static forceinline u32 read_alignedsym(const struct lzx_decompressor *d,
83848 +                                           struct input_bitstream *is)
83850 +       return read_huffsym(is, d->alignedcode_decode_table,
83851 +                           LZX_ALIGNEDCODE_TABLEBITS,
83852 +                           LZX_MAX_ALIGNED_CODEWORD_LEN);
83856 + * Read the precode from the compressed input bitstream, then use it to decode
83857 + * @num_lens codeword length values.
83858 + *
83859 + * @is:                The input bitstream.
83860 + *
83861 + * @lens:      An array that contains the length values from the previous time
83862 + *             the codeword lengths for this Huffman code were read, or all 0's
83863 + *             if this is the first time.  This array must have at least
83864 + *             (@num_lens + LZX_READ_LENS_MAX_OVERRUN) entries.
83865 + *
83866 + * @num_lens:  Number of length values to decode.
83867 + *
83868 + * Returns 0 on success, or -1 if the data was invalid.
83869 + */
83870 +static int lzx_read_codeword_lens(struct lzx_decompressor *d,
83871 +                                 struct input_bitstream *is,
83872 +                                 u8 *lens, u32 num_lens)
83874 +       u8 *len_ptr = lens;
83875 +       u8 *lens_end = lens + num_lens;
83876 +       int i;
83878 +       /* Read the lengths of the precode codewords.  These are given
83879 +        * explicitly.
83880 +        */
83881 +       for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++) {
83882 +               d->precode_lens[i] =
83883 +                       bitstream_read_bits(is, LZX_PRECODE_ELEMENT_SIZE);
83884 +       }
83886 +       /* Make the decoding table for the precode.  */
83887 +       if (make_huffman_decode_table(d->precode_decode_table,
83888 +                                     LZX_PRECODE_NUM_SYMBOLS,
83889 +                                     LZX_PRECODE_TABLEBITS,
83890 +                                     d->precode_lens,
83891 +                                     LZX_MAX_PRE_CODEWORD_LEN,
83892 +                                     d->working_space))
83893 +               return -1;
83895 +       /* Decode the codeword lengths.  */
83896 +       do {
83897 +               u32 presym;
83898 +               u8 len;
83900 +               /* Read the next precode symbol.  */
83901 +               presym = read_presym(d, is);
83902 +               if (presym < 17) {
83903 +                       /* Difference from old length  */
83904 +                       len = *len_ptr - presym;
83905 +                       if ((s8)len < 0)
83906 +                               len += 17;
83907 +                       *len_ptr++ = len;
83908 +               } else {
83909 +                       /* Special RLE values  */
83911 +                       u32 run_len;
83913 +                       if (presym == 17) {
83914 +                               /* Run of 0's  */
83915 +                               run_len = 4 + bitstream_read_bits(is, 4);
83916 +                               len = 0;
83917 +                       } else if (presym == 18) {
83918 +                               /* Longer run of 0's  */
83919 +                               run_len = 20 + bitstream_read_bits(is, 5);
83920 +                               len = 0;
83921 +                       } else {
83922 +                               /* Run of identical lengths  */
83923 +                               run_len = 4 + bitstream_read_bits(is, 1);
83924 +                               presym = read_presym(d, is);
83925 +                               if (presym > 17)
83926 +                                       return -1;
83927 +                               len = *len_ptr - presym;
83928 +                               if ((s8)len < 0)
83929 +                                       len += 17;
83930 +                       }
83932 +                       do {
83933 +                               *len_ptr++ = len;
83934 +                       } while (--run_len);
83935 +                       /* Worst case overrun is when presym == 18,
83936 +                        * run_len == 20 + 31, and only 1 length was remaining.
83937 +                        * So LZX_READ_LENS_MAX_OVERRUN == 50.
83938 +                        *
83939 +                        * Overrun while reading the first half of maincode_lens
83940 +                        * can corrupt the previous values in the second half.
83941 +                        * This doesn't really matter because the resulting
83942 +                        * lengths will still be in range, and data that
83943 +                        * generates overruns is invalid anyway.
83944 +                        */
83945 +               }
83946 +       } while (len_ptr < lens_end);
83948 +       return 0;
83952 + * Read the header of an LZX block and save the block type and (uncompressed)
83953 + * size in *block_type_ret and *block_size_ret, respectively.
83954 + *
83955 + * If the block is compressed, also update the Huffman decode @tables with the
83956 + * new Huffman codes.  If the block is uncompressed, also update the match
83957 + * offset @queue with the new match offsets.
83958 + *
83959 + * Return 0 on success, or -1 if the data was invalid.
83960 + */
83961 +static int lzx_read_block_header(struct lzx_decompressor *d,
83962 +                                struct input_bitstream *is,
83963 +                                int *block_type_ret,
83964 +                                u32 *block_size_ret,
83965 +                                u32 recent_offsets[])
83967 +       int block_type;
83968 +       u32 block_size;
83969 +       int i;
83971 +       bitstream_ensure_bits(is, 4);
83973 +       /* The first three bits tell us what kind of block it is, and should be
83974 +        * one of the LZX_BLOCKTYPE_* values.
83975 +        */
83976 +       block_type = bitstream_pop_bits(is, 3);
83978 +       /* Read the block size.  */
83979 +       if (bitstream_pop_bits(is, 1)) {
83980 +               block_size = LZX_DEFAULT_BLOCK_SIZE;
83981 +       } else {
83982 +               block_size = 0;
83983 +               block_size |= bitstream_read_bits(is, 8);
83984 +               block_size <<= 8;
83985 +               block_size |= bitstream_read_bits(is, 8);
83986 +       }
83988 +       switch (block_type) {
83990 +       case LZX_BLOCKTYPE_ALIGNED:
83992 +               /* Read the aligned offset code and prepare its decode table.
83993 +                */
83995 +               for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
83996 +                       d->alignedcode_lens[i] =
83997 +                               bitstream_read_bits(is,
83998 +                                                   LZX_ALIGNEDCODE_ELEMENT_SIZE);
83999 +               }
84001 +               if (make_huffman_decode_table(d->alignedcode_decode_table,
84002 +                                             LZX_ALIGNEDCODE_NUM_SYMBOLS,
84003 +                                             LZX_ALIGNEDCODE_TABLEBITS,
84004 +                                             d->alignedcode_lens,
84005 +                                             LZX_MAX_ALIGNED_CODEWORD_LEN,
84006 +                                             d->working_space))
84007 +                       return -1;
84009 +               /* Fall though, since the rest of the header for aligned offset
84010 +                * blocks is the same as that for verbatim blocks.
84011 +                */
84012 +               fallthrough;
84014 +       case LZX_BLOCKTYPE_VERBATIM:
84016 +               /* Read the main code and prepare its decode table.
84017 +                *
84018 +                * Note that the codeword lengths in the main code are encoded
84019 +                * in two parts: one part for literal symbols, and one part for
84020 +                * match symbols.
84021 +                */
84023 +               if (lzx_read_codeword_lens(d, is, d->maincode_lens,
84024 +                                          LZX_NUM_CHARS))
84025 +                       return -1;
84027 +               if (lzx_read_codeword_lens(d, is,
84028 +                                          d->maincode_lens + LZX_NUM_CHARS,
84029 +                                          LZX_MAINCODE_NUM_SYMBOLS - LZX_NUM_CHARS))
84030 +                       return -1;
84032 +               if (make_huffman_decode_table(d->maincode_decode_table,
84033 +                                             LZX_MAINCODE_NUM_SYMBOLS,
84034 +                                             LZX_MAINCODE_TABLEBITS,
84035 +                                             d->maincode_lens,
84036 +                                             LZX_MAX_MAIN_CODEWORD_LEN,
84037 +                                             d->working_space))
84038 +                       return -1;
84040 +               /* Read the length code and prepare its decode table.  */
84042 +               if (lzx_read_codeword_lens(d, is, d->lencode_lens,
84043 +                                          LZX_LENCODE_NUM_SYMBOLS))
84044 +                       return -1;
84046 +               if (make_huffman_decode_table(d->lencode_decode_table,
84047 +                                             LZX_LENCODE_NUM_SYMBOLS,
84048 +                                             LZX_LENCODE_TABLEBITS,
84049 +                                             d->lencode_lens,
84050 +                                             LZX_MAX_LEN_CODEWORD_LEN,
84051 +                                             d->working_space))
84052 +                       return -1;
84054 +               break;
84056 +       case LZX_BLOCKTYPE_UNCOMPRESSED:
84058 +               /* Before reading the three recent offsets from the uncompressed
84059 +                * block header, the stream must be aligned on a 16-bit
84060 +                * boundary.  But if the stream is *already* aligned, then the
84061 +                * next 16 bits must be discarded.
84062 +                */
84063 +               bitstream_ensure_bits(is, 1);
84064 +               bitstream_align(is);
84066 +               recent_offsets[0] = bitstream_read_u32(is);
84067 +               recent_offsets[1] = bitstream_read_u32(is);
84068 +               recent_offsets[2] = bitstream_read_u32(is);
84070 +               /* Offsets of 0 are invalid.  */
84071 +               if (recent_offsets[0] == 0 || recent_offsets[1] == 0 ||
84072 +                   recent_offsets[2] == 0)
84073 +                       return -1;
84074 +               break;
84076 +       default:
84077 +               /* Unrecognized block type.  */
84078 +               return -1;
84079 +       }
84081 +       *block_type_ret = block_type;
84082 +       *block_size_ret = block_size;
84083 +       return 0;
84086 +/* Decompress a block of LZX-compressed data.  */
84087 +static int lzx_decompress_block(const struct lzx_decompressor *d,
84088 +                               struct input_bitstream *is,
84089 +                               int block_type, u32 block_size,
84090 +                               u8 * const out_begin, u8 *out_next,
84091 +                               u32 recent_offsets[])
84093 +       u8 * const block_end = out_next + block_size;
84094 +       u32 ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
84096 +       do {
84097 +               u32 mainsym;
84098 +               u32 match_len;
84099 +               u32 match_offset;
84100 +               u32 offset_slot;
84101 +               u32 num_extra_bits;
84103 +               mainsym = read_mainsym(d, is);
84104 +               if (mainsym < LZX_NUM_CHARS) {
84105 +                       /* Literal  */
84106 +                       *out_next++ = mainsym;
84107 +                       continue;
84108 +               }
84110 +               /* Match  */
84112 +               /* Decode the length header and offset slot.  */
84113 +               mainsym -= LZX_NUM_CHARS;
84114 +               match_len = mainsym % LZX_NUM_LEN_HEADERS;
84115 +               offset_slot = mainsym / LZX_NUM_LEN_HEADERS;
84117 +               /* If needed, read a length symbol to decode the full length. */
84118 +               if (match_len == LZX_NUM_PRIMARY_LENS)
84119 +                       match_len += read_lensym(d, is);
84120 +               match_len += LZX_MIN_MATCH_LEN;
84122 +               if (offset_slot < LZX_NUM_RECENT_OFFSETS) {
84123 +                       /* Repeat offset  */
84125 +                       /* Note: This isn't a real LRU queue, since using the R2
84126 +                        * offset doesn't bump the R1 offset down to R2.  This
84127 +                        * quirk allows all 3 recent offsets to be handled by
84128 +                        * the same code.  (For R0, the swap is a no-op.)
84129 +                        */
84130 +                       match_offset = recent_offsets[offset_slot];
84131 +                       recent_offsets[offset_slot] = recent_offsets[0];
84132 +                       recent_offsets[0] = match_offset;
84133 +               } else {
84134 +                       /* Explicit offset  */
84136 +                       /* Look up the number of extra bits that need to be read
84137 +                        * to decode offsets with this offset slot.
84138 +                        */
84139 +                       num_extra_bits = lzx_extra_offset_bits[offset_slot];
84141 +                       /* Start with the offset slot base value.  */
84142 +                       match_offset = lzx_offset_slot_base[offset_slot];
84144 +                       /* In aligned offset blocks, the low-order 3 bits of
84145 +                        * each offset are encoded using the aligned offset
84146 +                        * code.  Otherwise, all the extra bits are literal.
84147 +                        */
84149 +                       if ((num_extra_bits & ones_if_aligned) >= LZX_NUM_ALIGNED_OFFSET_BITS) {
84150 +                               match_offset +=
84151 +                                       bitstream_read_bits(is, num_extra_bits -
84152 +                                                               LZX_NUM_ALIGNED_OFFSET_BITS)
84153 +                                                       << LZX_NUM_ALIGNED_OFFSET_BITS;
84154 +                               match_offset += read_alignedsym(d, is);
84155 +                       } else {
84156 +                               match_offset += bitstream_read_bits(is, num_extra_bits);
84157 +                       }
84159 +                       /* Adjust the offset.  */
84160 +                       match_offset -= (LZX_NUM_RECENT_OFFSETS - 1);
84162 +                       /* Update the recent offsets.  */
84163 +                       recent_offsets[2] = recent_offsets[1];
84164 +                       recent_offsets[1] = recent_offsets[0];
84165 +                       recent_offsets[0] = match_offset;
84166 +               }
84168 +               /* Validate the match, then copy it to the current position.  */
84170 +               if (match_len > (size_t)(block_end - out_next))
84171 +                       return -1;
84173 +               if (match_offset > (size_t)(out_next - out_begin))
84174 +                       return -1;
84176 +               out_next = lz_copy(out_next, match_len, match_offset,
84177 +                                  block_end, LZX_MIN_MATCH_LEN);
84179 +       } while (out_next != block_end);
84181 +       return 0;
84185 + * lzx_allocate_decompressor - Allocate an LZX decompressor
84186 + *
84187 + * Return the pointer to the decompressor on success, or return NULL and set
84188 + * errno on failure.
84189 + */
84190 +struct lzx_decompressor *lzx_allocate_decompressor(void)
84192 +       return kmalloc(sizeof(struct lzx_decompressor), GFP_NOFS);
84196 + * lzx_decompress - Decompress a buffer of LZX-compressed data
84197 + *
84198 + * @decompressor:      A decompressor allocated with lzx_allocate_decompressor()
84199 + * @compressed_data:   The buffer of data to decompress
84200 + * @compressed_size:   Number of bytes of compressed data
84201 + * @uncompressed_data: The buffer in which to store the decompressed data
84202 + * @uncompressed_size: The number of bytes the data decompresses into
84203 + *
84204 + * Return 0 on success, or return -1 and set errno on failure.
84205 + */
84206 +int lzx_decompress(struct lzx_decompressor *decompressor,
84207 +                  const void *compressed_data, size_t compressed_size,
84208 +                  void *uncompressed_data, size_t uncompressed_size)
84210 +       struct lzx_decompressor *d = decompressor;
84211 +       u8 * const out_begin = uncompressed_data;
84212 +       u8 *out_next = out_begin;
84213 +       u8 * const out_end = out_begin + uncompressed_size;
84214 +       struct input_bitstream is;
84215 +       u32 recent_offsets[LZX_NUM_RECENT_OFFSETS] = {1, 1, 1};
84216 +       int e8_status = 0;
84218 +       init_input_bitstream(&is, compressed_data, compressed_size);
84220 +       /* Codeword lengths begin as all 0's for delta encoding purposes.  */
84221 +       memset(d->maincode_lens, 0, LZX_MAINCODE_NUM_SYMBOLS);
84222 +       memset(d->lencode_lens, 0, LZX_LENCODE_NUM_SYMBOLS);
84224 +       /* Decompress blocks until we have all the uncompressed data.  */
84226 +       while (out_next != out_end) {
84227 +               int block_type;
84228 +               u32 block_size;
84230 +               if (lzx_read_block_header(d, &is, &block_type, &block_size,
84231 +                                         recent_offsets))
84232 +                       goto invalid;
84234 +               if (block_size < 1 || block_size > (size_t)(out_end - out_next))
84235 +                       goto invalid;
84237 +               if (block_type != LZX_BLOCKTYPE_UNCOMPRESSED) {
84239 +                       /* Compressed block  */
84241 +                       if (lzx_decompress_block(d,
84242 +                                                &is,
84243 +                                                block_type,
84244 +                                                block_size,
84245 +                                                out_begin,
84246 +                                                out_next,
84247 +                                                recent_offsets))
84248 +                               goto invalid;
84250 +                       e8_status |= d->maincode_lens[0xe8];
84251 +                       out_next += block_size;
84252 +               } else {
84253 +                       /* Uncompressed block  */
84255 +                       out_next = bitstream_read_bytes(&is, out_next,
84256 +                                                       block_size);
84257 +                       if (!out_next)
84258 +                               goto invalid;
84260 +                       if (block_size & 1)
84261 +                               bitstream_read_byte(&is);
84263 +                       e8_status = 1;
84264 +               }
84265 +       }
84267 +       /* Postprocess the data unless it cannot possibly contain 0xe8 bytes. */
84268 +       if (e8_status)
84269 +               lzx_postprocess(uncompressed_data, uncompressed_size);
84271 +       return 0;
84273 +invalid:
84274 +       return -1;
84278 + * lzx_free_decompressor - Free an LZX decompressor
84279 + *
84280 + * @decompressor:       A decompressor that was allocated with
84281 + *                     lzx_allocate_decompressor(), or NULL.
84282 + */
84283 +void lzx_free_decompressor(struct lzx_decompressor *decompressor)
84285 +       kfree(decompressor);
84287 diff --git a/fs/ntfs3/lib/xpress_decompress.c b/fs/ntfs3/lib/xpress_decompress.c
84288 new file mode 100644
84289 index 000000000000..3d98f36a981e
84290 --- /dev/null
84291 +++ b/fs/ntfs3/lib/xpress_decompress.c
84292 @@ -0,0 +1,155 @@
84293 +// SPDX-License-Identifier: GPL-2.0-or-later
84295 + * xpress_decompress.c - A decompressor for the XPRESS compression format
84296 + * (Huffman variant), which can be used in "System Compressed" files.  This is
84297 + * based on the code from wimlib.
84298 + *
84299 + * Copyright (C) 2015 Eric Biggers
84300 + *
84301 + * This program is free software: you can redistribute it and/or modify it under
84302 + * the terms of the GNU General Public License as published by the Free Software
84303 + * Foundation, either version 2 of the License, or (at your option) any later
84304 + * version.
84305 + *
84306 + * This program is distributed in the hope that it will be useful, but WITHOUT
84307 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
84308 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
84309 + * details.
84310 + *
84311 + * You should have received a copy of the GNU General Public License along with
84312 + * this program.  If not, see <http://www.gnu.org/licenses/>.
84313 + */
84315 +#include "decompress_common.h"
84316 +#include "lib.h"
84318 +#define XPRESS_NUM_SYMBOLS     512
84319 +#define XPRESS_MAX_CODEWORD_LEN        15
84320 +#define XPRESS_MIN_MATCH_LEN   3
84322 +/* This value is chosen for fast decompression.  */
84323 +#define XPRESS_TABLEBITS 12
84325 +/* Reusable heap-allocated memory for XPRESS decompression  */
84326 +struct xpress_decompressor {
84328 +       /* The Huffman decoding table  */
84329 +       u16 decode_table[(1 << XPRESS_TABLEBITS) + 2 * XPRESS_NUM_SYMBOLS];
84331 +       /* An array that maps symbols to codeword lengths  */
84332 +       u8 lens[XPRESS_NUM_SYMBOLS];
84334 +       /* Temporary space for make_huffman_decode_table()  */
84335 +       u16 working_space[2 * (1 + XPRESS_MAX_CODEWORD_LEN) +
84336 +                         XPRESS_NUM_SYMBOLS];
84340 + * xpress_allocate_decompressor - Allocate an XPRESS decompressor
84341 + *
84342 + * Return the pointer to the decompressor on success, or return NULL and set
84343 + * errno on failure.
84344 + */
84345 +struct xpress_decompressor *xpress_allocate_decompressor(void)
84347 +       return kmalloc(sizeof(struct xpress_decompressor), GFP_NOFS);
84351 + * xpress_decompress - Decompress a buffer of XPRESS-compressed data
84352 + *
84353 + * @decompressor:       A decompressor that was allocated with
84354 + *                     xpress_allocate_decompressor()
84355 + * @compressed_data:   The buffer of data to decompress
84356 + * @compressed_size:   Number of bytes of compressed data
84357 + * @uncompressed_data: The buffer in which to store the decompressed data
84358 + * @uncompressed_size: The number of bytes the data decompresses into
84359 + *
84360 + * Return 0 on success, or return -1 and set errno on failure.
84361 + */
84362 +int xpress_decompress(struct xpress_decompressor *decompressor,
84363 +                     const void *compressed_data, size_t compressed_size,
84364 +                     void *uncompressed_data, size_t uncompressed_size)
84366 +       struct xpress_decompressor *d = decompressor;
84367 +       const u8 * const in_begin = compressed_data;
84368 +       u8 * const out_begin = uncompressed_data;
84369 +       u8 *out_next = out_begin;
84370 +       u8 * const out_end = out_begin + uncompressed_size;
84371 +       struct input_bitstream is;
84372 +       u32 i;
84374 +       /* Read the Huffman codeword lengths.  */
84375 +       if (compressed_size < XPRESS_NUM_SYMBOLS / 2)
84376 +               goto invalid;
84377 +       for (i = 0; i < XPRESS_NUM_SYMBOLS / 2; i++) {
84378 +               d->lens[i*2 + 0] = in_begin[i] & 0xF;
84379 +               d->lens[i*2 + 1] = in_begin[i] >> 4;
84380 +       }
84382 +       /* Build a decoding table for the Huffman code.  */
84383 +       if (make_huffman_decode_table(d->decode_table, XPRESS_NUM_SYMBOLS,
84384 +                                     XPRESS_TABLEBITS, d->lens,
84385 +                                     XPRESS_MAX_CODEWORD_LEN,
84386 +                                     d->working_space))
84387 +               goto invalid;
84389 +       /* Decode the matches and literals.  */
84391 +       init_input_bitstream(&is, in_begin + XPRESS_NUM_SYMBOLS / 2,
84392 +                            compressed_size - XPRESS_NUM_SYMBOLS / 2);
84394 +       while (out_next != out_end) {
84395 +               u32 sym;
84396 +               u32 log2_offset;
84397 +               u32 length;
84398 +               u32 offset;
84400 +               sym = read_huffsym(&is, d->decode_table,
84401 +                                  XPRESS_TABLEBITS, XPRESS_MAX_CODEWORD_LEN);
84402 +               if (sym < 256) {
84403 +                       /* Literal  */
84404 +                       *out_next++ = sym;
84405 +               } else {
84406 +                       /* Match  */
84407 +                       length = sym & 0xf;
84408 +                       log2_offset = (sym >> 4) & 0xf;
84410 +                       bitstream_ensure_bits(&is, 16);
84412 +                       offset = ((u32)1 << log2_offset) |
84413 +                                bitstream_pop_bits(&is, log2_offset);
84415 +                       if (length == 0xf) {
84416 +                               length += bitstream_read_byte(&is);
84417 +                               if (length == 0xf + 0xff)
84418 +                                       length = bitstream_read_u16(&is);
84419 +                       }
84420 +                       length += XPRESS_MIN_MATCH_LEN;
84422 +                       if (offset > (size_t)(out_next - out_begin))
84423 +                               goto invalid;
84425 +                       if (length > (size_t)(out_end - out_next))
84426 +                               goto invalid;
84428 +                       out_next = lz_copy(out_next, length, offset, out_end,
84429 +                                          XPRESS_MIN_MATCH_LEN);
84430 +               }
84431 +       }
84432 +       return 0;
84434 +invalid:
84435 +       return -1;
84439 + * xpress_free_decompressor - Free an XPRESS decompressor
84440 + *
84441 + * @decompressor:       A decompressor that was allocated with
84442 + *                     xpress_allocate_decompressor(), or NULL.
84443 + */
84444 +void xpress_free_decompressor(struct xpress_decompressor *decompressor)
84446 +       kfree(decompressor);
84448 diff --git a/fs/ntfs3/lznt.c b/fs/ntfs3/lznt.c
84449 new file mode 100644
84450 index 000000000000..ead9ab7d69b3
84451 --- /dev/null
84452 +++ b/fs/ntfs3/lznt.c
84453 @@ -0,0 +1,452 @@
84454 +// SPDX-License-Identifier: GPL-2.0
84456 + *
84457 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
84458 + *
84459 + */
84460 +#include <linux/blkdev.h>
84461 +#include <linux/buffer_head.h>
84462 +#include <linux/fs.h>
84463 +#include <linux/nls.h>
84465 +#include "debug.h"
84466 +#include "ntfs.h"
84467 +#include "ntfs_fs.h"
84469 +// clang-format off
84470 +/* src buffer is zero */
84471 +#define LZNT_ERROR_ALL_ZEROS   1
84472 +#define LZNT_CHUNK_SIZE                0x1000
84473 +// clang-format on
84475 +struct lznt_hash {
84476 +       const u8 *p1;
84477 +       const u8 *p2;
84480 +struct lznt {
84481 +       const u8 *unc;
84482 +       const u8 *unc_end;
84483 +       const u8 *best_match;
84484 +       size_t max_len;
84485 +       bool std;
84487 +       struct lznt_hash hash[LZNT_CHUNK_SIZE];
84490 +static inline size_t get_match_len(const u8 *ptr, const u8 *end, const u8 *prev,
84491 +                                  size_t max_len)
84493 +       size_t len = 0;
84495 +       while (ptr + len < end && ptr[len] == prev[len] && ++len < max_len)
84496 +               ;
84497 +       return len;
84500 +static size_t longest_match_std(const u8 *src, struct lznt *ctx)
84502 +       size_t hash_index;
84503 +       size_t len1 = 0, len2 = 0;
84504 +       const u8 **hash;
84506 +       hash_index =
84507 +               ((40543U * ((((src[0] << 4) ^ src[1]) << 4) ^ src[2])) >> 4) &
84508 +               (LZNT_CHUNK_SIZE - 1);
84510 +       hash = &(ctx->hash[hash_index].p1);
84512 +       if (hash[0] >= ctx->unc && hash[0] < src && hash[0][0] == src[0] &&
84513 +           hash[0][1] == src[1] && hash[0][2] == src[2]) {
84514 +               len1 = 3;
84515 +               if (ctx->max_len > 3)
84516 +                       len1 += get_match_len(src + 3, ctx->unc_end,
84517 +                                             hash[0] + 3, ctx->max_len - 3);
84518 +       }
84520 +       if (hash[1] >= ctx->unc && hash[1] < src && hash[1][0] == src[0] &&
84521 +           hash[1][1] == src[1] && hash[1][2] == src[2]) {
84522 +               len2 = 3;
84523 +               if (ctx->max_len > 3)
84524 +                       len2 += get_match_len(src + 3, ctx->unc_end,
84525 +                                             hash[1] + 3, ctx->max_len - 3);
84526 +       }
84528 +       /* Compare two matches and select the best one */
84529 +       if (len1 < len2) {
84530 +               ctx->best_match = hash[1];
84531 +               len1 = len2;
84532 +       } else {
84533 +               ctx->best_match = hash[0];
84534 +       }
84536 +       hash[1] = hash[0];
84537 +       hash[0] = src;
84538 +       return len1;
84541 +static size_t longest_match_best(const u8 *src, struct lznt *ctx)
84543 +       size_t max_len;
84544 +       const u8 *ptr;
84546 +       if (ctx->unc >= src || !ctx->max_len)
84547 +               return 0;
84549 +       max_len = 0;
84550 +       for (ptr = ctx->unc; ptr < src; ++ptr) {
84551 +               size_t len =
84552 +                       get_match_len(src, ctx->unc_end, ptr, ctx->max_len);
84553 +               if (len >= max_len) {
84554 +                       max_len = len;
84555 +                       ctx->best_match = ptr;
84556 +               }
84557 +       }
84559 +       return max_len >= 3 ? max_len : 0;
84562 +static const size_t s_max_len[] = {
84563 +       0x1002, 0x802, 0x402, 0x202, 0x102, 0x82, 0x42, 0x22, 0x12,
84566 +static const size_t s_max_off[] = {
84567 +       0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
84570 +static inline u16 make_pair(size_t offset, size_t len, size_t index)
84572 +       return ((offset - 1) << (12 - index)) |
84573 +              ((len - 3) & (((1 << (12 - index)) - 1)));
84576 +static inline size_t parse_pair(u16 pair, size_t *offset, size_t index)
84578 +       *offset = 1 + (pair >> (12 - index));
84579 +       return 3 + (pair & ((1 << (12 - index)) - 1));
84583 + * compress_chunk
84584 + *
84585 + * returns one of the three values:
84586 + * 0 - ok, 'cmpr' contains 'cmpr_chunk_size' bytes of compressed data
84587 + * 1 - input buffer is full zero
84588 + * -2 - the compressed buffer is too small to hold the compressed data
84589 + */
84590 +static inline int compress_chunk(size_t (*match)(const u8 *, struct lznt *),
84591 +                                const u8 *unc, const u8 *unc_end, u8 *cmpr,
84592 +                                u8 *cmpr_end, size_t *cmpr_chunk_size,
84593 +                                struct lznt *ctx)
84595 +       size_t cnt = 0;
84596 +       size_t idx = 0;
84597 +       const u8 *up = unc;
84598 +       u8 *cp = cmpr + 3;
84599 +       u8 *cp2 = cmpr + 2;
84600 +       u8 not_zero = 0;
84601 +       /* Control byte of 8-bit values: ( 0 - means byte as is, 1 - short pair ) */
84602 +       u8 ohdr = 0;
84603 +       u8 *last;
84604 +       u16 t16;
84606 +       if (unc + LZNT_CHUNK_SIZE < unc_end)
84607 +               unc_end = unc + LZNT_CHUNK_SIZE;
84609 +       last = min(cmpr + LZNT_CHUNK_SIZE + sizeof(short), cmpr_end);
84611 +       ctx->unc = unc;
84612 +       ctx->unc_end = unc_end;
84613 +       ctx->max_len = s_max_len[0];
84615 +       while (up < unc_end) {
84616 +               size_t max_len;
84618 +               while (unc + s_max_off[idx] < up)
84619 +                       ctx->max_len = s_max_len[++idx];
84621 +               // Find match
84622 +               max_len = up + 3 <= unc_end ? (*match)(up, ctx) : 0;
84624 +               if (!max_len) {
84625 +                       if (cp >= last)
84626 +                               goto NotCompressed;
84627 +                       not_zero |= *cp++ = *up++;
84628 +               } else if (cp + 1 >= last) {
84629 +                       goto NotCompressed;
84630 +               } else {
84631 +                       t16 = make_pair(up - ctx->best_match, max_len, idx);
84632 +                       *cp++ = t16;
84633 +                       *cp++ = t16 >> 8;
84635 +                       ohdr |= 1 << cnt;
84636 +                       up += max_len;
84637 +               }
84639 +               cnt = (cnt + 1) & 7;
84640 +               if (!cnt) {
84641 +                       *cp2 = ohdr;
84642 +                       ohdr = 0;
84643 +                       cp2 = cp;
84644 +                       cp += 1;
84645 +               }
84646 +       }
84648 +       if (cp2 < last)
84649 +               *cp2 = ohdr;
84650 +       else
84651 +               cp -= 1;
84653 +       *cmpr_chunk_size = cp - cmpr;
84655 +       t16 = (*cmpr_chunk_size - 3) | 0xB000;
84656 +       cmpr[0] = t16;
84657 +       cmpr[1] = t16 >> 8;
84659 +       return not_zero ? 0 : LZNT_ERROR_ALL_ZEROS;
84661 +NotCompressed:
84663 +       if ((cmpr + LZNT_CHUNK_SIZE + sizeof(short)) > last)
84664 +               return -2;
84666 +       /*
84667 +        * Copy non cmpr data
84668 +        * 0x3FFF == ((LZNT_CHUNK_SIZE + 2 - 3) | 0x3000)
84669 +        */
84670 +       cmpr[0] = 0xff;
84671 +       cmpr[1] = 0x3f;
84673 +       memcpy(cmpr + sizeof(short), unc, LZNT_CHUNK_SIZE);
84674 +       *cmpr_chunk_size = LZNT_CHUNK_SIZE + sizeof(short);
84676 +       return 0;
84679 +static inline ssize_t decompress_chunk(u8 *unc, u8 *unc_end, const u8 *cmpr,
84680 +                                      const u8 *cmpr_end)
84682 +       u8 *up = unc;
84683 +       u8 ch = *cmpr++;
84684 +       size_t bit = 0;
84685 +       size_t index = 0;
84686 +       u16 pair;
84687 +       size_t offset, length;
84689 +       /* Do decompression until pointers are inside range */
84690 +       while (up < unc_end && cmpr < cmpr_end) {
84691 +               /* Correct index */
84692 +               while (unc + s_max_off[index] < up)
84693 +                       index += 1;
84695 +               /* Check the current flag for zero */
84696 +               if (!(ch & (1 << bit))) {
84697 +                       /* Just copy byte */
84698 +                       *up++ = *cmpr++;
84699 +                       goto next;
84700 +               }
84702 +               /* Check for boundary */
84703 +               if (cmpr + 1 >= cmpr_end)
84704 +                       return -EINVAL;
84706 +               /* Read a short from little endian stream */
84707 +               pair = cmpr[1];
84708 +               pair <<= 8;
84709 +               pair |= cmpr[0];
84711 +               cmpr += 2;
84713 +               /* Translate packed information into offset and length */
84714 +               length = parse_pair(pair, &offset, index);
84716 +               /* Check offset for boundary */
84717 +               if (unc + offset > up)
84718 +                       return -EINVAL;
84720 +               /* Truncate the length if necessary */
84721 +               if (up + length >= unc_end)
84722 +                       length = unc_end - up;
84724 +               /* Now we copy bytes. This is the heart of LZ algorithm. */
84725 +               for (; length > 0; length--, up++)
84726 +                       *up = *(up - offset);
84728 +next:
84729 +               /* Advance flag bit value */
84730 +               bit = (bit + 1) & 7;
84732 +               if (!bit) {
84733 +                       if (cmpr >= cmpr_end)
84734 +                               break;
84736 +                       ch = *cmpr++;
84737 +               }
84738 +       }
84740 +       /* return the size of uncompressed data */
84741 +       return up - unc;
84745 + * 0 - standard compression
84746 + * !0 - best compression, requires a lot of cpu
84747 + */
84748 +struct lznt *get_lznt_ctx(int level)
84750 +       struct lznt *r = ntfs_zalloc(level ? offsetof(struct lznt, hash)
84751 +                                          : sizeof(struct lznt));
84753 +       if (r)
84754 +               r->std = !level;
84755 +       return r;
84759 + * compress_lznt
84760 + *
84761 + * Compresses "unc" into "cmpr"
84762 + * +x - ok, 'cmpr' contains 'final_compressed_size' bytes of compressed data
84763 + * 0 - input buffer is full zero
84764 + */
84765 +size_t compress_lznt(const void *unc, size_t unc_size, void *cmpr,
84766 +                    size_t cmpr_size, struct lznt *ctx)
84768 +       int err;
84769 +       size_t (*match)(const u8 *src, struct lznt *ctx);
84770 +       u8 *p = cmpr;
84771 +       u8 *end = p + cmpr_size;
84772 +       const u8 *unc_chunk = unc;
84773 +       const u8 *unc_end = unc_chunk + unc_size;
84774 +       bool is_zero = true;
84776 +       if (ctx->std) {
84777 +               match = &longest_match_std;
84778 +               memset(ctx->hash, 0, sizeof(ctx->hash));
84779 +       } else {
84780 +               match = &longest_match_best;
84781 +       }
84783 +       /* compression cycle */
84784 +       for (; unc_chunk < unc_end; unc_chunk += LZNT_CHUNK_SIZE) {
84785 +               cmpr_size = 0;
84786 +               err = compress_chunk(match, unc_chunk, unc_end, p, end,
84787 +                                    &cmpr_size, ctx);
84788 +               if (err < 0)
84789 +                       return unc_size;
84791 +               if (is_zero && err != LZNT_ERROR_ALL_ZEROS)
84792 +                       is_zero = false;
84794 +               p += cmpr_size;
84795 +       }
84797 +       if (p <= end - 2)
84798 +               p[0] = p[1] = 0;
84800 +       return is_zero ? 0 : PtrOffset(cmpr, p);
84804 + * decompress_lznt
84805 + *
84806 + * decompresses "cmpr" into "unc"
84807 + */
84808 +ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
84809 +                       size_t unc_size)
84811 +       const u8 *cmpr_chunk = cmpr;
84812 +       const u8 *cmpr_end = cmpr_chunk + cmpr_size;
84813 +       u8 *unc_chunk = unc;
84814 +       u8 *unc_end = unc_chunk + unc_size;
84815 +       u16 chunk_hdr;
84817 +       if (cmpr_size < sizeof(short))
84818 +               return -EINVAL;
84820 +       /* read chunk header */
84821 +       chunk_hdr = cmpr_chunk[1];
84822 +       chunk_hdr <<= 8;
84823 +       chunk_hdr |= cmpr_chunk[0];
84825 +       /* loop through decompressing chunks */
84826 +       for (;;) {
84827 +               size_t chunk_size_saved;
84828 +               size_t unc_use;
84829 +               size_t cmpr_use = 3 + (chunk_hdr & (LZNT_CHUNK_SIZE - 1));
84831 +               /* Check that the chunk actually fits the supplied buffer */
84832 +               if (cmpr_chunk + cmpr_use > cmpr_end)
84833 +                       return -EINVAL;
84835 +               /* First make sure the chunk contains compressed data */
84836 +               if (chunk_hdr & 0x8000) {
84837 +                       /* Decompress a chunk and return if we get an error */
84838 +                       ssize_t err =
84839 +                               decompress_chunk(unc_chunk, unc_end,
84840 +                                                cmpr_chunk + sizeof(chunk_hdr),
84841 +                                                cmpr_chunk + cmpr_use);
84842 +                       if (err < 0)
84843 +                               return err;
84844 +                       unc_use = err;
84845 +               } else {
84846 +                       /* This chunk does not contain compressed data */
84847 +                       unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end
84848 +                                         ? unc_end - unc_chunk
84849 +                                         : LZNT_CHUNK_SIZE;
84851 +                       if (cmpr_chunk + sizeof(chunk_hdr) + unc_use >
84852 +                           cmpr_end) {
84853 +                               return -EINVAL;
84854 +                       }
84856 +                       memcpy(unc_chunk, cmpr_chunk + sizeof(chunk_hdr),
84857 +                              unc_use);
84858 +               }
84860 +               /* Advance pointers */
84861 +               cmpr_chunk += cmpr_use;
84862 +               unc_chunk += unc_use;
84864 +               /* Check for the end of unc buffer */
84865 +               if (unc_chunk >= unc_end)
84866 +                       break;
84868 +               /* Proceed the next chunk */
84869 +               if (cmpr_chunk > cmpr_end - 2)
84870 +                       break;
84872 +               chunk_size_saved = LZNT_CHUNK_SIZE;
84874 +               /* read chunk header */
84875 +               chunk_hdr = cmpr_chunk[1];
84876 +               chunk_hdr <<= 8;
84877 +               chunk_hdr |= cmpr_chunk[0];
84879 +               if (!chunk_hdr)
84880 +                       break;
84882 +               /* Check the size of unc buffer */
84883 +               if (unc_use < chunk_size_saved) {
84884 +                       size_t t1 = chunk_size_saved - unc_use;
84885 +                       u8 *t2 = unc_chunk + t1;
84887 +                       /* 'Zero' memory */
84888 +                       if (t2 >= unc_end)
84889 +                               break;
84891 +                       memset(unc_chunk, 0, t1);
84892 +                       unc_chunk = t2;
84893 +               }
84894 +       }
84896 +       /* Check compression boundary */
84897 +       if (cmpr_chunk > cmpr_end)
84898 +               return -EINVAL;
84900 +       /*
84901 +        * The unc size is just a difference between current
84902 +        * pointer and original one
84903 +        */
84904 +       return PtrOffset(unc, unc_chunk);
84906 diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
84907 new file mode 100644
84908 index 000000000000..f5db12cd3b20
84909 --- /dev/null
84910 +++ b/fs/ntfs3/namei.c
84911 @@ -0,0 +1,578 @@
84912 +// SPDX-License-Identifier: GPL-2.0
84914 + *
84915 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
84916 + *
84917 + */
84919 +#include <linux/blkdev.h>
84920 +#include <linux/buffer_head.h>
84921 +#include <linux/fs.h>
84922 +#include <linux/iversion.h>
84923 +#include <linux/namei.h>
84924 +#include <linux/nls.h>
84926 +#include "debug.h"
84927 +#include "ntfs.h"
84928 +#include "ntfs_fs.h"
84931 + * fill_name_de
84932 + *
84933 + * formats NTFS_DE in 'buf'
84934 + */
84935 +int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
84936 +                const struct cpu_str *uni)
84938 +       int err;
84939 +       struct NTFS_DE *e = buf;
84940 +       u16 data_size;
84941 +       struct ATTR_FILE_NAME *fname = (struct ATTR_FILE_NAME *)(e + 1);
84943 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
84944 +       e->ref.high = fname->home.high = 0;
84945 +#endif
84946 +       if (uni) {
84947 +#ifdef __BIG_ENDIAN
84948 +               int ulen = uni->len;
84949 +               __le16 *uname = fname->name;
84950 +               const u16 *name_cpu = uni->name;
84952 +               while (ulen--)
84953 +                       *uname++ = cpu_to_le16(*name_cpu++);
84954 +#else
84955 +               memcpy(fname->name, uni->name, uni->len * sizeof(u16));
84956 +#endif
84957 +               fname->name_len = uni->len;
84959 +       } else {
84960 +               /* Convert input string to unicode */
84961 +               err = ntfs_nls_to_utf16(sbi, name->name, name->len,
84962 +                                       (struct cpu_str *)&fname->name_len,
84963 +                                       NTFS_NAME_LEN, UTF16_LITTLE_ENDIAN);
84964 +               if (err < 0)
84965 +                       return err;
84966 +       }
84968 +       fname->type = FILE_NAME_POSIX;
84969 +       data_size = fname_full_size(fname);
84971 +       e->size = cpu_to_le16(QuadAlign(data_size) + sizeof(struct NTFS_DE));
84972 +       e->key_size = cpu_to_le16(data_size);
84973 +       e->flags = 0;
84974 +       e->res = 0;
84976 +       return 0;
84980 + * ntfs_lookup
84981 + *
84982 + * inode_operations::lookup
84983 + */
84984 +static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry,
84985 +                                 u32 flags)
84987 +       struct ntfs_inode *ni = ntfs_i(dir);
84988 +       struct cpu_str *uni = __getname();
84989 +       struct inode *inode;
84990 +       int err;
84992 +       if (!uni)
84993 +               inode = ERR_PTR(-ENOMEM);
84994 +       else {
84995 +               err = ntfs_nls_to_utf16(ni->mi.sbi, dentry->d_name.name,
84996 +                                       dentry->d_name.len, uni, NTFS_NAME_LEN,
84997 +                                       UTF16_HOST_ENDIAN);
84998 +               if (err < 0)
84999 +                       inode = ERR_PTR(err);
85000 +               else {
85001 +                       ni_lock(ni);
85002 +                       inode = dir_search_u(dir, uni, NULL);
85003 +                       ni_unlock(ni);
85004 +               }
85005 +               __putname(uni);
85006 +       }
85008 +       return d_splice_alias(inode, dentry);
85012 + * ntfs_create
85013 + *
85014 + * inode_operations::create
85015 + */
85016 +static int ntfs_create(struct user_namespace *mnt_userns, struct inode *dir,
85017 +                      struct dentry *dentry, umode_t mode, bool excl)
85019 +       struct ntfs_inode *ni = ntfs_i(dir);
85020 +       struct inode *inode;
85022 +       ni_lock_dir(ni);
85024 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFREG | mode,
85025 +                                 0, NULL, 0, excl, NULL);
85027 +       ni_unlock(ni);
85029 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
85033 + * ntfs_link
85034 + *
85035 + * inode_operations::link
85036 + */
85037 +static int ntfs_link(struct dentry *ode, struct inode *dir, struct dentry *de)
85039 +       int err;
85040 +       struct inode *inode = d_inode(ode);
85041 +       struct ntfs_inode *ni = ntfs_i(inode);
85043 +       if (S_ISDIR(inode->i_mode))
85044 +               return -EPERM;
85046 +       if (inode->i_nlink >= NTFS_LINK_MAX)
85047 +               return -EMLINK;
85049 +       ni_lock_dir(ntfs_i(dir));
85050 +       if (inode != dir)
85051 +               ni_lock(ni);
85053 +       dir->i_ctime = dir->i_mtime = inode->i_ctime = current_time(inode);
85054 +       inc_nlink(inode);
85055 +       ihold(inode);
85057 +       err = ntfs_link_inode(inode, de);
85058 +       if (!err) {
85059 +               mark_inode_dirty(inode);
85060 +               mark_inode_dirty(dir);
85061 +               d_instantiate(de, inode);
85062 +       } else {
85063 +               drop_nlink(inode);
85064 +               iput(inode);
85065 +       }
85067 +       if (inode != dir)
85068 +               ni_unlock(ni);
85069 +       ni_unlock(ntfs_i(dir));
85071 +       return err;
85075 + * ntfs_unlink
85076 + *
85077 + * inode_operations::unlink
85078 + */
85079 +static int ntfs_unlink(struct inode *dir, struct dentry *dentry)
85081 +       struct ntfs_inode *ni = ntfs_i(dir);
85082 +       int err;
85084 +       ni_lock_dir(ni);
85086 +       err = ntfs_unlink_inode(dir, dentry);
85088 +       ni_unlock(ni);
85090 +       return err;
85094 + * ntfs_symlink
85095 + *
85096 + * inode_operations::symlink
85097 + */
85098 +static int ntfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
85099 +                       struct dentry *dentry, const char *symname)
85101 +       u32 size = strlen(symname);
85102 +       struct inode *inode;
85103 +       struct ntfs_inode *ni = ntfs_i(dir);
85105 +       ni_lock_dir(ni);
85107 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFLNK | 0777,
85108 +                                 0, symname, size, 0, NULL);
85110 +       ni_unlock(ni);
85112 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
85116 + * ntfs_mkdir
85117 + *
85118 + * inode_operations::mkdir
85119 + */
85120 +static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
85121 +                     struct dentry *dentry, umode_t mode)
85123 +       struct inode *inode;
85124 +       struct ntfs_inode *ni = ntfs_i(dir);
85126 +       ni_lock_dir(ni);
85128 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFDIR | mode,
85129 +                                 0, NULL, -1, 0, NULL);
85131 +       ni_unlock(ni);
85133 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
85137 + * ntfs_rmdir
85138 + *
85139 + * inode_operations::rm_dir
85140 + */
85141 +static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
85143 +       struct ntfs_inode *ni = ntfs_i(dir);
85144 +       int err;
85146 +       ni_lock_dir(ni);
85148 +       err = ntfs_unlink_inode(dir, dentry);
85150 +       ni_unlock(ni);
85152 +       return err;
85156 + * ntfs_rename
85157 + *
85158 + * inode_operations::rename
85159 + */
85160 +static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
85161 +                      struct dentry *old_dentry, struct inode *new_dir,
85162 +                      struct dentry *new_dentry, u32 flags)
85164 +       int err;
85165 +       struct super_block *sb = old_dir->i_sb;
85166 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
85167 +       struct ntfs_inode *old_dir_ni = ntfs_i(old_dir);
85168 +       struct ntfs_inode *new_dir_ni = ntfs_i(new_dir);
85169 +       struct ntfs_inode *old_ni;
85170 +       struct ATTR_FILE_NAME *old_name, *new_name, *fname;
85171 +       u8 name_type;
85172 +       bool is_same;
85173 +       struct inode *old_inode, *new_inode;
85174 +       struct NTFS_DE *old_de, *new_de;
85175 +       struct ATTRIB *attr;
85176 +       struct ATTR_LIST_ENTRY *le;
85177 +       u16 new_de_key_size;
85179 +       static_assert(SIZEOF_ATTRIBUTE_FILENAME_MAX + SIZEOF_RESIDENT < 1024);
85180 +       static_assert(SIZEOF_ATTRIBUTE_FILENAME_MAX + sizeof(struct NTFS_DE) <
85181 +                     1024);
85182 +       static_assert(PATH_MAX >= 4 * 1024);
85184 +       if (flags & ~RENAME_NOREPLACE)
85185 +               return -EINVAL;
85187 +       old_inode = d_inode(old_dentry);
85188 +       new_inode = d_inode(new_dentry);
85190 +       old_ni = ntfs_i(old_inode);
85192 +       is_same = old_dentry->d_name.len == new_dentry->d_name.len &&
85193 +                 !memcmp(old_dentry->d_name.name, new_dentry->d_name.name,
85194 +                         old_dentry->d_name.len);
85196 +       if (is_same && old_dir == new_dir) {
85197 +               /* Nothing to do */
85198 +               err = 0;
85199 +               goto out;
85200 +       }
85202 +       if (ntfs_is_meta_file(sbi, old_inode->i_ino)) {
85203 +               err = -EINVAL;
85204 +               goto out;
85205 +       }
85207 +       if (new_inode) {
85208 +               /*target name exists. unlink it*/
85209 +               dget(new_dentry);
85210 +               ni_lock_dir(new_dir_ni);
85211 +               err = ntfs_unlink_inode(new_dir, new_dentry);
85212 +               ni_unlock(new_dir_ni);
85213 +               dput(new_dentry);
85214 +               if (err)
85215 +                       goto out;
85216 +       }
85218 +       /* allocate PATH_MAX bytes */
85219 +       old_de = __getname();
85220 +       if (!old_de) {
85221 +               err = -ENOMEM;
85222 +               goto out;
85223 +       }
85225 +       err = fill_name_de(sbi, old_de, &old_dentry->d_name, NULL);
85226 +       if (err < 0)
85227 +               goto out1;
85229 +       old_name = (struct ATTR_FILE_NAME *)(old_de + 1);
85231 +       if (is_same) {
85232 +               new_de = old_de;
85233 +       } else {
85234 +               new_de = Add2Ptr(old_de, 1024);
85235 +               err = fill_name_de(sbi, new_de, &new_dentry->d_name, NULL);
85236 +               if (err < 0)
85237 +                       goto out1;
85238 +       }
85240 +       ni_lock_dir(old_dir_ni);
85241 +       ni_lock(old_ni);
85243 +       mi_get_ref(&old_dir_ni->mi, &old_name->home);
85245 +       /*get pointer to file_name in mft*/
85246 +       fname = ni_fname_name(old_ni, (struct cpu_str *)&old_name->name_len,
85247 +                             &old_name->home, &le);
85248 +       if (!fname) {
85249 +               err = -EINVAL;
85250 +               goto out2;
85251 +       }
85253 +       /* Copy fname info from record into new fname */
85254 +       new_name = (struct ATTR_FILE_NAME *)(new_de + 1);
85255 +       memcpy(&new_name->dup, &fname->dup, sizeof(fname->dup));
85257 +       name_type = paired_name(fname->type);
85259 +       /* remove first name from directory */
85260 +       err = indx_delete_entry(&old_dir_ni->dir, old_dir_ni, old_de + 1,
85261 +                               le16_to_cpu(old_de->key_size), sbi);
85262 +       if (err)
85263 +               goto out3;
85265 +       /* remove first name from mft */
85266 +       err = ni_remove_attr_le(old_ni, attr_from_name(fname), le);
85267 +       if (err)
85268 +               goto out4;
85270 +       le16_add_cpu(&old_ni->mi.mrec->hard_links, -1);
85271 +       old_ni->mi.dirty = true;
85273 +       if (name_type != FILE_NAME_POSIX) {
85274 +               /* get paired name */
85275 +               fname = ni_fname_type(old_ni, name_type, &le);
85276 +               if (fname) {
85277 +                       /* remove second name from directory */
85278 +                       err = indx_delete_entry(&old_dir_ni->dir, old_dir_ni,
85279 +                                               fname, fname_full_size(fname),
85280 +                                               sbi);
85281 +                       if (err)
85282 +                               goto out5;
85284 +                       /* remove second name from mft */
85285 +                       err = ni_remove_attr_le(old_ni, attr_from_name(fname),
85286 +                                               le);
85287 +                       if (err)
85288 +                               goto out6;
85290 +                       le16_add_cpu(&old_ni->mi.mrec->hard_links, -1);
85291 +                       old_ni->mi.dirty = true;
85292 +               }
85293 +       }
85295 +       /* Add new name */
85296 +       mi_get_ref(&old_ni->mi, &new_de->ref);
85297 +       mi_get_ref(&ntfs_i(new_dir)->mi, &new_name->home);
85299 +       new_de_key_size = le16_to_cpu(new_de->key_size);
85301 +       /* insert new name in mft */
85302 +       err = ni_insert_resident(old_ni, new_de_key_size, ATTR_NAME, NULL, 0,
85303 +                                &attr, NULL);
85304 +       if (err)
85305 +               goto out7;
85307 +       attr->res.flags = RESIDENT_FLAG_INDEXED;
85309 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), new_name, new_de_key_size);
85311 +       le16_add_cpu(&old_ni->mi.mrec->hard_links, 1);
85312 +       old_ni->mi.dirty = true;
85314 +       /* insert new name in directory */
85315 +       err = indx_insert_entry(&new_dir_ni->dir, new_dir_ni, new_de, sbi,
85316 +                               NULL);
85317 +       if (err)
85318 +               goto out8;
85320 +       if (IS_DIRSYNC(new_dir))
85321 +               err = ntfs_sync_inode(old_inode);
85322 +       else
85323 +               mark_inode_dirty(old_inode);
85325 +       old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
85326 +       if (IS_DIRSYNC(old_dir))
85327 +               (void)ntfs_sync_inode(old_dir);
85328 +       else
85329 +               mark_inode_dirty(old_dir);
85331 +       if (old_dir != new_dir) {
85332 +               new_dir->i_mtime = new_dir->i_ctime = old_dir->i_ctime;
85333 +               mark_inode_dirty(new_dir);
85334 +       }
85336 +       if (old_inode) {
85337 +               old_inode->i_ctime = old_dir->i_ctime;
85338 +               mark_inode_dirty(old_inode);
85339 +       }
85341 +       err = 0;
85342 +       /* normal way */
85343 +       goto out2;
85345 +out8:
85346 +       /* undo
85347 +        * ni_insert_resident(old_ni, new_de_key_size, ATTR_NAME, NULL, 0,
85348 +        *                       &attr, NULL);
85349 +        */
85350 +       mi_remove_attr(&old_ni->mi, attr);
85351 +out7:
85352 +       /* undo
85353 +        * ni_remove_attr_le(old_ni, attr_from_name(fname), le);
85354 +        */
85355 +out6:
85356 +       /* undo
85357 +        * indx_delete_entry(&old_dir_ni->dir, old_dir_ni,
85358 +        *                                      fname, fname_full_size(fname),
85359 +        *                                      sbi);
85360 +        */
85361 +out5:
85362 +       /* undo
85363 +        * ni_remove_attr_le(old_ni, attr_from_name(fname), le);
85364 +        */
85365 +out4:
85366 +       /* undo:
85367 +        * indx_delete_entry(&old_dir_ni->dir, old_dir_ni, old_de + 1,
85368 +        *                      old_de->key_size, NULL);
85369 +        */
85370 +out3:
85371 +out2:
85372 +       ni_unlock(old_ni);
85373 +       ni_unlock(old_dir_ni);
85374 +out1:
85375 +       __putname(old_de);
85376 +out:
85377 +       return err;
85381 + * ntfs_atomic_open
85382 + *
85383 + * inode_operations::atomic_open
85384 + */
85385 +static int ntfs_atomic_open(struct inode *dir, struct dentry *dentry,
85386 +                           struct file *file, u32 flags, umode_t mode)
85388 +       int err;
85389 +       bool excl = !!(flags & O_EXCL);
85390 +       struct inode *inode;
85391 +       struct ntfs_fnd *fnd = NULL;
85392 +       struct ntfs_inode *ni = ntfs_i(dir);
85393 +       struct dentry *d = NULL;
85394 +       struct cpu_str *uni = __getname();
85396 +       if (!uni)
85397 +               return -ENOMEM;
85399 +       err = ntfs_nls_to_utf16(ni->mi.sbi, dentry->d_name.name,
85400 +                               dentry->d_name.len, uni, NTFS_NAME_LEN,
85401 +                               UTF16_HOST_ENDIAN);
85402 +       if (err < 0)
85403 +               goto out;
85405 +       ni_lock_dir(ni);
85407 +       if (d_in_lookup(dentry)) {
85408 +               fnd = fnd_get();
85409 +               if (!fnd) {
85410 +                       err = -ENOMEM;
85411 +                       goto out1;
85412 +               }
85414 +               d = d_splice_alias(dir_search_u(dir, uni, fnd), dentry);
85415 +               if (IS_ERR(d)) {
85416 +                       err = PTR_ERR(d);
85417 +                       d = NULL;
85418 +                       goto out2;
85419 +               }
85421 +               if (d)
85422 +                       dentry = d;
85423 +       }
85425 +       if (!(flags & O_CREAT) || d_really_is_positive(dentry)) {
85426 +               err = finish_no_open(file, d);
85427 +               goto out2;
85428 +       }
85430 +       file->f_mode |= FMODE_CREATED;
85432 +       /*fnd contains tree's path to insert to*/
85433 +       /* TODO: init_user_ns? */
85434 +       inode = ntfs_create_inode(&init_user_ns, dir, dentry, uni, mode, 0,
85435 +                                 NULL, 0, excl, fnd);
85436 +       err = IS_ERR(inode) ? PTR_ERR(inode)
85437 +                           : finish_open(file, dentry, ntfs_file_open);
85438 +       dput(d);
85440 +out2:
85441 +       fnd_put(fnd);
85442 +out1:
85443 +       ni_unlock(ni);
85444 +out:
85445 +       __putname(uni);
85447 +       return err;
85450 +struct dentry *ntfs3_get_parent(struct dentry *child)
85452 +       struct inode *inode = d_inode(child);
85453 +       struct ntfs_inode *ni = ntfs_i(inode);
85455 +       struct ATTR_LIST_ENTRY *le = NULL;
85456 +       struct ATTRIB *attr = NULL;
85457 +       struct ATTR_FILE_NAME *fname;
85459 +       while ((attr = ni_find_attr(ni, attr, &le, ATTR_NAME, NULL, 0, NULL,
85460 +                                   NULL))) {
85461 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
85462 +               if (!fname)
85463 +                       continue;
85465 +               return d_obtain_alias(
85466 +                       ntfs_iget5(inode->i_sb, &fname->home, NULL));
85467 +       }
85469 +       return ERR_PTR(-ENOENT);
85472 +const struct inode_operations ntfs_dir_inode_operations = {
85473 +       .lookup = ntfs_lookup,
85474 +       .create = ntfs_create,
85475 +       .link = ntfs_link,
85476 +       .unlink = ntfs_unlink,
85477 +       .symlink = ntfs_symlink,
85478 +       .mkdir = ntfs_mkdir,
85479 +       .rmdir = ntfs_rmdir,
85480 +       .rename = ntfs_rename,
85481 +       .permission = ntfs_permission,
85482 +       .get_acl = ntfs_get_acl,
85483 +       .set_acl = ntfs_set_acl,
85484 +       .setattr = ntfs3_setattr,
85485 +       .getattr = ntfs_getattr,
85486 +       .listxattr = ntfs_listxattr,
85487 +       .atomic_open = ntfs_atomic_open,
85488 +       .fiemap = ntfs_fiemap,
85490 diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
85491 new file mode 100644
85492 index 000000000000..40398e6c39c9
85493 --- /dev/null
85494 +++ b/fs/ntfs3/ntfs.h
85495 @@ -0,0 +1,1238 @@
85496 +/* SPDX-License-Identifier: GPL-2.0 */
85498 + *
85499 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
85500 + *
85501 + * on-disk ntfs structs
85502 + */
85504 +// clang-format off
85506 +/* TODO:
85507 + * - Check 4K mft record and 512 bytes cluster
85508 + */
85511 + * Activate this define to use binary search in indexes
85512 + */
85513 +#define NTFS3_INDEX_BINARY_SEARCH
85516 + * Check each run for marked clusters
85517 + */
85518 +#define NTFS3_CHECK_FREE_CLST
85520 +#define NTFS_NAME_LEN 255
85523 + * ntfs.sys used 500 maximum links
85524 + * on-disk struct allows up to 0xffff
85525 + */
85526 +#define NTFS_LINK_MAX 0x400
85527 +//#define NTFS_LINK_MAX 0xffff
85530 + * Activate to use 64 bit clusters instead of 32 bits in ntfs.sys
85531 + * Logical and virtual cluster number
85532 + * If needed, may be redefined to use 64 bit value
85533 + */
85534 +//#define CONFIG_NTFS3_64BIT_CLUSTER
85536 +#define NTFS_LZNT_MAX_CLUSTER  4096
85537 +#define NTFS_LZNT_CUNIT                4
85538 +#define NTFS_LZNT_CLUSTERS     (1u<<NTFS_LZNT_CUNIT)
85540 +struct GUID {
85541 +       __le32 Data1;
85542 +       __le16 Data2;
85543 +       __le16 Data3;
85544 +       u8 Data4[8];
85548 + * this struct repeats layout of ATTR_FILE_NAME
85549 + * at offset 0x40
85550 + * it used to store global constants NAME_MFT/NAME_MIRROR...
85551 + * most constant names are shorter than 10
85552 + */
85553 +struct cpu_str {
85554 +       u8 len;
85555 +       u8 unused;
85556 +       u16 name[10];
85559 +struct le_str {
85560 +       u8 len;
85561 +       u8 unused;
85562 +       __le16 name[];
85565 +static_assert(SECTOR_SHIFT == 9);
85567 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
85568 +typedef u64 CLST;
85569 +static_assert(sizeof(size_t) == 8);
85570 +#else
85571 +typedef u32 CLST;
85572 +#endif
85574 +#define SPARSE_LCN64   ((u64)-1)
85575 +#define SPARSE_LCN     ((CLST)-1)
85576 +#define RESIDENT_LCN   ((CLST)-2)
85577 +#define COMPRESSED_LCN ((CLST)-3)
85579 +#define COMPRESSION_UNIT     4
85580 +#define COMPRESS_MAX_CLUSTER 0x1000
85581 +#define MFT_INCREASE_CHUNK   1024
85583 +enum RECORD_NUM {
85584 +       MFT_REC_MFT             = 0,
85585 +       MFT_REC_MIRR            = 1,
85586 +       MFT_REC_LOG             = 2,
85587 +       MFT_REC_VOL             = 3,
85588 +       MFT_REC_ATTR            = 4,
85589 +       MFT_REC_ROOT            = 5,
85590 +       MFT_REC_BITMAP          = 6,
85591 +       MFT_REC_BOOT            = 7,
85592 +       MFT_REC_BADCLUST        = 8,
85593 +       //MFT_REC_QUOTA         = 9,
85594 +       MFT_REC_SECURE          = 9, // NTFS 3.0
85595 +       MFT_REC_UPCASE          = 10,
85596 +       MFT_REC_EXTEND          = 11, // NTFS 3.0
85597 +       MFT_REC_RESERVED        = 11,
85598 +       MFT_REC_FREE            = 16,
85599 +       MFT_REC_USER            = 24,
85602 +enum ATTR_TYPE {
85603 +       ATTR_ZERO               = cpu_to_le32(0x00),
85604 +       ATTR_STD                = cpu_to_le32(0x10),
85605 +       ATTR_LIST               = cpu_to_le32(0x20),
85606 +       ATTR_NAME               = cpu_to_le32(0x30),
85607 +       // ATTR_VOLUME_VERSION on Nt4
85608 +       ATTR_ID                 = cpu_to_le32(0x40),
85609 +       ATTR_SECURE             = cpu_to_le32(0x50),
85610 +       ATTR_LABEL              = cpu_to_le32(0x60),
85611 +       ATTR_VOL_INFO           = cpu_to_le32(0x70),
85612 +       ATTR_DATA               = cpu_to_le32(0x80),
85613 +       ATTR_ROOT               = cpu_to_le32(0x90),
85614 +       ATTR_ALLOC              = cpu_to_le32(0xA0),
85615 +       ATTR_BITMAP             = cpu_to_le32(0xB0),
85616 +       // ATTR_SYMLINK on Nt4
85617 +       ATTR_REPARSE            = cpu_to_le32(0xC0),
85618 +       ATTR_EA_INFO            = cpu_to_le32(0xD0),
85619 +       ATTR_EA                 = cpu_to_le32(0xE0),
85620 +       ATTR_PROPERTYSET        = cpu_to_le32(0xF0),
85621 +       ATTR_LOGGED_UTILITY_STREAM = cpu_to_le32(0x100),
85622 +       ATTR_END                = cpu_to_le32(0xFFFFFFFF)
85625 +static_assert(sizeof(enum ATTR_TYPE) == 4);
85627 +enum FILE_ATTRIBUTE {
85628 +       FILE_ATTRIBUTE_READONLY         = cpu_to_le32(0x00000001),
85629 +       FILE_ATTRIBUTE_HIDDEN           = cpu_to_le32(0x00000002),
85630 +       FILE_ATTRIBUTE_SYSTEM           = cpu_to_le32(0x00000004),
85631 +       FILE_ATTRIBUTE_ARCHIVE          = cpu_to_le32(0x00000020),
85632 +       FILE_ATTRIBUTE_DEVICE           = cpu_to_le32(0x00000040),
85633 +       FILE_ATTRIBUTE_TEMPORARY        = cpu_to_le32(0x00000100),
85634 +       FILE_ATTRIBUTE_SPARSE_FILE      = cpu_to_le32(0x00000200),
85635 +       FILE_ATTRIBUTE_REPARSE_POINT    = cpu_to_le32(0x00000400),
85636 +       FILE_ATTRIBUTE_COMPRESSED       = cpu_to_le32(0x00000800),
85637 +       FILE_ATTRIBUTE_OFFLINE          = cpu_to_le32(0x00001000),
85638 +       FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = cpu_to_le32(0x00002000),
85639 +       FILE_ATTRIBUTE_ENCRYPTED        = cpu_to_le32(0x00004000),
85640 +       FILE_ATTRIBUTE_VALID_FLAGS      = cpu_to_le32(0x00007fb7),
85641 +       FILE_ATTRIBUTE_DIRECTORY        = cpu_to_le32(0x10000000),
85644 +static_assert(sizeof(enum FILE_ATTRIBUTE) == 4);
85646 +extern const struct cpu_str NAME_MFT;
85647 +extern const struct cpu_str NAME_MIRROR;
85648 +extern const struct cpu_str NAME_LOGFILE;
85649 +extern const struct cpu_str NAME_VOLUME;
85650 +extern const struct cpu_str NAME_ATTRDEF;
85651 +extern const struct cpu_str NAME_ROOT;
85652 +extern const struct cpu_str NAME_BITMAP;
85653 +extern const struct cpu_str NAME_BOOT;
85654 +extern const struct cpu_str NAME_BADCLUS;
85655 +extern const struct cpu_str NAME_QUOTA;
85656 +extern const struct cpu_str NAME_SECURE;
85657 +extern const struct cpu_str NAME_UPCASE;
85658 +extern const struct cpu_str NAME_EXTEND;
85659 +extern const struct cpu_str NAME_OBJID;
85660 +extern const struct cpu_str NAME_REPARSE;
85661 +extern const struct cpu_str NAME_USNJRNL;
85663 +extern const __le16 I30_NAME[4];
85664 +extern const __le16 SII_NAME[4];
85665 +extern const __le16 SDH_NAME[4];
85666 +extern const __le16 SO_NAME[2];
85667 +extern const __le16 SQ_NAME[2];
85668 +extern const __le16 SR_NAME[2];
85670 +extern const __le16 BAD_NAME[4];
85671 +extern const __le16 SDS_NAME[4];
85672 +extern const __le16 WOF_NAME[17];      /* WofCompressedData */
85674 +/* MFT record number structure */
85675 +struct MFT_REF {
85676 +       __le32 low;     // The low part of the number
85677 +       __le16 high;    // The high part of the number
85678 +       __le16 seq;     // The sequence number of MFT record
85681 +static_assert(sizeof(__le64) == sizeof(struct MFT_REF));
85683 +static inline CLST ino_get(const struct MFT_REF *ref)
85685 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
85686 +       return le32_to_cpu(ref->low) | ((u64)le16_to_cpu(ref->high) << 32);
85687 +#else
85688 +       return le32_to_cpu(ref->low);
85689 +#endif
85692 +struct NTFS_BOOT {
85693 +       u8 jump_code[3];        // 0x00: Jump to boot code
85694 +       u8 system_id[8];        // 0x03: System ID, equals "NTFS    "
85696 +       // NOTE: this member is not aligned(!)
85697 +       // bytes_per_sector[0] must be 0
85698 +       // bytes_per_sector[1] must be multiplied by 256
85699 +       u8 bytes_per_sector[2]; // 0x0B: Bytes per sector
85701 +       u8 sectors_per_clusters;// 0x0D: Sectors per cluster
85702 +       u8 unused1[7];
85703 +       u8 media_type;          // 0x15: Media type (0xF8 - harddisk)
85704 +       u8 unused2[2];
85705 +       __le16 sct_per_track;   // 0x18: number of sectors per track
85706 +       __le16 heads;           // 0x1A: number of heads per cylinder
85707 +       __le32 hidden_sectors;  // 0x1C: number of 'hidden' sectors
85708 +       u8 unused3[4];
85709 +       u8 bios_drive_num;      // 0x24: BIOS drive number =0x80
85710 +       u8 unused4;
85711 +       u8 signature_ex;        // 0x26: Extended BOOT signature =0x80
85712 +       u8 unused5;
85713 +       __le64 sectors_per_volume;// 0x28: size of volume in sectors
85714 +       __le64 mft_clst;        // 0x30: first cluster of $MFT
85715 +       __le64 mft2_clst;       // 0x38: first cluster of $MFTMirr
85716 +       s8 record_size;         // 0x40: size of MFT record in clusters(sectors)
85717 +       u8 unused6[3];
85718 +       s8 index_size;          // 0x44: size of INDX record in clusters(sectors)
85719 +       u8 unused7[3];
85720 +       __le64 serial_num;      // 0x48: Volume serial number
85721 +       __le32 check_sum;       // 0x50: Simple additive checksum of all
85722 +                               // of the u32's which precede the 'check_sum'
85724 +       u8 boot_code[0x200 - 0x50 - 2 - 4]; // 0x54:
85725 +       u8 boot_magic[2];       // 0x1FE: Boot signature =0x55 + 0xAA
85728 +static_assert(sizeof(struct NTFS_BOOT) == 0x200);
85730 +enum NTFS_SIGNATURE {
85731 +       NTFS_FILE_SIGNATURE = cpu_to_le32(0x454C4946), // 'FILE'
85732 +       NTFS_INDX_SIGNATURE = cpu_to_le32(0x58444E49), // 'INDX'
85733 +       NTFS_CHKD_SIGNATURE = cpu_to_le32(0x444B4843), // 'CHKD'
85734 +       NTFS_RSTR_SIGNATURE = cpu_to_le32(0x52545352), // 'RSTR'
85735 +       NTFS_RCRD_SIGNATURE = cpu_to_le32(0x44524352), // 'RCRD'
85736 +       NTFS_BAAD_SIGNATURE = cpu_to_le32(0x44414142), // 'BAAD'
85737 +       NTFS_HOLE_SIGNATURE = cpu_to_le32(0x454C4F48), // 'HOLE'
85738 +       NTFS_FFFF_SIGNATURE = cpu_to_le32(0xffffffff),
85741 +static_assert(sizeof(enum NTFS_SIGNATURE) == 4);
85743 +/* MFT Record header structure */
85744 +struct NTFS_RECORD_HEADER {
85745 +       /* Record magic number, equals 'FILE'/'INDX'/'RSTR'/'RCRD' */
85746 +       enum NTFS_SIGNATURE sign; // 0x00:
85747 +       __le16 fix_off;         // 0x04:
85748 +       __le16 fix_num;         // 0x06:
85749 +       __le64 lsn;             // 0x08: Log file sequence number
85752 +static_assert(sizeof(struct NTFS_RECORD_HEADER) == 0x10);
85754 +static inline int is_baad(const struct NTFS_RECORD_HEADER *hdr)
85756 +       return hdr->sign == NTFS_BAAD_SIGNATURE;
85759 +/* Possible bits in struct MFT_REC.flags */
85760 +enum RECORD_FLAG {
85761 +       RECORD_FLAG_IN_USE      = cpu_to_le16(0x0001),
85762 +       RECORD_FLAG_DIR         = cpu_to_le16(0x0002),
85763 +       RECORD_FLAG_SYSTEM      = cpu_to_le16(0x0004),
85764 +       RECORD_FLAG_UNKNOWN     = cpu_to_le16(0x0008),
85767 +/* MFT Record structure */
85768 +struct MFT_REC {
85769 +       struct NTFS_RECORD_HEADER rhdr; // 'FILE'
85771 +       __le16 seq;             // 0x10: Sequence number for this record
85772 +       __le16 hard_links;      // 0x12: The number of hard links to record
85773 +       __le16 attr_off;        // 0x14: Offset to attributes
85774 +       __le16 flags;           // 0x16: See RECORD_FLAG
85775 +       __le32 used;            // 0x18: The size of used part
85776 +       __le32 total;           // 0x1C: Total record size
85778 +       struct MFT_REF parent_ref; // 0x20: Parent MFT record
85779 +       __le16 next_attr_id;    // 0x28: The next attribute Id
85781 +       __le16 res;             // 0x2A: High part of mft record?
85782 +       __le32 mft_record;      // 0x2C: Current mft record number
85783 +       __le16 fixups[];        // 0x30:
85786 +#define MFTRECORD_FIXUP_OFFSET_1 offsetof(struct MFT_REC, res)
85787 +#define MFTRECORD_FIXUP_OFFSET_3 offsetof(struct MFT_REC, fixups)
85789 +static_assert(MFTRECORD_FIXUP_OFFSET_1 == 0x2A);
85790 +static_assert(MFTRECORD_FIXUP_OFFSET_3 == 0x30);
85792 +static inline bool is_rec_base(const struct MFT_REC *rec)
85794 +       const struct MFT_REF *r = &rec->parent_ref;
85796 +       return !r->low && !r->high && !r->seq;
85799 +static inline bool is_mft_rec5(const struct MFT_REC *rec)
85801 +       return le16_to_cpu(rec->rhdr.fix_off) >=
85802 +              offsetof(struct MFT_REC, fixups);
85805 +static inline bool is_rec_inuse(const struct MFT_REC *rec)
85807 +       return rec->flags & RECORD_FLAG_IN_USE;
85810 +static inline bool clear_rec_inuse(struct MFT_REC *rec)
85812 +       return rec->flags &= ~RECORD_FLAG_IN_USE;
85815 +/* Possible values of ATTR_RESIDENT.flags */
85816 +#define RESIDENT_FLAG_INDEXED 0x01
85818 +struct ATTR_RESIDENT {
85819 +       __le32 data_size;       // 0x10: The size of data
85820 +       __le16 data_off;        // 0x14: Offset to data
85821 +       u8 flags;               // 0x16: resident flags ( 1 - indexed )
85822 +       u8 res;                 // 0x17:
85823 +}; // sizeof() = 0x18
85825 +struct ATTR_NONRESIDENT {
85826 +       __le64 svcn;            // 0x10: Starting VCN of this segment
85827 +       __le64 evcn;            // 0x18: End VCN of this segment
85828 +       __le16 run_off;         // 0x20: Offset to packed runs
85829 +       //  Unit of Compression size for this stream, expressed
85830 +       //  as a log of the cluster size.
85831 +       //
85832 +       //      0 means file is not compressed
85833 +       //      1, 2, 3, and 4 are potentially legal values if the
85834 +       //          stream is compressed, however the implementation
85835 +       //          may only choose to use 4, or possibly 3.  Note
85836 +       //          that 4 means cluster size time 16.  If convenient
85837 +       //          the implementation may wish to accept a
85838 +       //          reasonable range of legal values here (1-5?),
85839 +       //          even if the implementation only generates
85840 +       //          a smaller set of values itself.
85841 +       u8 c_unit;              // 0x22
85842 +       u8 res1[5];             // 0x23:
85843 +       __le64 alloc_size;      // 0x28: The allocated size of attribute in bytes
85844 +                               // (multiple of cluster size)
85845 +       __le64 data_size;       // 0x30: The size of attribute  in bytes <= alloc_size
85846 +       __le64 valid_size;      // 0x38: The size of valid part in bytes <= data_size
85847 +       __le64 total_size;      // 0x40: The sum of the allocated clusters for a file
85848 +                               // (present only for the first segment (0 == vcn)
85849 +                               // of compressed attribute)
85851 +}; // sizeof()=0x40 or 0x48 (if compressed)
85853 +/* Possible values of ATTRIB.flags: */
85854 +#define ATTR_FLAG_COMPRESSED     cpu_to_le16(0x0001)
85855 +#define ATTR_FLAG_COMPRESSED_MASK cpu_to_le16(0x00FF)
85856 +#define ATTR_FLAG_ENCRYPTED      cpu_to_le16(0x4000)
85857 +#define ATTR_FLAG_SPARSED        cpu_to_le16(0x8000)
85859 +struct ATTRIB {
85860 +       enum ATTR_TYPE type;    // 0x00: The type of this attribute
85861 +       __le32 size;            // 0x04: The size of this attribute
85862 +       u8 non_res;             // 0x08: Is this attribute non-resident ?
85863 +       u8 name_len;            // 0x09: This attribute name length
85864 +       __le16 name_off;        // 0x0A: Offset to the attribute name
85865 +       __le16 flags;           // 0x0C: See ATTR_FLAG_XXX
85866 +       __le16 id;              // 0x0E: unique id (per record)
85868 +       union {
85869 +               struct ATTR_RESIDENT res;     // 0x10
85870 +               struct ATTR_NONRESIDENT nres; // 0x10
85871 +       };
85874 +/* Define attribute sizes */
85875 +#define SIZEOF_RESIDENT                        0x18
85876 +#define SIZEOF_NONRESIDENT_EX          0x48
85877 +#define SIZEOF_NONRESIDENT             0x40
85879 +#define SIZEOF_RESIDENT_LE             cpu_to_le16(0x18)
85880 +#define SIZEOF_NONRESIDENT_EX_LE       cpu_to_le16(0x48)
85881 +#define SIZEOF_NONRESIDENT_LE          cpu_to_le16(0x40)
85883 +static inline u64 attr_ondisk_size(const struct ATTRIB *attr)
85885 +       return attr->non_res ? ((attr->flags &
85886 +                                (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ?
85887 +                                       le64_to_cpu(attr->nres.total_size) :
85888 +                                       le64_to_cpu(attr->nres.alloc_size)) :
85889 +                              QuadAlign(le32_to_cpu(attr->res.data_size));
85892 +static inline u64 attr_size(const struct ATTRIB *attr)
85894 +       return attr->non_res ? le64_to_cpu(attr->nres.data_size) :
85895 +                              le32_to_cpu(attr->res.data_size);
85898 +static inline bool is_attr_encrypted(const struct ATTRIB *attr)
85900 +       return attr->flags & ATTR_FLAG_ENCRYPTED;
85903 +static inline bool is_attr_sparsed(const struct ATTRIB *attr)
85905 +       return attr->flags & ATTR_FLAG_SPARSED;
85908 +static inline bool is_attr_compressed(const struct ATTRIB *attr)
85910 +       return attr->flags & ATTR_FLAG_COMPRESSED;
85913 +static inline bool is_attr_ext(const struct ATTRIB *attr)
85915 +       return attr->flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED);
85918 +static inline bool is_attr_indexed(const struct ATTRIB *attr)
85920 +       return !attr->non_res && (attr->res.flags & RESIDENT_FLAG_INDEXED);
85923 +static inline __le16 const *attr_name(const struct ATTRIB *attr)
85925 +       return Add2Ptr(attr, le16_to_cpu(attr->name_off));
85928 +static inline u64 attr_svcn(const struct ATTRIB *attr)
85930 +       return attr->non_res ? le64_to_cpu(attr->nres.svcn) : 0;
85933 +/* the size of resident attribute by its resident size */
85934 +#define BYTES_PER_RESIDENT(b) (0x18 + (b))
85936 +static_assert(sizeof(struct ATTRIB) == 0x48);
85937 +static_assert(sizeof(((struct ATTRIB *)NULL)->res) == 0x08);
85938 +static_assert(sizeof(((struct ATTRIB *)NULL)->nres) == 0x38);
85940 +static inline void *resident_data_ex(const struct ATTRIB *attr, u32 datasize)
85942 +       u32 asize, rsize;
85943 +       u16 off;
85945 +       if (attr->non_res)
85946 +               return NULL;
85948 +       asize = le32_to_cpu(attr->size);
85949 +       off = le16_to_cpu(attr->res.data_off);
85951 +       if (asize < datasize + off)
85952 +               return NULL;
85954 +       rsize = le32_to_cpu(attr->res.data_size);
85955 +       if (rsize < datasize)
85956 +               return NULL;
85958 +       return Add2Ptr(attr, off);
85961 +static inline void *resident_data(const struct ATTRIB *attr)
85963 +       return Add2Ptr(attr, le16_to_cpu(attr->res.data_off));
85966 +static inline void *attr_run(const struct ATTRIB *attr)
85968 +       return Add2Ptr(attr, le16_to_cpu(attr->nres.run_off));
85971 +/* Standard information attribute (0x10) */
85972 +struct ATTR_STD_INFO {
85973 +       __le64 cr_time;         // 0x00: File creation file
85974 +       __le64 m_time;          // 0x08: File modification time
85975 +       __le64 c_time;          // 0x10: Last time any attribute was modified
85976 +       __le64 a_time;          // 0x18: File last access time
85977 +       enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more
85978 +       __le32 max_ver_num;     // 0x24: Maximum Number of Versions
85979 +       __le32 ver_num;         // 0x28: Version Number
85980 +       __le32 class_id;        // 0x2C: Class Id from bidirectional Class Id index
85983 +static_assert(sizeof(struct ATTR_STD_INFO) == 0x30);
85985 +#define SECURITY_ID_INVALID 0x00000000
85986 +#define SECURITY_ID_FIRST 0x00000100
85988 +struct ATTR_STD_INFO5 {
85989 +       __le64 cr_time;         // 0x00: File creation file
85990 +       __le64 m_time;          // 0x08: File modification time
85991 +       __le64 c_time;          // 0x10: Last time any attribute was modified
85992 +       __le64 a_time;          // 0x18: File last access time
85993 +       enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more
85994 +       __le32 max_ver_num;     // 0x24: Maximum Number of Versions
85995 +       __le32 ver_num;         // 0x28: Version Number
85996 +       __le32 class_id;        // 0x2C: Class Id from bidirectional Class Id index
85998 +       __le32 owner_id;        // 0x30: Owner Id of the user owning the file.
85999 +       __le32 security_id;     // 0x34: The Security Id is a key in the $SII Index and $SDS
86000 +       __le64 quota_charge;    // 0x38:
86001 +       __le64 usn;             // 0x40: Last Update Sequence Number of the file. This is a direct
86002 +                               // index into the file $UsnJrnl. If zero, the USN Journal is
86003 +                               // disabled.
86006 +static_assert(sizeof(struct ATTR_STD_INFO5) == 0x48);
86008 +/* attribute list entry structure (0x20) */
86009 +struct ATTR_LIST_ENTRY {
86010 +       enum ATTR_TYPE type;    // 0x00: The type of attribute
86011 +       __le16 size;            // 0x04: The size of this record
86012 +       u8 name_len;            // 0x06: The length of attribute name
86013 +       u8 name_off;            // 0x07: The offset to attribute name
86014 +       __le64 vcn;             // 0x08: Starting VCN of this attribute
86015 +       struct MFT_REF ref;     // 0x10: MFT record number with attribute
86016 +       __le16 id;              // 0x18: struct ATTRIB ID
86017 +       __le16 name[3];         // 0x1A: Just to align. To get real name can use bNameOffset
86019 +}; // sizeof(0x20)
86021 +static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20);
86023 +static inline u32 le_size(u8 name_len)
86025 +       return QuadAlign(offsetof(struct ATTR_LIST_ENTRY, name) +
86026 +                        name_len * sizeof(short));
86029 +/* returns 0 if 'attr' has the same type and name */
86030 +static inline int le_cmp(const struct ATTR_LIST_ENTRY *le,
86031 +                        const struct ATTRIB *attr)
86033 +       return le->type != attr->type || le->name_len != attr->name_len ||
86034 +              (!le->name_len &&
86035 +               memcmp(Add2Ptr(le, le->name_off),
86036 +                      Add2Ptr(attr, le16_to_cpu(attr->name_off)),
86037 +                      le->name_len * sizeof(short)));
86040 +static inline __le16 const *le_name(const struct ATTR_LIST_ENTRY *le)
86042 +       return Add2Ptr(le, le->name_off);
86045 +/* File name types (the field type in struct ATTR_FILE_NAME ) */
86046 +#define FILE_NAME_POSIX   0
86047 +#define FILE_NAME_UNICODE 1
86048 +#define FILE_NAME_DOS    2
86049 +#define FILE_NAME_UNICODE_AND_DOS (FILE_NAME_DOS | FILE_NAME_UNICODE)
86051 +/* Filename attribute structure (0x30) */
86052 +struct NTFS_DUP_INFO {
86053 +       __le64 cr_time;         // 0x00: File creation file
86054 +       __le64 m_time;          // 0x08: File modification time
86055 +       __le64 c_time;          // 0x10: Last time any attribute was modified
86056 +       __le64 a_time;          // 0x18: File last access time
86057 +       __le64 alloc_size;      // 0x20: Data attribute allocated size, multiple of cluster size
86058 +       __le64 data_size;       // 0x28: Data attribute size <= Dataalloc_size
86059 +       enum FILE_ATTRIBUTE fa; // 0x30: Standard DOS attributes & more
86060 +       __le16 ea_size;         // 0x34: Packed EAs
86061 +       __le16 reparse;         // 0x36: Used by Reparse
86063 +}; // 0x38
86065 +struct ATTR_FILE_NAME {
86066 +       struct MFT_REF home;    // 0x00: MFT record for directory
86067 +       struct NTFS_DUP_INFO dup;// 0x08
86068 +       u8 name_len;            // 0x40: File name length in words
86069 +       u8 type;                // 0x41: File name type
86070 +       __le16 name[];          // 0x42: File name
86073 +static_assert(sizeof(((struct ATTR_FILE_NAME *)NULL)->dup) == 0x38);
86074 +static_assert(offsetof(struct ATTR_FILE_NAME, name) == 0x42);
86075 +#define SIZEOF_ATTRIBUTE_FILENAME     0x44
86076 +#define SIZEOF_ATTRIBUTE_FILENAME_MAX (0x42 + 255 * 2)
86078 +static inline struct ATTRIB *attr_from_name(struct ATTR_FILE_NAME *fname)
86080 +       return (struct ATTRIB *)((char *)fname - SIZEOF_RESIDENT);
86083 +static inline u16 fname_full_size(const struct ATTR_FILE_NAME *fname)
86085 +       // don't return struct_size(fname, name, fname->name_len);
86086 +       return offsetof(struct ATTR_FILE_NAME, name) +
86087 +              fname->name_len * sizeof(short);
86090 +static inline u8 paired_name(u8 type)
86092 +       if (type == FILE_NAME_UNICODE)
86093 +               return FILE_NAME_DOS;
86094 +       if (type == FILE_NAME_DOS)
86095 +               return FILE_NAME_UNICODE;
86096 +       return FILE_NAME_POSIX;
86099 +/* Index entry defines ( the field flags in NtfsDirEntry ) */
86100 +#define NTFS_IE_HAS_SUBNODES   cpu_to_le16(1)
86101 +#define NTFS_IE_LAST           cpu_to_le16(2)
86103 +/* Directory entry structure */
86104 +struct NTFS_DE {
86105 +       union {
86106 +               struct MFT_REF ref; // 0x00: MFT record number with this file
86107 +               struct {
86108 +                       __le16 data_off;  // 0x00:
86109 +                       __le16 data_size; // 0x02:
86110 +                       __le32 res;       // 0x04: must be 0
86111 +               } view;
86112 +       };
86113 +       __le16 size;            // 0x08: The size of this entry
86114 +       __le16 key_size;        // 0x0A: The size of File name length in bytes + 0x42
86115 +       __le16 flags;           // 0x0C: Entry flags: NTFS_IE_XXX
86116 +       __le16 res;             // 0x0E:
86118 +       // Here any indexed attribute can be placed
86119 +       // One of them is:
86120 +       // struct ATTR_FILE_NAME AttrFileName;
86121 +       //
86123 +       // The last 8 bytes of this structure contains
86124 +       // the VBN of subnode
86125 +       // !!! Note !!!
86126 +       // This field is presented only if (flags & NTFS_IE_HAS_SUBNODES)
86127 +       // __le64 vbn;
86130 +static_assert(sizeof(struct NTFS_DE) == 0x10);
86132 +static inline void de_set_vbn_le(struct NTFS_DE *e, __le64 vcn)
86134 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
86136 +       *v = vcn;
86139 +static inline void de_set_vbn(struct NTFS_DE *e, CLST vcn)
86141 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
86143 +       *v = cpu_to_le64(vcn);
86146 +static inline __le64 de_get_vbn_le(const struct NTFS_DE *e)
86148 +       return *(__le64 *)Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
86151 +static inline CLST de_get_vbn(const struct NTFS_DE *e)
86153 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
86155 +       return le64_to_cpu(*v);
86158 +static inline struct NTFS_DE *de_get_next(const struct NTFS_DE *e)
86160 +       return Add2Ptr(e, le16_to_cpu(e->size));
86163 +static inline struct ATTR_FILE_NAME *de_get_fname(const struct NTFS_DE *e)
86165 +       return le16_to_cpu(e->key_size) >= SIZEOF_ATTRIBUTE_FILENAME ?
86166 +                      Add2Ptr(e, sizeof(struct NTFS_DE)) :
86167 +                      NULL;
86170 +static inline bool de_is_last(const struct NTFS_DE *e)
86172 +       return e->flags & NTFS_IE_LAST;
86175 +static inline bool de_has_vcn(const struct NTFS_DE *e)
86177 +       return e->flags & NTFS_IE_HAS_SUBNODES;
86180 +static inline bool de_has_vcn_ex(const struct NTFS_DE *e)
86182 +       return (e->flags & NTFS_IE_HAS_SUBNODES) &&
86183 +              (u64)(-1) != *((u64 *)Add2Ptr(e, le16_to_cpu(e->size) -
86184 +                                                       sizeof(__le64)));
86187 +#define MAX_BYTES_PER_NAME_ENTRY                                              \
86188 +       QuadAlign(sizeof(struct NTFS_DE) +                                     \
86189 +                 offsetof(struct ATTR_FILE_NAME, name) +                      \
86190 +                 NTFS_NAME_LEN * sizeof(short))
86192 +struct INDEX_HDR {
86193 +       __le32 de_off;  // 0x00: The offset from the start of this structure
86194 +                       // to the first NTFS_DE
86195 +       __le32 used;    // 0x04: The size of this structure plus all
86196 +                       // entries (quad-word aligned)
86197 +       __le32 total;   // 0x08: The allocated size of for this structure plus all entries
86198 +       u8 flags;       // 0x0C: 0x00 = Small directory, 0x01 = Large directory
86199 +       u8 res[3];
86201 +       //
86202 +       // de_off + used <= total
86203 +       //
86206 +static_assert(sizeof(struct INDEX_HDR) == 0x10);
86208 +static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr)
86210 +       u32 de_off = le32_to_cpu(hdr->de_off);
86211 +       u32 used = le32_to_cpu(hdr->used);
86212 +       struct NTFS_DE *e = Add2Ptr(hdr, de_off);
86213 +       u16 esize;
86215 +       if (de_off >= used || de_off >= le32_to_cpu(hdr->total))
86216 +               return NULL;
86218 +       esize = le16_to_cpu(e->size);
86219 +       if (esize < sizeof(struct NTFS_DE) || de_off + esize > used)
86220 +               return NULL;
86222 +       return e;
86225 +static inline struct NTFS_DE *hdr_next_de(const struct INDEX_HDR *hdr,
86226 +                                         const struct NTFS_DE *e)
86228 +       size_t off = PtrOffset(hdr, e);
86229 +       u32 used = le32_to_cpu(hdr->used);
86230 +       u16 esize;
86232 +       if (off >= used)
86233 +               return NULL;
86235 +       esize = le16_to_cpu(e->size);
86237 +       if (esize < sizeof(struct NTFS_DE) ||
86238 +           off + esize + sizeof(struct NTFS_DE) > used)
86239 +               return NULL;
86241 +       return Add2Ptr(e, esize);
86244 +static inline bool hdr_has_subnode(const struct INDEX_HDR *hdr)
86246 +       return hdr->flags & 1;
86249 +struct INDEX_BUFFER {
86250 +       struct NTFS_RECORD_HEADER rhdr; // 'INDX'
86251 +       __le64 vbn; // 0x10: vcn if index >= cluster or vsn id index < cluster
86252 +       struct INDEX_HDR ihdr; // 0x18:
86255 +static_assert(sizeof(struct INDEX_BUFFER) == 0x28);
86257 +static inline bool ib_is_empty(const struct INDEX_BUFFER *ib)
86259 +       const struct NTFS_DE *first = hdr_first_de(&ib->ihdr);
86261 +       return !first || de_is_last(first);
86264 +static inline bool ib_is_leaf(const struct INDEX_BUFFER *ib)
86266 +       return !(ib->ihdr.flags & 1);
86269 +/* Index root structure ( 0x90 ) */
86270 +enum COLLATION_RULE {
86271 +       NTFS_COLLATION_TYPE_BINARY      = cpu_to_le32(0),
86272 +       // $I30
86273 +       NTFS_COLLATION_TYPE_FILENAME    = cpu_to_le32(0x01),
86274 +       // $SII of $Secure and $Q of Quota
86275 +       NTFS_COLLATION_TYPE_UINT        = cpu_to_le32(0x10),
86276 +       // $O of Quota
86277 +       NTFS_COLLATION_TYPE_SID         = cpu_to_le32(0x11),
86278 +       // $SDH of $Secure
86279 +       NTFS_COLLATION_TYPE_SECURITY_HASH = cpu_to_le32(0x12),
86280 +       // $O of ObjId and "$R" for Reparse
86281 +       NTFS_COLLATION_TYPE_UINTS       = cpu_to_le32(0x13)
86284 +static_assert(sizeof(enum COLLATION_RULE) == 4);
86287 +struct INDEX_ROOT {
86288 +       enum ATTR_TYPE type;    // 0x00: The type of attribute to index on
86289 +       enum COLLATION_RULE rule; // 0x04: The rule
86290 +       __le32 index_block_size;// 0x08: The size of index record
86291 +       u8 index_block_clst;    // 0x0C: The number of clusters or sectors per index
86292 +       u8 res[3];
86293 +       struct INDEX_HDR ihdr;  // 0x10:
86296 +static_assert(sizeof(struct INDEX_ROOT) == 0x20);
86297 +static_assert(offsetof(struct INDEX_ROOT, ihdr) == 0x10);
86299 +#define VOLUME_FLAG_DIRTY          cpu_to_le16(0x0001)
86300 +#define VOLUME_FLAG_RESIZE_LOG_FILE cpu_to_le16(0x0002)
86302 +struct VOLUME_INFO {
86303 +       __le64 res1;    // 0x00
86304 +       u8 major_ver;   // 0x08: NTFS major version number (before .)
86305 +       u8 minor_ver;   // 0x09: NTFS minor version number (after .)
86306 +       __le16 flags;   // 0x0A: Volume flags, see VOLUME_FLAG_XXX
86308 +}; // sizeof=0xC
86310 +#define SIZEOF_ATTRIBUTE_VOLUME_INFO 0xc
86312 +#define NTFS_LABEL_MAX_LENGTH          (0x100 / sizeof(short))
86313 +#define NTFS_ATTR_INDEXABLE            cpu_to_le32(0x00000002)
86314 +#define NTFS_ATTR_DUPALLOWED           cpu_to_le32(0x00000004)
86315 +#define NTFS_ATTR_MUST_BE_INDEXED      cpu_to_le32(0x00000010)
86316 +#define NTFS_ATTR_MUST_BE_NAMED                cpu_to_le32(0x00000020)
86317 +#define NTFS_ATTR_MUST_BE_RESIDENT     cpu_to_le32(0x00000040)
86318 +#define NTFS_ATTR_LOG_ALWAYS           cpu_to_le32(0x00000080)
86320 +/* $AttrDef file entry */
86321 +struct ATTR_DEF_ENTRY {
86322 +       __le16 name[0x40];      // 0x00: Attr name
86323 +       enum ATTR_TYPE type;    // 0x80: struct ATTRIB type
86324 +       __le32 res;             // 0x84:
86325 +       enum COLLATION_RULE rule; // 0x88:
86326 +       __le32 flags;           // 0x8C: NTFS_ATTR_XXX (see above)
86327 +       __le64 min_sz;          // 0x90: Minimum attribute data size
86328 +       __le64 max_sz;          // 0x98: Maximum attribute data size
86331 +static_assert(sizeof(struct ATTR_DEF_ENTRY) == 0xa0);
86333 +/* Object ID (0x40) */
86334 +struct OBJECT_ID {
86335 +       struct GUID ObjId;      // 0x00: Unique Id assigned to file
86336 +       struct GUID BirthVolumeId;// 0x10: Birth Volume Id is the Object Id of the Volume on
86337 +                               // which the Object Id was allocated. It never changes
86338 +       struct GUID BirthObjectId; // 0x20: Birth Object Id is the first Object Id that was
86339 +                               // ever assigned to this MFT Record. I.e. If the Object Id
86340 +                               // is changed for some reason, this field will reflect the
86341 +                               // original value of the Object Id.
86342 +       struct GUID DomainId;   // 0x30: Domain Id is currently unused but it is intended to be
86343 +                               // used in a network environment where the local machine is
86344 +                               // part of a Windows 2000 Domain. This may be used in a Windows
86345 +                               // 2000 Advanced Server managed domain.
86348 +static_assert(sizeof(struct OBJECT_ID) == 0x40);
86350 +/* O Directory entry structure ( rule = 0x13 ) */
86351 +struct NTFS_DE_O {
86352 +       struct NTFS_DE de;
86353 +       struct GUID ObjId;      // 0x10: Unique Id assigned to file
86354 +       struct MFT_REF ref;     // 0x20: MFT record number with this file
86355 +       struct GUID BirthVolumeId; // 0x28: Birth Volume Id is the Object Id of the Volume on
86356 +                               // which the Object Id was allocated. It never changes
86357 +       struct GUID BirthObjectId; // 0x38: Birth Object Id is the first Object Id that was
86358 +                               // ever assigned to this MFT Record. I.e. If the Object Id
86359 +                               // is changed for some reason, this field will reflect the
86360 +                               // original value of the Object Id.
86361 +                               // This field is valid if data_size == 0x48
86362 +       struct GUID BirthDomainId; // 0x48: Domain Id is currently unused but it is intended
86363 +                               // to be used in a network environment where the local
86364 +                               // machine is part of a Windows 2000 Domain. This may be
86365 +                               // used in a Windows 2000 Advanced Server managed domain.
86368 +static_assert(sizeof(struct NTFS_DE_O) == 0x58);
86370 +#define NTFS_OBJECT_ENTRY_DATA_SIZE1                                          \
86371 +       0x38 // struct NTFS_DE_O.BirthDomainId is not used
86372 +#define NTFS_OBJECT_ENTRY_DATA_SIZE2                                          \
86373 +       0x48 // struct NTFS_DE_O.BirthDomainId is used
86375 +/* Q Directory entry structure ( rule = 0x11 ) */
86376 +struct NTFS_DE_Q {
86377 +       struct NTFS_DE de;
86378 +       __le32 owner_id;        // 0x10: Unique Id assigned to file
86379 +       __le32 Version;         // 0x14: 0x02
86380 +       __le32 flags2;          // 0x18: Quota flags, see above
86381 +       __le64 BytesUsed;       // 0x1C:
86382 +       __le64 ChangeTime;      // 0x24:
86383 +       __le64 WarningLimit;    // 0x28:
86384 +       __le64 HardLimit;       // 0x34:
86385 +       __le64 ExceededTime;    // 0x3C:
86387 +       // SID is placed here
86388 +}; // sizeof() = 0x44
86390 +#define SIZEOF_NTFS_DE_Q 0x44
86392 +#define SecurityDescriptorsBlockSize 0x40000 // 256K
86393 +#define SecurityDescriptorMaxSize    0x20000 // 128K
86394 +#define Log2OfSecurityDescriptorsBlockSize 18
86396 +struct SECURITY_KEY {
86397 +       __le32 hash; //  Hash value for descriptor
86398 +       __le32 sec_id; //  Security Id (guaranteed unique)
86401 +/* Security descriptors (the content of $Secure::SDS data stream) */
86402 +struct SECURITY_HDR {
86403 +       struct SECURITY_KEY key;        // 0x00: Security Key
86404 +       __le64 off;                     // 0x08: Offset of this entry in the file
86405 +       __le32 size;                    // 0x10: Size of this entry, 8 byte aligned
86406 +       //
86407 +       // Security descriptor itself is placed here
86408 +       // Total size is 16 byte aligned
86409 +       //
86410 +} __packed;
86412 +#define SIZEOF_SECURITY_HDR 0x14
86414 +/* SII Directory entry structure */
86415 +struct NTFS_DE_SII {
86416 +       struct NTFS_DE de;
86417 +       __le32 sec_id;                  // 0x10: Key: sizeof(security_id) = wKeySize
86418 +       struct SECURITY_HDR sec_hdr;    // 0x14:
86419 +} __packed;
86421 +#define SIZEOF_SII_DIRENTRY 0x28
86423 +/* SDH Directory entry structure */
86424 +struct NTFS_DE_SDH {
86425 +       struct NTFS_DE de;
86426 +       struct SECURITY_KEY key;        // 0x10: Key
86427 +       struct SECURITY_HDR sec_hdr;    // 0x18: Data
86428 +       __le16 magic[2];                // 0x2C: 0x00490049 "I I"
86431 +#define SIZEOF_SDH_DIRENTRY 0x30
86433 +struct REPARSE_KEY {
86434 +       __le32 ReparseTag;              // 0x00: Reparse Tag
86435 +       struct MFT_REF ref;             // 0x04: MFT record number with this file
86436 +}; // sizeof() = 0x0C
86438 +static_assert(offsetof(struct REPARSE_KEY, ref) == 0x04);
86439 +#define SIZEOF_REPARSE_KEY 0x0C
86441 +/* Reparse Directory entry structure */
86442 +struct NTFS_DE_R {
86443 +       struct NTFS_DE de;
86444 +       struct REPARSE_KEY key;         // 0x10: Reparse Key
86445 +       u32 zero;                       // 0x1c
86446 +}; // sizeof() = 0x20
86448 +static_assert(sizeof(struct NTFS_DE_R) == 0x20);
86450 +/* CompressReparseBuffer.WofVersion */
86451 +#define WOF_CURRENT_VERSION            cpu_to_le32(1)
86452 +/* CompressReparseBuffer.WofProvider */
86453 +#define WOF_PROVIDER_WIM               cpu_to_le32(1)
86454 +/* CompressReparseBuffer.WofProvider */
86455 +#define WOF_PROVIDER_SYSTEM            cpu_to_le32(2)
86456 +/* CompressReparseBuffer.ProviderVer */
86457 +#define WOF_PROVIDER_CURRENT_VERSION   cpu_to_le32(1)
86459 +#define WOF_COMPRESSION_XPRESS4K       cpu_to_le32(0) // 4k
86460 +#define WOF_COMPRESSION_LZX32K         cpu_to_le32(1) // 32k
86461 +#define WOF_COMPRESSION_XPRESS8K       cpu_to_le32(2) // 8k
86462 +#define WOF_COMPRESSION_XPRESS16K      cpu_to_le32(3) // 16k
86465 + * ATTR_REPARSE (0xC0)
86466 + *
86467 + * The reparse struct GUID structure is used by all 3rd party layered drivers to
86468 + * store data in a reparse point. For non-Microsoft tags, The struct GUID field
86469 + * cannot be GUID_NULL.
86470 + * The constraints on reparse tags are defined below.
86471 + * Microsoft tags can also be used with this format of the reparse point buffer.
86472 + */
86473 +struct REPARSE_POINT {
86474 +       __le32 ReparseTag;      // 0x00:
86475 +       __le16 ReparseDataLength;// 0x04:
86476 +       __le16 Reserved;
86478 +       struct GUID Guid;       // 0x08:
86480 +       //
86481 +       // Here GenericReparseBuffer is placed
86482 +       //
86485 +static_assert(sizeof(struct REPARSE_POINT) == 0x18);
86488 +// Maximum allowed size of the reparse data.
86490 +#define MAXIMUM_REPARSE_DATA_BUFFER_SIZE       (16 * 1024)
86493 +// The value of the following constant needs to satisfy the following
86494 +// conditions:
86495 +//  (1) Be at least as large as the largest of the reserved tags.
86496 +//  (2) Be strictly smaller than all the tags in use.
86498 +#define IO_REPARSE_TAG_RESERVED_RANGE          1
86501 +// The reparse tags are a ULONG. The 32 bits are laid out as follows:
86503 +//   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
86504 +//   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
86505 +//  +-+-+-+-+-----------------------+-------------------------------+
86506 +//  |M|R|N|R|    Reserved bits     |       Reparse Tag Value       |
86507 +//  +-+-+-+-+-----------------------+-------------------------------+
86509 +// M is the Microsoft bit. When set to 1, it denotes a tag owned by Microsoft.
86510 +//   All ISVs must use a tag with a 0 in this position.
86511 +//   Note: If a Microsoft tag is used by non-Microsoft software, the
86512 +//   behavior is not defined.
86514 +// R is reserved.  Must be zero for non-Microsoft tags.
86516 +// N is name surrogate. When set to 1, the file represents another named
86517 +//   entity in the system.
86519 +// The M and N bits are OR-able.
86520 +// The following macros check for the M and N bit values:
86524 +// Macro to determine whether a reparse point tag corresponds to a tag
86525 +// owned by Microsoft.
86527 +#define IsReparseTagMicrosoft(_tag)    (((_tag)&IO_REPARSE_TAG_MICROSOFT))
86530 +// Macro to determine whether a reparse point tag is a name surrogate
86532 +#define IsReparseTagNameSurrogate(_tag)        (((_tag)&IO_REPARSE_TAG_NAME_SURROGATE))
86535 +// The following constant represents the bits that are valid to use in
86536 +// reparse tags.
86538 +#define IO_REPARSE_TAG_VALID_VALUES    0xF000FFFF
86541 +// Macro to determine whether a reparse tag is a valid tag.
86543 +#define IsReparseTagValid(_tag)                                                       \
86544 +       (!((_tag) & ~IO_REPARSE_TAG_VALID_VALUES) &&                           \
86545 +        ((_tag) > IO_REPARSE_TAG_RESERVED_RANGE))
86548 +// Microsoft tags for reparse points.
86551 +enum IO_REPARSE_TAG {
86552 +       IO_REPARSE_TAG_SYMBOLIC_LINK    = cpu_to_le32(0),
86553 +       IO_REPARSE_TAG_NAME_SURROGATE   = cpu_to_le32(0x20000000),
86554 +       IO_REPARSE_TAG_MICROSOFT        = cpu_to_le32(0x80000000),
86555 +       IO_REPARSE_TAG_MOUNT_POINT      = cpu_to_le32(0xA0000003),
86556 +       IO_REPARSE_TAG_SYMLINK          = cpu_to_le32(0xA000000C),
86557 +       IO_REPARSE_TAG_HSM              = cpu_to_le32(0xC0000004),
86558 +       IO_REPARSE_TAG_SIS              = cpu_to_le32(0x80000007),
86559 +       IO_REPARSE_TAG_DEDUP            = cpu_to_le32(0x80000013),
86560 +       IO_REPARSE_TAG_COMPRESS         = cpu_to_le32(0x80000017),
86562 +       //
86563 +       // The reparse tag 0x80000008 is reserved for Microsoft internal use
86564 +       // (may be published in the future)
86565 +       //
86567 +       //
86568 +       // Microsoft reparse tag reserved for DFS
86569 +       //
86570 +       IO_REPARSE_TAG_DFS              = cpu_to_le32(0x8000000A),
86572 +       //
86573 +       // Microsoft reparse tag reserved for the file system filter manager
86574 +       //
86575 +       IO_REPARSE_TAG_FILTER_MANAGER   = cpu_to_le32(0x8000000B),
86577 +       //
86578 +       // Non-Microsoft tags for reparse points
86579 +       //
86581 +       //
86582 +       // Tag allocated to CONGRUENT, May 2000. Used by IFSTEST
86583 +       //
86584 +       IO_REPARSE_TAG_IFSTEST_CONGRUENT = cpu_to_le32(0x00000009),
86586 +       //
86587 +       // Tag allocated to ARKIVIO
86588 +       //
86589 +       IO_REPARSE_TAG_ARKIVIO          = cpu_to_le32(0x0000000C),
86591 +       //
86592 +       //  Tag allocated to SOLUTIONSOFT
86593 +       //
86594 +       IO_REPARSE_TAG_SOLUTIONSOFT     = cpu_to_le32(0x2000000D),
86596 +       //
86597 +       //  Tag allocated to COMMVAULT
86598 +       //
86599 +       IO_REPARSE_TAG_COMMVAULT        = cpu_to_le32(0x0000000E),
86601 +       // OneDrive??
86602 +       IO_REPARSE_TAG_CLOUD            = cpu_to_le32(0x9000001A),
86603 +       IO_REPARSE_TAG_CLOUD_1          = cpu_to_le32(0x9000101A),
86604 +       IO_REPARSE_TAG_CLOUD_2          = cpu_to_le32(0x9000201A),
86605 +       IO_REPARSE_TAG_CLOUD_3          = cpu_to_le32(0x9000301A),
86606 +       IO_REPARSE_TAG_CLOUD_4          = cpu_to_le32(0x9000401A),
86607 +       IO_REPARSE_TAG_CLOUD_5          = cpu_to_le32(0x9000501A),
86608 +       IO_REPARSE_TAG_CLOUD_6          = cpu_to_le32(0x9000601A),
86609 +       IO_REPARSE_TAG_CLOUD_7          = cpu_to_le32(0x9000701A),
86610 +       IO_REPARSE_TAG_CLOUD_8          = cpu_to_le32(0x9000801A),
86611 +       IO_REPARSE_TAG_CLOUD_9          = cpu_to_le32(0x9000901A),
86612 +       IO_REPARSE_TAG_CLOUD_A          = cpu_to_le32(0x9000A01A),
86613 +       IO_REPARSE_TAG_CLOUD_B          = cpu_to_le32(0x9000B01A),
86614 +       IO_REPARSE_TAG_CLOUD_C          = cpu_to_le32(0x9000C01A),
86615 +       IO_REPARSE_TAG_CLOUD_D          = cpu_to_le32(0x9000D01A),
86616 +       IO_REPARSE_TAG_CLOUD_E          = cpu_to_le32(0x9000E01A),
86617 +       IO_REPARSE_TAG_CLOUD_F          = cpu_to_le32(0x9000F01A),
86621 +#define SYMLINK_FLAG_RELATIVE          1
86623 +/* Microsoft reparse buffer. (see DDK for details) */
86624 +struct REPARSE_DATA_BUFFER {
86625 +       __le32 ReparseTag;              // 0x00:
86626 +       __le16 ReparseDataLength;       // 0x04:
86627 +       __le16 Reserved;
86629 +       union {
86630 +               // If ReparseTag == 0xA0000003 (IO_REPARSE_TAG_MOUNT_POINT)
86631 +               struct {
86632 +                       __le16 SubstituteNameOffset; // 0x08
86633 +                       __le16 SubstituteNameLength; // 0x0A
86634 +                       __le16 PrintNameOffset;      // 0x0C
86635 +                       __le16 PrintNameLength;      // 0x0E
86636 +                       __le16 PathBuffer[];         // 0x10
86637 +               } MountPointReparseBuffer;
86639 +               // If ReparseTag == 0xA000000C (IO_REPARSE_TAG_SYMLINK)
86640 +               // https://msdn.microsoft.com/en-us/library/cc232006.aspx
86641 +               struct {
86642 +                       __le16 SubstituteNameOffset; // 0x08
86643 +                       __le16 SubstituteNameLength; // 0x0A
86644 +                       __le16 PrintNameOffset;      // 0x0C
86645 +                       __le16 PrintNameLength;      // 0x0E
86646 +                       // 0-absolute path 1- relative path, SYMLINK_FLAG_RELATIVE
86647 +                       __le32 Flags;                // 0x10
86648 +                       __le16 PathBuffer[];         // 0x14
86649 +               } SymbolicLinkReparseBuffer;
86651 +               // If ReparseTag == 0x80000017U
86652 +               struct {
86653 +                       __le32 WofVersion;  // 0x08 == 1
86654 +                       /* 1 - WIM backing provider ("WIMBoot"),
86655 +                        * 2 - System compressed file provider
86656 +                        */
86657 +                       __le32 WofProvider; // 0x0C
86658 +                       __le32 ProviderVer; // 0x10: == 1 WOF_FILE_PROVIDER_CURRENT_VERSION == 1
86659 +                       __le32 CompressionFormat; // 0x14: 0, 1, 2, 3. See WOF_COMPRESSION_XXX
86660 +               } CompressReparseBuffer;
86662 +               struct {
86663 +                       u8 DataBuffer[1];   // 0x08
86664 +               } GenericReparseBuffer;
86665 +       };
86668 +/* ATTR_EA_INFO (0xD0) */
86670 +#define FILE_NEED_EA 0x80 // See ntifs.h
86671 +/* FILE_NEED_EA, indicates that the file to which the EA belongs cannot be
86672 + * interpreted without understanding the associated extended attributes.
86673 + */
86674 +struct EA_INFO {
86675 +       __le16 size_pack;       // 0x00: Size of buffer to hold in packed form
86676 +       __le16 count;           // 0x02: Count of EA's with FILE_NEED_EA bit set
86677 +       __le32 size;            // 0x04: Size of buffer to hold in unpacked form
86680 +static_assert(sizeof(struct EA_INFO) == 8);
86682 +/* ATTR_EA (0xE0) */
86683 +struct EA_FULL {
86684 +       __le32 size;            // 0x00: (not in packed)
86685 +       u8 flags;               // 0x04
86686 +       u8 name_len;            // 0x05
86687 +       __le16 elength;         // 0x06
86688 +       u8 name[];              // 0x08
86691 +static_assert(offsetof(struct EA_FULL, name) == 8);
86693 +#define ACL_REVISION   2
86694 +#define ACL_REVISION_DS 4
86696 +#define SE_SELF_RELATIVE cpu_to_le16(0x8000)
86698 +struct SECURITY_DESCRIPTOR_RELATIVE {
86699 +       u8 Revision;
86700 +       u8 Sbz1;
86701 +       __le16 Control;
86702 +       __le32 Owner;
86703 +       __le32 Group;
86704 +       __le32 Sacl;
86705 +       __le32 Dacl;
86707 +static_assert(sizeof(struct SECURITY_DESCRIPTOR_RELATIVE) == 0x14);
86709 +struct ACE_HEADER {
86710 +       u8 AceType;
86711 +       u8 AceFlags;
86712 +       __le16 AceSize;
86714 +static_assert(sizeof(struct ACE_HEADER) == 4);
86716 +struct ACL {
86717 +       u8 AclRevision;
86718 +       u8 Sbz1;
86719 +       __le16 AclSize;
86720 +       __le16 AceCount;
86721 +       __le16 Sbz2;
86723 +static_assert(sizeof(struct ACL) == 8);
86725 +struct SID {
86726 +       u8 Revision;
86727 +       u8 SubAuthorityCount;
86728 +       u8 IdentifierAuthority[6];
86729 +       __le32 SubAuthority[];
86731 +static_assert(offsetof(struct SID, SubAuthority) == 8);
86733 +// clang-format on
86734 diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
86735 new file mode 100644
86736 index 000000000000..5e1dd628d3cc
86737 --- /dev/null
86738 +++ b/fs/ntfs3/ntfs_fs.h
86739 @@ -0,0 +1,1085 @@
86740 +/* SPDX-License-Identifier: GPL-2.0 */
86742 + *
86743 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
86744 + *
86745 + */
86747 +// clang-format off
86748 +#define MINUS_ONE_T                    ((size_t)(-1))
86749 +/* Biggest MFT / smallest cluster */
86750 +#define MAXIMUM_BYTES_PER_MFT          4096
86751 +#define NTFS_BLOCKS_PER_MFT_RECORD     (MAXIMUM_BYTES_PER_MFT / 512)
86753 +#define MAXIMUM_BYTES_PER_INDEX                4096
86754 +#define NTFS_BLOCKS_PER_INODE          (MAXIMUM_BYTES_PER_INDEX / 512)
86756 +/* ntfs specific error code when fixup failed*/
86757 +#define E_NTFS_FIXUP                   555
86758 +/* ntfs specific error code about resident->nonresident*/
86759 +#define E_NTFS_NONRESIDENT             556
86761 +/* sbi->flags */
86762 +#define NTFS_FLAGS_NODISCARD           0x00000001
86763 +/* Set when LogFile is replaying */
86764 +#define NTFS_FLAGS_LOG_REPLAYING       0x00000008
86765 +/* Set when we changed first MFT's which copy must be updated in $MftMirr */
86766 +#define NTFS_FLAGS_MFTMIRR             0x00001000
86767 +#define NTFS_FLAGS_NEED_REPLAY         0x04000000
86770 +/* ni->ni_flags */
86772 + * Data attribute is external compressed (lzx/xpress)
86773 + * 1 - WOF_COMPRESSION_XPRESS4K
86774 + * 2 - WOF_COMPRESSION_XPRESS8K
86775 + * 3 - WOF_COMPRESSION_XPRESS16K
86776 + * 4 - WOF_COMPRESSION_LZX32K
86777 + */
86778 +#define NI_FLAG_COMPRESSED_MASK                0x0000000f
86779 +/* Data attribute is deduplicated */
86780 +#define NI_FLAG_DEDUPLICATED           0x00000010
86781 +#define NI_FLAG_EA                     0x00000020
86782 +#define NI_FLAG_DIR                    0x00000040
86783 +#define NI_FLAG_RESIDENT               0x00000080
86784 +#define NI_FLAG_UPDATE_PARENT          0x00000100
86785 +// clang-format on
86787 +struct ntfs_mount_options {
86788 +       struct nls_table *nls;
86790 +       kuid_t fs_uid;
86791 +       kgid_t fs_gid;
86792 +       u16 fs_fmask_inv;
86793 +       u16 fs_dmask_inv;
86795 +       unsigned uid : 1, /* uid was set */
86796 +               gid : 1, /* gid was set */
86797 +               fmask : 1, /* fmask was set */
86798 +               dmask : 1, /*dmask was set*/
86799 +               sys_immutable : 1, /* immutable system files */
86800 +               discard : 1, /* issue discard requests on deletions */
86801 +               sparse : 1, /*create sparse files*/
86802 +               showmeta : 1, /*show meta files*/
86803 +               nohidden : 1, /*do not show hidden files*/
86804 +               force : 1, /*rw mount dirty volume*/
86805 +               no_acs_rules : 1, /*exclude acs rules*/
86806 +               prealloc : 1 /*preallocate space when file is growing*/
86807 +               ;
86810 +/* special value to unpack and deallocate*/
86811 +#define RUN_DEALLOCATE ((struct runs_tree *)(size_t)1)
86813 +/* TODO: use rb tree instead of array */
86814 +struct runs_tree {
86815 +       struct ntfs_run *runs;
86816 +       size_t count; // Currently used size a ntfs_run storage.
86817 +       size_t allocated; // Currently allocated ntfs_run storage size.
86820 +struct ntfs_buffers {
86821 +       /* Biggest MFT / smallest cluster = 4096 / 512 = 8 */
86822 +       /* Biggest index / smallest cluster = 4096 / 512 = 8 */
86823 +       struct buffer_head *bh[PAGE_SIZE >> SECTOR_SHIFT];
86824 +       u32 bytes;
86825 +       u32 nbufs;
86826 +       u32 off;
86829 +enum ALLOCATE_OPT {
86830 +       ALLOCATE_DEF = 0, // Allocate all clusters
86831 +       ALLOCATE_MFT = 1, // Allocate for MFT
86834 +enum bitmap_mutex_classes {
86835 +       BITMAP_MUTEX_CLUSTERS = 0,
86836 +       BITMAP_MUTEX_MFT = 1,
86839 +struct wnd_bitmap {
86840 +       struct super_block *sb;
86841 +       struct rw_semaphore rw_lock;
86843 +       struct runs_tree run;
86844 +       size_t nbits;
86846 +       size_t total_zeroes; // total number of free bits
86847 +       u16 *free_bits; // free bits in each window
86848 +       size_t nwnd;
86849 +       u32 bits_last; // bits in last window
86851 +       struct rb_root start_tree; // extents, sorted by 'start'
86852 +       struct rb_root count_tree; // extents, sorted by 'count + start'
86853 +       size_t count; // extents count
86855 +       /*
86856 +        * -1 Tree is activated but not updated (too many fragments)
86857 +        * 0 - Tree is not activated
86858 +        * 1 - Tree is activated and updated
86859 +        */
86860 +       int uptodated;
86861 +       size_t extent_min; // Minimal extent used while building
86862 +       size_t extent_max; // Upper estimate of biggest free block
86864 +       /* Zone [bit, end) */
86865 +       size_t zone_bit;
86866 +       size_t zone_end;
86868 +       bool set_tail; // not necessary in driver
86869 +       bool inited;
86872 +typedef int (*NTFS_CMP_FUNC)(const void *key1, size_t len1, const void *key2,
86873 +                            size_t len2, const void *param);
86875 +enum index_mutex_classed {
86876 +       INDEX_MUTEX_I30 = 0,
86877 +       INDEX_MUTEX_SII = 1,
86878 +       INDEX_MUTEX_SDH = 2,
86879 +       INDEX_MUTEX_SO = 3,
86880 +       INDEX_MUTEX_SQ = 4,
86881 +       INDEX_MUTEX_SR = 5,
86882 +       INDEX_MUTEX_TOTAL
86885 +/* ntfs_index - allocation unit inside directory */
86886 +struct ntfs_index {
86887 +       struct runs_tree bitmap_run;
86888 +       struct runs_tree alloc_run;
86889 +       /* read/write access to 'bitmap_run'/'alloc_run' while ntfs_readdir */
86890 +       struct rw_semaphore run_lock;
86892 +       /*TODO: remove 'cmp'*/
86893 +       NTFS_CMP_FUNC cmp;
86895 +       u8 index_bits; // log2(root->index_block_size)
86896 +       u8 idx2vbn_bits; // log2(root->index_block_clst)
86897 +       u8 vbn2vbo_bits; // index_block_size < cluster? 9 : cluster_bits
86898 +       u8 type; // index_mutex_classed
86901 +/* Minimum mft zone */
86902 +#define NTFS_MIN_MFT_ZONE 100
86904 +/* ntfs file system in-core superblock data */
86905 +struct ntfs_sb_info {
86906 +       struct super_block *sb;
86908 +       u32 discard_granularity;
86909 +       u64 discard_granularity_mask_inv; // ~(discard_granularity_mask_inv-1)
86911 +       u32 cluster_size; // bytes per cluster
86912 +       u32 cluster_mask; // == cluster_size - 1
86913 +       u64 cluster_mask_inv; // ~(cluster_size - 1)
86914 +       u32 block_mask; // sb->s_blocksize - 1
86915 +       u32 blocks_per_cluster; // cluster_size / sb->s_blocksize
86917 +       u32 record_size;
86918 +       u32 sector_size;
86919 +       u32 index_size;
86921 +       u8 sector_bits;
86922 +       u8 cluster_bits;
86923 +       u8 record_bits;
86925 +       u64 maxbytes; // Maximum size for normal files
86926 +       u64 maxbytes_sparse; // Maximum size for sparse file
86928 +       u32 flags; // See NTFS_FLAGS_XXX
86930 +       CLST bad_clusters; // The count of marked bad clusters
86932 +       u16 max_bytes_per_attr; // maximum attribute size in record
86933 +       u16 attr_size_tr; // attribute size threshold (320 bytes)
86935 +       /* Records in $Extend */
86936 +       CLST objid_no;
86937 +       CLST quota_no;
86938 +       CLST reparse_no;
86939 +       CLST usn_jrnl_no;
86941 +       struct ATTR_DEF_ENTRY *def_table; // attribute definition table
86942 +       u32 def_entries;
86943 +       u32 ea_max_size;
86945 +       struct MFT_REC *new_rec;
86947 +       u16 *upcase;
86949 +       struct {
86950 +               u64 lbo, lbo2;
86951 +               struct ntfs_inode *ni;
86952 +               struct wnd_bitmap bitmap; // $MFT::Bitmap
86953 +               /*
86954 +                * MFT records [11-24) used to expand MFT itself
86955 +                * They always marked as used in $MFT::Bitmap
86956 +                * 'reserved_bitmap' contains real bitmap of these records
86957 +                */
86958 +               ulong reserved_bitmap; // bitmap of used records [11 - 24)
86959 +               size_t next_free; // The next record to allocate from
86960 +               size_t used; // mft valid size in records
86961 +               u32 recs_mirr; // Number of records in MFTMirr
86962 +               u8 next_reserved;
86963 +               u8 reserved_bitmap_inited;
86964 +       } mft;
86966 +       struct {
86967 +               struct wnd_bitmap bitmap; // $Bitmap::Data
86968 +               CLST next_free_lcn;
86969 +       } used;
86971 +       struct {
86972 +               u64 size; // in bytes
86973 +               u64 blocks; // in blocks
86974 +               u64 ser_num;
86975 +               struct ntfs_inode *ni;
86976 +               __le16 flags; // cached current VOLUME_INFO::flags, VOLUME_FLAG_DIRTY
86977 +               u8 major_ver;
86978 +               u8 minor_ver;
86979 +               char label[65];
86980 +               bool real_dirty; /* real fs state*/
86981 +       } volume;
86983 +       struct {
86984 +               struct ntfs_index index_sii;
86985 +               struct ntfs_index index_sdh;
86986 +               struct ntfs_inode *ni;
86987 +               u32 next_id;
86988 +               u64 next_off;
86990 +               __le32 def_security_id;
86991 +       } security;
86993 +       struct {
86994 +               struct ntfs_index index_r;
86995 +               struct ntfs_inode *ni;
86996 +               u64 max_size; // 16K
86997 +       } reparse;
86999 +       struct {
87000 +               struct ntfs_index index_o;
87001 +               struct ntfs_inode *ni;
87002 +       } objid;
87004 +       struct {
87005 +               struct mutex mtx_lznt;
87006 +               struct lznt *lznt;
87007 +#ifdef CONFIG_NTFS3_LZX_XPRESS
87008 +               struct mutex mtx_xpress;
87009 +               struct xpress_decompressor *xpress;
87010 +               struct mutex mtx_lzx;
87011 +               struct lzx_decompressor *lzx;
87012 +#endif
87013 +       } compress;
87015 +       struct ntfs_mount_options options;
87016 +       struct ratelimit_state msg_ratelimit;
87020 + * one MFT record(usually 1024 bytes), consists of attributes
87021 + */
87022 +struct mft_inode {
87023 +       struct rb_node node;
87024 +       struct ntfs_sb_info *sbi;
87026 +       struct MFT_REC *mrec;
87027 +       struct ntfs_buffers nb;
87029 +       CLST rno;
87030 +       bool dirty;
87033 +/* nested class for ntfs_inode::ni_lock */
87034 +enum ntfs_inode_mutex_lock_class {
87035 +       NTFS_INODE_MUTEX_DIRTY,
87036 +       NTFS_INODE_MUTEX_SECURITY,
87037 +       NTFS_INODE_MUTEX_OBJID,
87038 +       NTFS_INODE_MUTEX_REPARSE,
87039 +       NTFS_INODE_MUTEX_NORMAL,
87040 +       NTFS_INODE_MUTEX_PARENT,
87044 + * ntfs inode - extends linux inode. consists of one or more mft inodes
87045 + */
87046 +struct ntfs_inode {
87047 +       struct mft_inode mi; // base record
87049 +       /*
87050 +        * Valid size: [0 - i_valid) - these range in file contains valid data
87051 +        * Range [i_valid - inode->i_size) - contains 0
87052 +        * Usually i_valid <= inode->i_size
87053 +        */
87054 +       u64 i_valid;
87055 +       struct timespec64 i_crtime;
87057 +       struct mutex ni_lock;
87059 +       /* file attributes from std */
87060 +       enum FILE_ATTRIBUTE std_fa;
87061 +       __le32 std_security_id;
87063 +       /*
87064 +        * tree of mft_inode
87065 +        * not empty when primary MFT record (usually 1024 bytes) can't save all attributes
87066 +        * e.g. file becomes too fragmented or contains a lot of names
87067 +        */
87068 +       struct rb_root mi_tree;
87070 +       /*
87071 +        * This member is used in ntfs_readdir to ensure that all subrecords are loaded
87072 +        */
87073 +       u8 mi_loaded;
87075 +       union {
87076 +               struct ntfs_index dir;
87077 +               struct {
87078 +                       struct rw_semaphore run_lock;
87079 +                       struct runs_tree run;
87080 +#ifdef CONFIG_NTFS3_LZX_XPRESS
87081 +                       struct page *offs_page;
87082 +#endif
87083 +               } file;
87084 +       };
87086 +       struct {
87087 +               struct runs_tree run;
87088 +               struct ATTR_LIST_ENTRY *le; // 1K aligned memory
87089 +               size_t size;
87090 +               bool dirty;
87091 +       } attr_list;
87093 +       size_t ni_flags; // NI_FLAG_XXX
87095 +       struct inode vfs_inode;
87098 +struct indx_node {
87099 +       struct ntfs_buffers nb;
87100 +       struct INDEX_BUFFER *index;
87103 +struct ntfs_fnd {
87104 +       int level;
87105 +       struct indx_node *nodes[20];
87106 +       struct NTFS_DE *de[20];
87107 +       struct NTFS_DE *root_de;
87110 +enum REPARSE_SIGN {
87111 +       REPARSE_NONE = 0,
87112 +       REPARSE_COMPRESSED = 1,
87113 +       REPARSE_DEDUPLICATED = 2,
87114 +       REPARSE_LINK = 3
87117 +/* functions from attrib.c*/
87118 +int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
87119 +                  struct runs_tree *run, const CLST *vcn);
87120 +int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
87121 +                          CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
87122 +                          enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
87123 +                          CLST *new_lcn);
87124 +int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
87125 +                         struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
87126 +                         u64 new_size, struct runs_tree *run,
87127 +                         struct ATTRIB **ins_attr, struct page *page);
87128 +int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
87129 +                 const __le16 *name, u8 name_len, struct runs_tree *run,
87130 +                 u64 new_size, const u64 *new_valid, bool keep_prealloc,
87131 +                 struct ATTRIB **ret);
87132 +int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
87133 +                       CLST *len, bool *new);
87134 +int attr_data_read_resident(struct ntfs_inode *ni, struct page *page);
87135 +int attr_data_write_resident(struct ntfs_inode *ni, struct page *page);
87136 +int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
87137 +                      const __le16 *name, u8 name_len, struct runs_tree *run,
87138 +                      CLST vcn);
87139 +int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
87140 +                        const __le16 *name, u8 name_len, struct runs_tree *run,
87141 +                        u64 from, u64 to);
87142 +int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
87143 +                       struct runs_tree *run, u64 frame, u64 frames,
87144 +                       u8 frame_bits, u32 *ondisk_size, u64 *vbo_data);
87145 +int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
87146 +                            CLST frame, CLST *clst_data);
87147 +int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
87148 +                       u64 new_valid);
87149 +int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
87150 +int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes);
87152 +/* functions from attrlist.c*/
87153 +void al_destroy(struct ntfs_inode *ni);
87154 +bool al_verify(struct ntfs_inode *ni);
87155 +int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr);
87156 +struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
87157 +                                    struct ATTR_LIST_ENTRY *le);
87158 +struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni,
87159 +                                  struct ATTR_LIST_ENTRY *le,
87160 +                                  const struct ATTRIB *attr);
87161 +struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
87162 +                                  struct ATTR_LIST_ENTRY *le,
87163 +                                  enum ATTR_TYPE type, const __le16 *name,
87164 +                                  u8 name_len, const CLST *vcn);
87165 +int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
87166 +             u8 name_len, CLST svcn, __le16 id, const struct MFT_REF *ref,
87167 +             struct ATTR_LIST_ENTRY **new_le);
87168 +bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le);
87169 +bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
87170 +                 const __le16 *name, size_t name_len,
87171 +                 const struct MFT_REF *ref);
87172 +int al_update(struct ntfs_inode *ni);
87173 +static inline size_t al_aligned(size_t size)
87175 +       return (size + 1023) & ~(size_t)1023;
87178 +/* globals from bitfunc.c */
87179 +bool are_bits_clear(const ulong *map, size_t bit, size_t nbits);
87180 +bool are_bits_set(const ulong *map, size_t bit, size_t nbits);
87181 +size_t get_set_bits_ex(const ulong *map, size_t bit, size_t nbits);
87183 +/* globals from dir.c */
87184 +int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
87185 +                     u8 *buf, int buf_len);
87186 +int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
87187 +                     struct cpu_str *uni, u32 max_ulen,
87188 +                     enum utf16_endian endian);
87189 +struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
87190 +                          struct ntfs_fnd *fnd);
87191 +bool dir_is_empty(struct inode *dir);
87192 +extern const struct file_operations ntfs_dir_operations;
87194 +/* globals from file.c*/
87195 +int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
87196 +                struct kstat *stat, u32 request_mask, u32 flags);
87197 +void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
87198 +                        CLST len);
87199 +int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
87200 +                 struct iattr *attr);
87201 +int ntfs_file_open(struct inode *inode, struct file *file);
87202 +int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
87203 +               __u64 start, __u64 len);
87204 +extern const struct inode_operations ntfs_special_inode_operations;
87205 +extern const struct inode_operations ntfs_file_inode_operations;
87206 +extern const struct file_operations ntfs_file_operations;
87208 +/* globals from frecord.c */
87209 +void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi);
87210 +struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni);
87211 +struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni);
87212 +void ni_clear(struct ntfs_inode *ni);
87213 +int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
87214 +int ni_load_mi(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
87215 +              struct mft_inode **mi);
87216 +struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
87217 +                           struct ATTR_LIST_ENTRY **entry_o,
87218 +                           enum ATTR_TYPE type, const __le16 *name,
87219 +                           u8 name_len, const CLST *vcn,
87220 +                           struct mft_inode **mi);
87221 +struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
87222 +                              struct ATTR_LIST_ENTRY **le,
87223 +                              struct mft_inode **mi);
87224 +struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
87225 +                           const __le16 *name, u8 name_len, CLST vcn,
87226 +                           struct mft_inode **pmi);
87227 +int ni_load_all_mi(struct ntfs_inode *ni);
87228 +bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
87229 +int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
87230 +                  const __le16 *name, size_t name_len, bool base_only,
87231 +                  const __le16 *id);
87232 +int ni_create_attr_list(struct ntfs_inode *ni);
87233 +int ni_expand_list(struct ntfs_inode *ni);
87234 +int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
87235 +                         const __le16 *name, u8 name_len,
87236 +                         const struct runs_tree *run, CLST svcn, CLST len,
87237 +                         __le16 flags, struct ATTRIB **new_attr,
87238 +                         struct mft_inode **mi);
87239 +int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
87240 +                      enum ATTR_TYPE type, const __le16 *name, u8 name_len,
87241 +                      struct ATTRIB **new_attr, struct mft_inode **mi);
87242 +int ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
87243 +                     struct ATTR_LIST_ENTRY *le);
87244 +int ni_delete_all(struct ntfs_inode *ni);
87245 +struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
87246 +                                    const struct cpu_str *uni,
87247 +                                    const struct MFT_REF *home,
87248 +                                    struct ATTR_LIST_ENTRY **entry);
87249 +struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
87250 +                                    struct ATTR_LIST_ENTRY **entry);
87251 +int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa);
87252 +enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
87253 +                                  void *buffer);
87254 +int ni_write_inode(struct inode *inode, int sync, const char *hint);
87255 +#define _ni_write_inode(i, w) ni_write_inode(i, w, __func__)
87256 +int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
87257 +             __u64 vbo, __u64 len);
87258 +int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page);
87259 +int ni_decompress_file(struct ntfs_inode *ni);
87260 +int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
87261 +                 u32 pages_per_frame);
87262 +int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
87263 +                  u32 pages_per_frame);
87265 +/* globals from fslog.c */
87266 +int log_replay(struct ntfs_inode *ni, bool *initialized);
87268 +/* globals from fsntfs.c */
87269 +bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes);
87270 +int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
87271 +                      bool simple);
87272 +int ntfs_extend_init(struct ntfs_sb_info *sbi);
87273 +int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi);
87274 +const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
87275 +                                           enum ATTR_TYPE Type);
87276 +int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
87277 +                            CLST *new_lcn, CLST *new_len,
87278 +                            enum ALLOCATE_OPT opt);
87279 +int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
87280 +                      struct ntfs_inode *ni, struct mft_inode **mi);
87281 +void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno);
87282 +int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to);
87283 +int ntfs_refresh_zone(struct ntfs_sb_info *sbi);
87284 +int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
87285 +enum NTFS_DIRTY_FLAGS {
87286 +       NTFS_DIRTY_CLEAR = 0,
87287 +       NTFS_DIRTY_DIRTY = 1,
87288 +       NTFS_DIRTY_ERROR = 2,
87290 +int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty);
87291 +int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer);
87292 +int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
87293 +                 const void *buffer, int wait);
87294 +int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
87295 +                     u64 vbo, const void *buf, size_t bytes);
87296 +struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
87297 +                                  const struct runs_tree *run, u64 vbo);
87298 +int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
87299 +                    u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb);
87300 +int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
87301 +                struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
87302 +                struct ntfs_buffers *nb);
87303 +int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
87304 +               u32 bytes, struct ntfs_buffers *nb);
87305 +int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
87306 +                 struct ntfs_buffers *nb, int sync);
87307 +int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
87308 +                  struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
87309 +                  u32 op);
87310 +int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run);
87311 +int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
87312 +                   u64 vbo, u64 *lbo, u64 *bytes);
87313 +struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST nRec,
87314 +                                 bool dir);
87315 +extern const u8 s_default_security[0x50];
87316 +bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len);
87317 +int ntfs_security_init(struct ntfs_sb_info *sbi);
87318 +int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
87319 +                           struct SECURITY_DESCRIPTOR_RELATIVE **sd,
87320 +                           size_t *size);
87321 +int ntfs_insert_security(struct ntfs_sb_info *sbi,
87322 +                        const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
87323 +                        u32 size, __le32 *security_id, bool *inserted);
87324 +int ntfs_reparse_init(struct ntfs_sb_info *sbi);
87325 +int ntfs_objid_init(struct ntfs_sb_info *sbi);
87326 +int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid);
87327 +int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
87328 +                       const struct MFT_REF *ref);
87329 +int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
87330 +                       const struct MFT_REF *ref);
87331 +void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim);
87332 +int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim);
87334 +/* globals from index.c */
87335 +int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit);
87336 +void fnd_clear(struct ntfs_fnd *fnd);
87337 +static inline struct ntfs_fnd *fnd_get(void)
87339 +       return ntfs_zalloc(sizeof(struct ntfs_fnd));
87341 +static inline void fnd_put(struct ntfs_fnd *fnd)
87343 +       if (fnd) {
87344 +               fnd_clear(fnd);
87345 +               ntfs_free(fnd);
87346 +       }
87348 +void indx_clear(struct ntfs_index *idx);
87349 +int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
87350 +             const struct ATTRIB *attr, enum index_mutex_classed type);
87351 +struct INDEX_ROOT *indx_get_root(struct ntfs_index *indx, struct ntfs_inode *ni,
87352 +                                struct ATTRIB **attr, struct mft_inode **mi);
87353 +int indx_read(struct ntfs_index *idx, struct ntfs_inode *ni, CLST vbn,
87354 +             struct indx_node **node);
87355 +int indx_find(struct ntfs_index *indx, struct ntfs_inode *dir,
87356 +             const struct INDEX_ROOT *root, const void *Key, size_t KeyLen,
87357 +             const void *param, int *diff, struct NTFS_DE **entry,
87358 +             struct ntfs_fnd *fnd);
87359 +int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,
87360 +                  const struct INDEX_ROOT *root, struct NTFS_DE **entry,
87361 +                  struct ntfs_fnd *fnd);
87362 +int indx_find_raw(struct ntfs_index *indx, struct ntfs_inode *ni,
87363 +                 const struct INDEX_ROOT *root, struct NTFS_DE **entry,
87364 +                 size_t *off, struct ntfs_fnd *fnd);
87365 +int indx_insert_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
87366 +                     const struct NTFS_DE *new_de, const void *param,
87367 +                     struct ntfs_fnd *fnd);
87368 +int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
87369 +                     const void *key, u32 key_len, const void *param);
87370 +int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
87371 +                   const struct ATTR_FILE_NAME *fname,
87372 +                   const struct NTFS_DUP_INFO *dup, int sync);
87374 +/* globals from inode.c */
87375 +struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
87376 +                        const struct cpu_str *name);
87377 +int ntfs_set_size(struct inode *inode, u64 new_size);
87378 +int reset_log_file(struct inode *inode);
87379 +int ntfs_get_block(struct inode *inode, sector_t vbn,
87380 +                  struct buffer_head *bh_result, int create);
87381 +int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
87382 +int ntfs_sync_inode(struct inode *inode);
87383 +int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
87384 +                     struct inode *i2);
87385 +int inode_write_data(struct inode *inode, const void *data, size_t bytes);
87386 +struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
87387 +                               struct inode *dir, struct dentry *dentry,
87388 +                               const struct cpu_str *uni, umode_t mode,
87389 +                               dev_t dev, const char *symname, u32 size,
87390 +                               int excl, struct ntfs_fnd *fnd);
87391 +int ntfs_link_inode(struct inode *inode, struct dentry *dentry);
87392 +int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry);
87393 +void ntfs_evict_inode(struct inode *inode);
87394 +extern const struct inode_operations ntfs_link_inode_operations;
87395 +extern const struct address_space_operations ntfs_aops;
87396 +extern const struct address_space_operations ntfs_aops_cmpr;
87398 +/* globals from name_i.c*/
87399 +int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
87400 +                const struct cpu_str *uni);
87401 +struct dentry *ntfs3_get_parent(struct dentry *child);
87403 +extern const struct inode_operations ntfs_dir_inode_operations;
87405 +/* globals from record.c */
87406 +int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi);
87407 +void mi_put(struct mft_inode *mi);
87408 +int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno);
87409 +int mi_read(struct mft_inode *mi, bool is_mft);
87410 +struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr);
87411 +// TODO: id?
87412 +struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
87413 +                           enum ATTR_TYPE type, const __le16 *name,
87414 +                           size_t name_len, const __le16 *id);
87415 +static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
87416 +                                             struct ATTR_LIST_ENTRY *le)
87418 +       return mi_find_attr(rec, NULL, le->type, le_name(le), le->name_len,
87419 +                           &le->id);
87421 +int mi_write(struct mft_inode *mi, int wait);
87422 +int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
87423 +                 __le16 flags, bool is_mft);
87424 +void mi_mark_free(struct mft_inode *mi);
87425 +struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
87426 +                             const __le16 *name, u8 name_len, u32 asize,
87427 +                             u16 name_off);
87429 +bool mi_remove_attr(struct mft_inode *mi, struct ATTRIB *attr);
87430 +bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes);
87431 +int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
87432 +                struct runs_tree *run, CLST len);
87433 +static inline bool mi_is_ref(const struct mft_inode *mi,
87434 +                            const struct MFT_REF *ref)
87436 +       if (le32_to_cpu(ref->low) != mi->rno)
87437 +               return false;
87438 +       if (ref->seq != mi->mrec->seq)
87439 +               return false;
87441 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
87442 +       return le16_to_cpu(ref->high) == (mi->rno >> 32);
87443 +#else
87444 +       return !ref->high;
87445 +#endif
87448 +static inline void mi_get_ref(const struct mft_inode *mi, struct MFT_REF *ref)
87450 +       ref->low = cpu_to_le32(mi->rno);
87451 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
87452 +       ref->high = cpu_to_le16(mi->rno >> 32);
87453 +#else
87454 +       ref->high = 0;
87455 +#endif
87456 +       ref->seq = mi->mrec->seq;
87459 +/* globals from run.c */
87460 +bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
87461 +                     CLST *len, size_t *index);
87462 +void run_truncate(struct runs_tree *run, CLST vcn);
87463 +void run_truncate_head(struct runs_tree *run, CLST vcn);
87464 +void run_truncate_around(struct runs_tree *run, CLST vcn);
87465 +bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *Index);
87466 +bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
87467 +                  bool is_mft);
87468 +bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
87469 +bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
87470 +                  CLST *lcn, CLST *len);
87471 +bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn);
87473 +int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
87474 +            u32 run_buf_size, CLST *packed_vcns);
87475 +int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
87476 +              CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
87477 +              u32 run_buf_size);
87479 +#ifdef NTFS3_CHECK_FREE_CLST
87480 +int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
87481 +                 CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
87482 +                 u32 run_buf_size);
87483 +#else
87484 +#define run_unpack_ex run_unpack
87485 +#endif
87486 +int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn);
87488 +/* globals from super.c */
87489 +void *ntfs_set_shared(void *ptr, u32 bytes);
87490 +void *ntfs_put_shared(void *ptr);
87491 +void ntfs_unmap_meta(struct super_block *sb, CLST lcn, CLST len);
87492 +int ntfs_discard(struct ntfs_sb_info *sbi, CLST Lcn, CLST Len);
87494 +/* globals from bitmap.c*/
87495 +int __init ntfs3_init_bitmap(void);
87496 +void ntfs3_exit_bitmap(void);
87497 +void wnd_close(struct wnd_bitmap *wnd);
87498 +static inline size_t wnd_zeroes(const struct wnd_bitmap *wnd)
87500 +       return wnd->total_zeroes;
87502 +int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits);
87503 +int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits);
87504 +int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
87505 +bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits);
87506 +bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
87508 +/* Possible values for 'flags' 'wnd_find' */
87509 +#define BITMAP_FIND_MARK_AS_USED 0x01
87510 +#define BITMAP_FIND_FULL 0x02
87511 +size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
87512 +               size_t flags, size_t *allocated);
87513 +int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits);
87514 +void wnd_zone_set(struct wnd_bitmap *wnd, size_t Lcn, size_t Len);
87515 +int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range);
87517 +/* globals from upcase.c */
87518 +int ntfs_cmp_names(const __le16 *s1, size_t l1, const __le16 *s2, size_t l2,
87519 +                  const u16 *upcase, bool bothcase);
87520 +int ntfs_cmp_names_cpu(const struct cpu_str *uni1, const struct le_str *uni2,
87521 +                      const u16 *upcase, bool bothcase);
87523 +/* globals from xattr.c */
87524 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
87525 +struct posix_acl *ntfs_get_acl(struct inode *inode, int type);
87526 +int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
87527 +                struct posix_acl *acl, int type);
87528 +int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
87529 +                 struct inode *dir);
87530 +#else
87531 +#define ntfs_get_acl NULL
87532 +#define ntfs_set_acl NULL
87533 +#endif
87535 +int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode);
87536 +int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
87537 +                   int mask);
87538 +ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
87539 +extern const struct xattr_handler *ntfs_xattr_handlers[];
87541 +/* globals from lznt.c */
87542 +struct lznt *get_lznt_ctx(int level);
87543 +size_t compress_lznt(const void *uncompressed, size_t uncompressed_size,
87544 +                    void *compressed, size_t compressed_size,
87545 +                    struct lznt *ctx);
87546 +ssize_t decompress_lznt(const void *compressed, size_t compressed_size,
87547 +                       void *uncompressed, size_t uncompressed_size);
87549 +static inline bool is_ntfs3(struct ntfs_sb_info *sbi)
87551 +       return sbi->volume.major_ver >= 3;
87554 +/*(sb->s_flags & SB_ACTIVE)*/
87555 +static inline bool is_mounted(struct ntfs_sb_info *sbi)
87557 +       return !!sbi->sb->s_root;
87560 +static inline bool ntfs_is_meta_file(struct ntfs_sb_info *sbi, CLST rno)
87562 +       return rno < MFT_REC_FREE || rno == sbi->objid_no ||
87563 +              rno == sbi->quota_no || rno == sbi->reparse_no ||
87564 +              rno == sbi->usn_jrnl_no;
87567 +static inline void ntfs_unmap_page(struct page *page)
87569 +       kunmap(page);
87570 +       put_page(page);
87573 +static inline struct page *ntfs_map_page(struct address_space *mapping,
87574 +                                        unsigned long index)
87576 +       struct page *page = read_mapping_page(mapping, index, NULL);
87578 +       if (!IS_ERR(page)) {
87579 +               kmap(page);
87580 +               if (!PageError(page))
87581 +                       return page;
87582 +               ntfs_unmap_page(page);
87583 +               return ERR_PTR(-EIO);
87584 +       }
87585 +       return page;
87588 +static inline size_t wnd_zone_bit(const struct wnd_bitmap *wnd)
87590 +       return wnd->zone_bit;
87593 +static inline size_t wnd_zone_len(const struct wnd_bitmap *wnd)
87595 +       return wnd->zone_end - wnd->zone_bit;
87598 +static inline void run_init(struct runs_tree *run)
87600 +       run->runs = NULL;
87601 +       run->count = 0;
87602 +       run->allocated = 0;
87605 +static inline struct runs_tree *run_alloc(void)
87607 +       return ntfs_zalloc(sizeof(struct runs_tree));
87610 +static inline void run_close(struct runs_tree *run)
87612 +       ntfs_vfree(run->runs);
87613 +       memset(run, 0, sizeof(*run));
87616 +static inline void run_free(struct runs_tree *run)
87618 +       if (run) {
87619 +               ntfs_vfree(run->runs);
87620 +               ntfs_free(run);
87621 +       }
87624 +static inline bool run_is_empty(struct runs_tree *run)
87626 +       return !run->count;
87629 +/* NTFS uses quad aligned bitmaps */
87630 +static inline size_t bitmap_size(size_t bits)
87632 +       return QuadAlign((bits + 7) >> 3);
87635 +#define _100ns2seconds 10000000
87636 +#define SecondsToStartOf1970 0x00000002B6109100
87638 +#define NTFS_TIME_GRAN 100
87641 + * kernel2nt
87642 + *
87643 + * converts in-memory kernel timestamp into nt time
87644 + */
87645 +static inline __le64 kernel2nt(const struct timespec64 *ts)
87647 +       // 10^7 units of 100 nanoseconds one second
87648 +       return cpu_to_le64(_100ns2seconds *
87649 +                                  (ts->tv_sec + SecondsToStartOf1970) +
87650 +                          ts->tv_nsec / NTFS_TIME_GRAN);
87654 + * nt2kernel
87655 + *
87656 + * converts on-disk nt time into kernel timestamp
87657 + */
87658 +static inline void nt2kernel(const __le64 tm, struct timespec64 *ts)
87660 +       u64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970;
87662 +       // WARNING: do_div changes its first argument(!)
87663 +       ts->tv_nsec = do_div(t, _100ns2seconds) * 100;
87664 +       ts->tv_sec = t;
87667 +static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)
87669 +       return sb->s_fs_info;
87672 +/* Align up on cluster boundary */
87673 +static inline u64 ntfs_up_cluster(const struct ntfs_sb_info *sbi, u64 size)
87675 +       return (size + sbi->cluster_mask) & sbi->cluster_mask_inv;
87678 +/* Align up on cluster boundary */
87679 +static inline u64 ntfs_up_block(const struct super_block *sb, u64 size)
87681 +       return (size + sb->s_blocksize - 1) & ~(u64)(sb->s_blocksize - 1);
87684 +static inline CLST bytes_to_cluster(const struct ntfs_sb_info *sbi, u64 size)
87686 +       return (size + sbi->cluster_mask) >> sbi->cluster_bits;
87689 +static inline u64 bytes_to_block(const struct super_block *sb, u64 size)
87691 +       return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
87694 +static inline struct buffer_head *ntfs_bread(struct super_block *sb,
87695 +                                            sector_t block)
87697 +       struct buffer_head *bh = sb_bread(sb, block);
87699 +       if (bh)
87700 +               return bh;
87702 +       ntfs_err(sb, "failed to read volume at offset 0x%llx",
87703 +                (u64)block << sb->s_blocksize_bits);
87704 +       return NULL;
87707 +static inline bool is_power_of2(size_t v)
87709 +       return v && !(v & (v - 1));
87712 +static inline struct ntfs_inode *ntfs_i(struct inode *inode)
87714 +       return container_of(inode, struct ntfs_inode, vfs_inode);
87717 +static inline bool is_compressed(const struct ntfs_inode *ni)
87719 +       return (ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) ||
87720 +              (ni->ni_flags & NI_FLAG_COMPRESSED_MASK);
87723 +static inline int ni_ext_compress_bits(const struct ntfs_inode *ni)
87725 +       return 0xb + (ni->ni_flags & NI_FLAG_COMPRESSED_MASK);
87728 +/* bits - 0xc, 0xd, 0xe, 0xf, 0x10 */
87729 +static inline void ni_set_ext_compress_bits(struct ntfs_inode *ni, u8 bits)
87731 +       ni->ni_flags |= (bits - 0xb) & NI_FLAG_COMPRESSED_MASK;
87734 +static inline bool is_dedup(const struct ntfs_inode *ni)
87736 +       return ni->ni_flags & NI_FLAG_DEDUPLICATED;
87739 +static inline bool is_encrypted(const struct ntfs_inode *ni)
87741 +       return ni->std_fa & FILE_ATTRIBUTE_ENCRYPTED;
87744 +static inline bool is_sparsed(const struct ntfs_inode *ni)
87746 +       return ni->std_fa & FILE_ATTRIBUTE_SPARSE_FILE;
87749 +static inline int is_resident(struct ntfs_inode *ni)
87751 +       return ni->ni_flags & NI_FLAG_RESIDENT;
87754 +static inline void le16_sub_cpu(__le16 *var, u16 val)
87756 +       *var = cpu_to_le16(le16_to_cpu(*var) - val);
87759 +static inline void le32_sub_cpu(__le32 *var, u32 val)
87761 +       *var = cpu_to_le32(le32_to_cpu(*var) - val);
87764 +static inline void nb_put(struct ntfs_buffers *nb)
87766 +       u32 i, nbufs = nb->nbufs;
87768 +       if (!nbufs)
87769 +               return;
87771 +       for (i = 0; i < nbufs; i++)
87772 +               put_bh(nb->bh[i]);
87773 +       nb->nbufs = 0;
87776 +static inline void put_indx_node(struct indx_node *in)
87778 +       if (!in)
87779 +               return;
87781 +       ntfs_free(in->index);
87782 +       nb_put(&in->nb);
87783 +       ntfs_free(in);
87786 +static inline void mi_clear(struct mft_inode *mi)
87788 +       nb_put(&mi->nb);
87789 +       ntfs_free(mi->mrec);
87790 +       mi->mrec = NULL;
87793 +static inline void ni_lock(struct ntfs_inode *ni)
87795 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_NORMAL);
87798 +static inline void ni_lock_dir(struct ntfs_inode *ni)
87800 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_PARENT);
87803 +static inline void ni_unlock(struct ntfs_inode *ni)
87805 +       mutex_unlock(&ni->ni_lock);
87808 +static inline int ni_trylock(struct ntfs_inode *ni)
87810 +       return mutex_trylock(&ni->ni_lock);
87813 +static inline int attr_load_runs_attr(struct ntfs_inode *ni,
87814 +                                     struct ATTRIB *attr,
87815 +                                     struct runs_tree *run, CLST vcn)
87817 +       return attr_load_runs_vcn(ni, attr->type, attr_name(attr),
87818 +                                 attr->name_len, run, vcn);
87821 +static inline void le64_sub_cpu(__le64 *var, u64 val)
87823 +       *var = cpu_to_le64(le64_to_cpu(*var) - val);
87825 diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
87826 new file mode 100644
87827 index 000000000000..0d4a6251bddc
87828 --- /dev/null
87829 +++ b/fs/ntfs3/record.c
87830 @@ -0,0 +1,609 @@
87831 +// SPDX-License-Identifier: GPL-2.0
87833 + *
87834 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
87835 + *
87836 + */
87838 +#include <linux/blkdev.h>
87839 +#include <linux/buffer_head.h>
87840 +#include <linux/fs.h>
87841 +#include <linux/nls.h>
87843 +#include "debug.h"
87844 +#include "ntfs.h"
87845 +#include "ntfs_fs.h"
87847 +static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
87848 +                              const __le16 *name, u8 name_len,
87849 +                              const u16 *upcase)
87851 +       /* First, compare the type codes: */
87852 +       int diff = le32_to_cpu(left->type) - le32_to_cpu(type);
87854 +       if (diff)
87855 +               return diff;
87857 +       /*
87858 +        * They have the same type code, so we have to compare the names.
87859 +        */
87860 +       return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
87861 +                             upcase, true);
87865 + * mi_new_attt_id
87866 + *
87867 + * returns unused attribute id that is less than mrec->next_attr_id
87868 + */
87869 +static __le16 mi_new_attt_id(struct mft_inode *mi)
87871 +       u16 free_id, max_id, t16;
87872 +       struct MFT_REC *rec = mi->mrec;
87873 +       struct ATTRIB *attr;
87874 +       __le16 id;
87876 +       id = rec->next_attr_id;
87877 +       free_id = le16_to_cpu(id);
87878 +       if (free_id < 0x7FFF) {
87879 +               rec->next_attr_id = cpu_to_le16(free_id + 1);
87880 +               return id;
87881 +       }
87883 +       /* One record can store up to 1024/24 ~= 42 attributes */
87884 +       free_id = 0;
87885 +       max_id = 0;
87887 +       attr = NULL;
87889 +       for (;;) {
87890 +               attr = mi_enum_attr(mi, attr);
87891 +               if (!attr) {
87892 +                       rec->next_attr_id = cpu_to_le16(max_id + 1);
87893 +                       mi->dirty = true;
87894 +                       return cpu_to_le16(free_id);
87895 +               }
87897 +               t16 = le16_to_cpu(attr->id);
87898 +               if (t16 == free_id) {
87899 +                       free_id += 1;
87900 +                       attr = NULL;
87901 +               } else if (max_id < t16)
87902 +                       max_id = t16;
87903 +       }
87906 +int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
87908 +       int err;
87909 +       struct mft_inode *m = ntfs_zalloc(sizeof(struct mft_inode));
87911 +       if (!m)
87912 +               return -ENOMEM;
87914 +       err = mi_init(m, sbi, rno);
87915 +       if (err) {
87916 +               ntfs_free(m);
87917 +               return err;
87918 +       }
87920 +       err = mi_read(m, false);
87921 +       if (err) {
87922 +               mi_put(m);
87923 +               return err;
87924 +       }
87926 +       *mi = m;
87927 +       return 0;
87930 +void mi_put(struct mft_inode *mi)
87932 +       mi_clear(mi);
87933 +       ntfs_free(mi);
87936 +int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
87938 +       mi->sbi = sbi;
87939 +       mi->rno = rno;
87940 +       mi->mrec = ntfs_malloc(sbi->record_size);
87941 +       if (!mi->mrec)
87942 +               return -ENOMEM;
87944 +       return 0;
87948 + * mi_read
87949 + *
87950 + * reads MFT data
87951 + */
87952 +int mi_read(struct mft_inode *mi, bool is_mft)
87954 +       int err;
87955 +       struct MFT_REC *rec = mi->mrec;
87956 +       struct ntfs_sb_info *sbi = mi->sbi;
87957 +       u32 bpr = sbi->record_size;
87958 +       u64 vbo = (u64)mi->rno << sbi->record_bits;
87959 +       struct ntfs_inode *mft_ni = sbi->mft.ni;
87960 +       struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
87961 +       struct rw_semaphore *rw_lock = NULL;
87963 +       if (is_mounted(sbi)) {
87964 +               if (!is_mft) {
87965 +                       rw_lock = &mft_ni->file.run_lock;
87966 +                       down_read(rw_lock);
87967 +               }
87968 +       }
87970 +       err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
87971 +       if (rw_lock)
87972 +               up_read(rw_lock);
87973 +       if (!err)
87974 +               goto ok;
87976 +       if (err == -E_NTFS_FIXUP) {
87977 +               mi->dirty = true;
87978 +               goto ok;
87979 +       }
87981 +       if (err != -ENOENT)
87982 +               goto out;
87984 +       if (rw_lock) {
87985 +               ni_lock(mft_ni);
87986 +               down_write(rw_lock);
87987 +       }
87988 +       err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run,
87989 +                                vbo >> sbi->cluster_bits);
87990 +       if (rw_lock) {
87991 +               up_write(rw_lock);
87992 +               ni_unlock(mft_ni);
87993 +       }
87994 +       if (err)
87995 +               goto out;
87997 +       if (rw_lock)
87998 +               down_read(rw_lock);
87999 +       err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
88000 +       if (rw_lock)
88001 +               up_read(rw_lock);
88003 +       if (err == -E_NTFS_FIXUP) {
88004 +               mi->dirty = true;
88005 +               goto ok;
88006 +       }
88007 +       if (err)
88008 +               goto out;
88010 +ok:
88011 +       /* check field 'total' only here */
88012 +       if (le32_to_cpu(rec->total) != bpr) {
88013 +               err = -EINVAL;
88014 +               goto out;
88015 +       }
88017 +       return 0;
88019 +out:
88020 +       return err;
88023 +struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
88025 +       const struct MFT_REC *rec = mi->mrec;
88026 +       u32 used = le32_to_cpu(rec->used);
88027 +       u32 t32, off, asize;
88028 +       u16 t16;
88030 +       if (!attr) {
88031 +               u32 total = le32_to_cpu(rec->total);
88033 +               off = le16_to_cpu(rec->attr_off);
88035 +               if (used > total)
88036 +                       return NULL;
88038 +               if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
88039 +                   !IsDwordAligned(off)) {
88040 +                       return NULL;
88041 +               }
88043 +               /* Skip non-resident records */
88044 +               if (!is_rec_inuse(rec))
88045 +                       return NULL;
88047 +               attr = Add2Ptr(rec, off);
88048 +       } else {
88049 +               /* Check if input attr inside record */
88050 +               off = PtrOffset(rec, attr);
88051 +               if (off >= used)
88052 +                       return NULL;
88054 +               asize = le32_to_cpu(attr->size);
88055 +               if (asize < SIZEOF_RESIDENT) {
88056 +                       /* Impossible 'cause we should not return such attribute */
88057 +                       return NULL;
88058 +               }
88060 +               attr = Add2Ptr(attr, asize);
88061 +               off += asize;
88062 +       }
88064 +       asize = le32_to_cpu(attr->size);
88066 +       /* Can we use the first field (attr->type) */
88067 +       if (off + 8 > used) {
88068 +               static_assert(QuadAlign(sizeof(enum ATTR_TYPE)) == 8);
88069 +               return NULL;
88070 +       }
88072 +       if (attr->type == ATTR_END) {
88073 +               /* end of enumeration */
88074 +               return NULL;
88075 +       }
88077 +       /* 0x100 is last known attribute for now*/
88078 +       t32 = le32_to_cpu(attr->type);
88079 +       if ((t32 & 0xf) || (t32 > 0x100))
88080 +               return NULL;
88082 +       /* Check boundary */
88083 +       if (off + asize > used)
88084 +               return NULL;
88086 +       /* Check size of attribute */
88087 +       if (!attr->non_res) {
88088 +               if (asize < SIZEOF_RESIDENT)
88089 +                       return NULL;
88091 +               t16 = le16_to_cpu(attr->res.data_off);
88093 +               if (t16 > asize)
88094 +                       return NULL;
88096 +               t32 = le32_to_cpu(attr->res.data_size);
88097 +               if (t16 + t32 > asize)
88098 +                       return NULL;
88100 +               return attr;
88101 +       }
88103 +       /* Check some nonresident fields */
88104 +       if (attr->name_len &&
88105 +           le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len >
88106 +                   le16_to_cpu(attr->nres.run_off)) {
88107 +               return NULL;
88108 +       }
88110 +       if (attr->nres.svcn || !is_attr_ext(attr)) {
88111 +               if (asize + 8 < SIZEOF_NONRESIDENT)
88112 +                       return NULL;
88114 +               if (attr->nres.c_unit)
88115 +                       return NULL;
88116 +       } else if (asize + 8 < SIZEOF_NONRESIDENT_EX)
88117 +               return NULL;
88119 +       return attr;
88123 + * mi_find_attr
88124 + *
88125 + * finds the attribute by type and name and id
88126 + */
88127 +struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
88128 +                           enum ATTR_TYPE type, const __le16 *name,
88129 +                           size_t name_len, const __le16 *id)
88131 +       u32 type_in = le32_to_cpu(type);
88132 +       u32 atype;
88134 +next_attr:
88135 +       attr = mi_enum_attr(mi, attr);
88136 +       if (!attr)
88137 +               return NULL;
88139 +       atype = le32_to_cpu(attr->type);
88140 +       if (atype > type_in)
88141 +               return NULL;
88143 +       if (atype < type_in)
88144 +               goto next_attr;
88146 +       if (attr->name_len != name_len)
88147 +               goto next_attr;
88149 +       if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
88150 +               goto next_attr;
88152 +       if (id && *id != attr->id)
88153 +               goto next_attr;
88155 +       return attr;
88158 +int mi_write(struct mft_inode *mi, int wait)
88160 +       struct MFT_REC *rec;
88161 +       int err;
88162 +       struct ntfs_sb_info *sbi;
88164 +       if (!mi->dirty)
88165 +               return 0;
88167 +       sbi = mi->sbi;
88168 +       rec = mi->mrec;
88170 +       err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
88171 +       if (err)
88172 +               return err;
88174 +       if (mi->rno < sbi->mft.recs_mirr)
88175 +               sbi->flags |= NTFS_FLAGS_MFTMIRR;
88177 +       mi->dirty = false;
88179 +       return 0;
88182 +int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
88183 +                 __le16 flags, bool is_mft)
88185 +       int err;
88186 +       u16 seq = 1;
88187 +       struct MFT_REC *rec;
88188 +       u64 vbo = (u64)rno << sbi->record_bits;
88190 +       err = mi_init(mi, sbi, rno);
88191 +       if (err)
88192 +               return err;
88194 +       rec = mi->mrec;
88196 +       if (rno == MFT_REC_MFT) {
88197 +               ;
88198 +       } else if (rno < MFT_REC_FREE) {
88199 +               seq = rno;
88200 +       } else if (rno >= sbi->mft.used) {
88201 +               ;
88202 +       } else if (mi_read(mi, is_mft)) {
88203 +               ;
88204 +       } else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
88205 +               /* Record is reused. Update its sequence number */
88206 +               seq = le16_to_cpu(rec->seq) + 1;
88207 +               if (!seq)
88208 +                       seq = 1;
88209 +       }
88211 +       memcpy(rec, sbi->new_rec, sbi->record_size);
88213 +       rec->seq = cpu_to_le16(seq);
88214 +       rec->flags = RECORD_FLAG_IN_USE | flags;
88216 +       mi->dirty = true;
88218 +       if (!mi->nb.nbufs) {
88219 +               struct ntfs_inode *ni = sbi->mft.ni;
88220 +               bool lock = false;
88222 +               if (is_mounted(sbi) && !is_mft) {
88223 +                       down_read(&ni->file.run_lock);
88224 +                       lock = true;
88225 +               }
88227 +               err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
88228 +                                 &mi->nb);
88229 +               if (lock)
88230 +                       up_read(&ni->file.run_lock);
88231 +       }
88233 +       return err;
88237 + * mi_mark_free
88238 + *
88239 + * marks record as unused and marks it as free in bitmap
88240 + */
88241 +void mi_mark_free(struct mft_inode *mi)
88243 +       CLST rno = mi->rno;
88244 +       struct ntfs_sb_info *sbi = mi->sbi;
88246 +       if (rno >= MFT_REC_RESERVED && rno < MFT_REC_FREE) {
88247 +               ntfs_clear_mft_tail(sbi, rno, rno + 1);
88248 +               mi->dirty = false;
88249 +               return;
88250 +       }
88252 +       if (mi->mrec) {
88253 +               clear_rec_inuse(mi->mrec);
88254 +               mi->dirty = true;
88255 +               mi_write(mi, 0);
88256 +       }
88257 +       ntfs_mark_rec_free(sbi, rno);
88261 + * mi_insert_attr
88262 + *
88263 + * reserves space for new attribute
88264 + * returns not full constructed attribute or NULL if not possible to create
88265 + */
88266 +struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
88267 +                             const __le16 *name, u8 name_len, u32 asize,
88268 +                             u16 name_off)
88270 +       size_t tail;
88271 +       struct ATTRIB *attr;
88272 +       __le16 id;
88273 +       struct MFT_REC *rec = mi->mrec;
88274 +       struct ntfs_sb_info *sbi = mi->sbi;
88275 +       u32 used = le32_to_cpu(rec->used);
88276 +       const u16 *upcase = sbi->upcase;
88277 +       int diff;
88279 +       /* Can we insert mi attribute? */
88280 +       if (used + asize > mi->sbi->record_size)
88281 +               return NULL;
88283 +       /*
88284 +        * Scan through the list of attributes to find the point
88285 +        * at which we should insert it.
88286 +        */
88287 +       attr = NULL;
88288 +       while ((attr = mi_enum_attr(mi, attr))) {
88289 +               diff = compare_attr(attr, type, name, name_len, upcase);
88290 +               if (diff > 0)
88291 +                       break;
88292 +               if (diff < 0)
88293 +                       continue;
88295 +               if (!is_attr_indexed(attr))
88296 +                       return NULL;
88297 +               break;
88298 +       }
88300 +       if (!attr) {
88301 +               tail = 8; /* not used, just to suppress warning */
88302 +               attr = Add2Ptr(rec, used - 8);
88303 +       } else {
88304 +               tail = used - PtrOffset(rec, attr);
88305 +       }
88307 +       id = mi_new_attt_id(mi);
88309 +       memmove(Add2Ptr(attr, asize), attr, tail);
88310 +       memset(attr, 0, asize);
88312 +       attr->type = type;
88313 +       attr->size = cpu_to_le32(asize);
88314 +       attr->name_len = name_len;
88315 +       attr->name_off = cpu_to_le16(name_off);
88316 +       attr->id = id;
88318 +       memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
88319 +       rec->used = cpu_to_le32(used + asize);
88321 +       mi->dirty = true;
88323 +       return attr;
88327 + * mi_remove_attr
88328 + *
88329 + * removes the attribute from record
88330 + * NOTE: The source attr will point to next attribute
88331 + */
88332 +bool mi_remove_attr(struct mft_inode *mi, struct ATTRIB *attr)
88334 +       struct MFT_REC *rec = mi->mrec;
88335 +       u32 aoff = PtrOffset(rec, attr);
88336 +       u32 used = le32_to_cpu(rec->used);
88337 +       u32 asize = le32_to_cpu(attr->size);
88339 +       if (aoff + asize > used)
88340 +               return false;
88342 +       used -= asize;
88343 +       memmove(attr, Add2Ptr(attr, asize), used - aoff);
88344 +       rec->used = cpu_to_le32(used);
88345 +       mi->dirty = true;
88347 +       return true;
88350 +/* bytes = "new attribute size" - "old attribute size" */
88351 +bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
88353 +       struct MFT_REC *rec = mi->mrec;
88354 +       u32 aoff = PtrOffset(rec, attr);
88355 +       u32 total, used = le32_to_cpu(rec->used);
88356 +       u32 nsize, asize = le32_to_cpu(attr->size);
88357 +       u32 rsize = le32_to_cpu(attr->res.data_size);
88358 +       int tail = (int)(used - aoff - asize);
88359 +       int dsize;
88360 +       char *next;
88362 +       if (tail < 0 || aoff >= used)
88363 +               return false;
88365 +       if (!bytes)
88366 +               return true;
88368 +       total = le32_to_cpu(rec->total);
88369 +       next = Add2Ptr(attr, asize);
88371 +       if (bytes > 0) {
88372 +               dsize = QuadAlign(bytes);
88373 +               if (used + dsize > total)
88374 +                       return false;
88375 +               nsize = asize + dsize;
88376 +               // move tail
88377 +               memmove(next + dsize, next, tail);
88378 +               memset(next, 0, dsize);
88379 +               used += dsize;
88380 +               rsize += dsize;
88381 +       } else {
88382 +               dsize = QuadAlign(-bytes);
88383 +               if (dsize > asize)
88384 +                       return false;
88385 +               nsize = asize - dsize;
88386 +               memmove(next - dsize, next, tail);
88387 +               used -= dsize;
88388 +               rsize -= dsize;
88389 +       }
88391 +       rec->used = cpu_to_le32(used);
88392 +       attr->size = cpu_to_le32(nsize);
88393 +       if (!attr->non_res)
88394 +               attr->res.data_size = cpu_to_le32(rsize);
88395 +       mi->dirty = true;
88397 +       return true;
88400 +int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
88401 +                struct runs_tree *run, CLST len)
88403 +       int err = 0;
88404 +       struct ntfs_sb_info *sbi = mi->sbi;
88405 +       u32 new_run_size;
88406 +       CLST plen;
88407 +       struct MFT_REC *rec = mi->mrec;
88408 +       CLST svcn = le64_to_cpu(attr->nres.svcn);
88409 +       u32 used = le32_to_cpu(rec->used);
88410 +       u32 aoff = PtrOffset(rec, attr);
88411 +       u32 asize = le32_to_cpu(attr->size);
88412 +       char *next = Add2Ptr(attr, asize);
88413 +       u16 run_off = le16_to_cpu(attr->nres.run_off);
88414 +       u32 run_size = asize - run_off;
88415 +       u32 tail = used - aoff - asize;
88416 +       u32 dsize = sbi->record_size - used;
88418 +       /* Make a maximum gap in current record */
88419 +       memmove(next + dsize, next, tail);
88421 +       /* Pack as much as possible */
88422 +       err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
88423 +                      &plen);
88424 +       if (err < 0) {
88425 +               memmove(next, next + dsize, tail);
88426 +               return err;
88427 +       }
88429 +       new_run_size = QuadAlign(err);
88431 +       memmove(next + new_run_size - run_size, next + dsize, tail);
88433 +       attr->size = cpu_to_le32(asize + new_run_size - run_size);
88434 +       attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
88435 +       rec->used = cpu_to_le32(used + new_run_size - run_size);
88436 +       mi->dirty = true;
88438 +       return 0;
88440 diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
88441 new file mode 100644
88442 index 000000000000..5cdf6efe67e0
88443 --- /dev/null
88444 +++ b/fs/ntfs3/run.c
88445 @@ -0,0 +1,1111 @@
88446 +// SPDX-License-Identifier: GPL-2.0
88448 + *
88449 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
88450 + *
88451 + * TODO: try to use extents tree (instead of array)
88452 + */
88454 +#include <linux/blkdev.h>
88455 +#include <linux/buffer_head.h>
88456 +#include <linux/fs.h>
88457 +#include <linux/nls.h>
88459 +#include "debug.h"
88460 +#include "ntfs.h"
88461 +#include "ntfs_fs.h"
88463 +/* runs_tree is a continues memory. Try to avoid big size  */
88464 +#define NTFS3_RUN_MAX_BYTES 0x10000
88466 +struct ntfs_run {
88467 +       CLST vcn; /* virtual cluster number */
88468 +       CLST len; /* length in clusters */
88469 +       CLST lcn; /* logical cluster number */
88473 + * run_lookup
88474 + *
88475 + * Lookup the index of a MCB entry that is first <= vcn.
88476 + * case of success it will return non-zero value and set
88477 + * 'index' parameter to index of entry been found.
88478 + * case of entry missing from list 'index' will be set to
88479 + * point to insertion position for the entry question.
88480 + */
88481 +bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
88483 +       size_t min_idx, max_idx, mid_idx;
88484 +       struct ntfs_run *r;
88486 +       if (!run->count) {
88487 +               *index = 0;
88488 +               return false;
88489 +       }
88491 +       min_idx = 0;
88492 +       max_idx = run->count - 1;
88494 +       /* Check boundary cases specially, 'cause they cover the often requests */
88495 +       r = run->runs;
88496 +       if (vcn < r->vcn) {
88497 +               *index = 0;
88498 +               return false;
88499 +       }
88501 +       if (vcn < r->vcn + r->len) {
88502 +               *index = 0;
88503 +               return true;
88504 +       }
88506 +       r += max_idx;
88507 +       if (vcn >= r->vcn + r->len) {
88508 +               *index = run->count;
88509 +               return false;
88510 +       }
88512 +       if (vcn >= r->vcn) {
88513 +               *index = max_idx;
88514 +               return true;
88515 +       }
88517 +       do {
88518 +               mid_idx = min_idx + ((max_idx - min_idx) >> 1);
88519 +               r = run->runs + mid_idx;
88521 +               if (vcn < r->vcn) {
88522 +                       max_idx = mid_idx - 1;
88523 +                       if (!mid_idx)
88524 +                               break;
88525 +               } else if (vcn >= r->vcn + r->len) {
88526 +                       min_idx = mid_idx + 1;
88527 +               } else {
88528 +                       *index = mid_idx;
88529 +                       return true;
88530 +               }
88531 +       } while (min_idx <= max_idx);
88533 +       *index = max_idx + 1;
88534 +       return false;
88538 + * run_consolidate
88539 + *
88540 + * consolidate runs starting from a given one.
88541 + */
88542 +static void run_consolidate(struct runs_tree *run, size_t index)
88544 +       size_t i;
88545 +       struct ntfs_run *r = run->runs + index;
88547 +       while (index + 1 < run->count) {
88548 +               /*
88549 +                * I should merge current run with next
88550 +                * if start of the next run lies inside one being tested.
88551 +                */
88552 +               struct ntfs_run *n = r + 1;
88553 +               CLST end = r->vcn + r->len;
88554 +               CLST dl;
88556 +               /* Stop if runs are not aligned one to another. */
88557 +               if (n->vcn > end)
88558 +                       break;
88560 +               dl = end - n->vcn;
88562 +               /*
88563 +                * If range at index overlaps with next one
88564 +                * then I will either adjust it's start position
88565 +                * or (if completely matches) dust remove one from the list.
88566 +                */
88567 +               if (dl > 0) {
88568 +                       if (n->len <= dl)
88569 +                               goto remove_next_range;
88571 +                       n->len -= dl;
88572 +                       n->vcn += dl;
88573 +                       if (n->lcn != SPARSE_LCN)
88574 +                               n->lcn += dl;
88575 +                       dl = 0;
88576 +               }
88578 +               /*
88579 +                * Stop if sparse mode does not match
88580 +                * both current and next runs.
88581 +                */
88582 +               if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
88583 +                       index += 1;
88584 +                       r = n;
88585 +                       continue;
88586 +               }
88588 +               /*
88589 +                * Check if volume block
88590 +                * of a next run lcn does not match
88591 +                * last volume block of the current run.
88592 +                */
88593 +               if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
88594 +                       break;
88596 +               /*
88597 +                * Next and current are siblings.
88598 +                * Eat/join.
88599 +                */
88600 +               r->len += n->len - dl;
88602 +remove_next_range:
88603 +               i = run->count - (index + 1);
88604 +               if (i > 1)
88605 +                       memmove(n, n + 1, sizeof(*n) * (i - 1));
88607 +               run->count -= 1;
88608 +       }
88611 +/* returns true if range [svcn - evcn] is mapped*/
88612 +bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
88614 +       size_t i;
88615 +       const struct ntfs_run *r, *end;
88616 +       CLST next_vcn;
88618 +       if (!run_lookup(run, svcn, &i))
88619 +               return false;
88621 +       end = run->runs + run->count;
88622 +       r = run->runs + i;
88624 +       for (;;) {
88625 +               next_vcn = r->vcn + r->len;
88626 +               if (next_vcn > evcn)
88627 +                       return true;
88629 +               if (++r >= end)
88630 +                       return false;
88632 +               if (r->vcn != next_vcn)
88633 +                       return false;
88634 +       }
88637 +bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
88638 +                     CLST *len, size_t *index)
88640 +       size_t idx;
88641 +       CLST gap;
88642 +       struct ntfs_run *r;
88644 +       /* Fail immediately if nrun was not touched yet. */
88645 +       if (!run->runs)
88646 +               return false;
88648 +       if (!run_lookup(run, vcn, &idx))
88649 +               return false;
88651 +       r = run->runs + idx;
88653 +       if (vcn >= r->vcn + r->len)
88654 +               return false;
88656 +       gap = vcn - r->vcn;
88657 +       if (r->len <= gap)
88658 +               return false;
88660 +       *lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);
88662 +       if (len)
88663 +               *len = r->len - gap;
88664 +       if (index)
88665 +               *index = idx;
88667 +       return true;
88671 + * run_truncate_head
88672 + *
88673 + * decommit the range before vcn
88674 + */
88675 +void run_truncate_head(struct runs_tree *run, CLST vcn)
88677 +       size_t index;
88678 +       struct ntfs_run *r;
88680 +       if (run_lookup(run, vcn, &index)) {
88681 +               r = run->runs + index;
88683 +               if (vcn > r->vcn) {
88684 +                       CLST dlen = vcn - r->vcn;
88686 +                       r->vcn = vcn;
88687 +                       r->len -= dlen;
88688 +                       if (r->lcn != SPARSE_LCN)
88689 +                               r->lcn += dlen;
88690 +               }
88692 +               if (!index)
88693 +                       return;
88694 +       }
88695 +       r = run->runs;
88696 +       memmove(r, r + index, sizeof(*r) * (run->count - index));
88698 +       run->count -= index;
88700 +       if (!run->count) {
88701 +               ntfs_vfree(run->runs);
88702 +               run->runs = NULL;
88703 +               run->allocated = 0;
88704 +       }
88708 + * run_truncate
88709 + *
88710 + * decommit the range after vcn
88711 + */
88712 +void run_truncate(struct runs_tree *run, CLST vcn)
88714 +       size_t index;
88716 +       /*
88717 +        * If I hit the range then
88718 +        * I have to truncate one.
88719 +        * If range to be truncated is becoming empty
88720 +        * then it will entirely be removed.
88721 +        */
88722 +       if (run_lookup(run, vcn, &index)) {
88723 +               struct ntfs_run *r = run->runs + index;
88725 +               r->len = vcn - r->vcn;
88727 +               if (r->len > 0)
88728 +                       index += 1;
88729 +       }
88731 +       /*
88732 +        * At this point 'index' is set to
88733 +        * position that should be thrown away (including index itself)
88734 +        * Simple one - just set the limit.
88735 +        */
88736 +       run->count = index;
88738 +       /* Do not reallocate array 'runs'. Only free if possible */
88739 +       if (!index) {
88740 +               ntfs_vfree(run->runs);
88741 +               run->runs = NULL;
88742 +               run->allocated = 0;
88743 +       }
88746 +/* trim head and tail if necessary*/
88747 +void run_truncate_around(struct runs_tree *run, CLST vcn)
88749 +       run_truncate_head(run, vcn);
88751 +       if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
88752 +               run_truncate(run, (run->runs + (run->count >> 1))->vcn);
88756 + * run_add_entry
88757 + *
88758 + * sets location to known state.
88759 + * run to be added may overlap with existing location.
88760 + * returns false if of memory
88761 + */
88762 +bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
88763 +                  bool is_mft)
88765 +       size_t used, index;
88766 +       struct ntfs_run *r;
88767 +       bool inrange;
88768 +       CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
88769 +       bool should_add_tail = false;
88771 +       /*
88772 +        * Lookup the insertion point.
88773 +        *
88774 +        * Execute bsearch for the entry containing
88775 +        * start position question.
88776 +        */
88777 +       inrange = run_lookup(run, vcn, &index);
88779 +       /*
88780 +        * Shortcut here would be case of
88781 +        * range not been found but one been added
88782 +        * continues previous run.
88783 +        * this case I can directly make use of
88784 +        * existing range as my start point.
88785 +        */
88786 +       if (!inrange && index > 0) {
88787 +               struct ntfs_run *t = run->runs + index - 1;
88789 +               if (t->vcn + t->len == vcn &&
88790 +                   (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
88791 +                   (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
88792 +                       inrange = true;
88793 +                       index -= 1;
88794 +               }
88795 +       }
88797 +       /*
88798 +        * At this point 'index' either points to the range
88799 +        * containing start position or to the insertion position
88800 +        * for a new range.
88801 +        * So first let's check if range I'm probing is here already.
88802 +        */
88803 +       if (!inrange) {
88804 +requires_new_range:
88805 +               /*
88806 +                * Range was not found.
88807 +                * Insert at position 'index'
88808 +                */
88809 +               used = run->count * sizeof(struct ntfs_run);
88811 +               /*
88812 +                * Check allocated space.
88813 +                * If one is not enough to get one more entry
88814 +                * then it will be reallocated
88815 +                */
88816 +               if (run->allocated < used + sizeof(struct ntfs_run)) {
88817 +                       size_t bytes;
88818 +                       struct ntfs_run *new_ptr;
88820 +                       /* Use power of 2 for 'bytes'*/
88821 +                       if (!used) {
88822 +                               bytes = 64;
88823 +                       } else if (used <= 16 * PAGE_SIZE) {
88824 +                               if (is_power_of2(run->allocated))
88825 +                                       bytes = run->allocated << 1;
88826 +                               else
88827 +                                       bytes = (size_t)1
88828 +                                               << (2 + blksize_bits(used));
88829 +                       } else {
88830 +                               bytes = run->allocated + (16 * PAGE_SIZE);
88831 +                       }
88833 +                       WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);
88835 +                       new_ptr = ntfs_vmalloc(bytes);
88837 +                       if (!new_ptr)
88838 +                               return false;
88840 +                       r = new_ptr + index;
88841 +                       memcpy(new_ptr, run->runs,
88842 +                              index * sizeof(struct ntfs_run));
88843 +                       memcpy(r + 1, run->runs + index,
88844 +                              sizeof(struct ntfs_run) * (run->count - index));
88846 +                       ntfs_vfree(run->runs);
88847 +                       run->runs = new_ptr;
88848 +                       run->allocated = bytes;
88850 +               } else {
88851 +                       size_t i = run->count - index;
88853 +                       r = run->runs + index;
88855 +                       /* memmove appears to be a bottle neck here... */
88856 +                       if (i > 0)
88857 +                               memmove(r + 1, r, sizeof(struct ntfs_run) * i);
88858 +               }
88860 +               r->vcn = vcn;
88861 +               r->lcn = lcn;
88862 +               r->len = len;
88863 +               run->count += 1;
88864 +       } else {
88865 +               r = run->runs + index;
88867 +               /*
88868 +                * If one of ranges was not allocated
88869 +                * then I have to split location I just matched.
88870 +                * and insert current one
88871 +                * a common case this requires tail to be reinserted
88872 +                * a recursive call.
88873 +                */
88874 +               if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
88875 +                   (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
88876 +                       CLST to_eat = vcn - r->vcn;
88877 +                       CLST Tovcn = to_eat + len;
88879 +                       should_add_tail = Tovcn < r->len;
88881 +                       if (should_add_tail) {
88882 +                               tail_lcn = r->lcn == SPARSE_LCN
88883 +                                                  ? SPARSE_LCN
88884 +                                                  : (r->lcn + Tovcn);
88885 +                               tail_vcn = r->vcn + Tovcn;
88886 +                               tail_len = r->len - Tovcn;
88887 +                       }
88889 +                       if (to_eat > 0) {
88890 +                               r->len = to_eat;
88891 +                               inrange = false;
88892 +                               index += 1;
88893 +                               goto requires_new_range;
88894 +                       }
88896 +                       /* lcn should match one I'm going to add. */
88897 +                       r->lcn = lcn;
88898 +               }
88900 +               /*
88901 +                * If existing range fits then I'm done.
88902 +                * Otherwise extend found one and fall back to range jocode.
88903 +                */
88904 +               if (r->vcn + r->len < vcn + len)
88905 +                       r->len += len - ((r->vcn + r->len) - vcn);
88906 +       }
88908 +       /*
88909 +        * And normalize it starting from insertion point.
88910 +        * It's possible that no insertion needed case if
88911 +        * start point lies within the range of an entry
88912 +        * that 'index' points to.
88913 +        */
88914 +       if (inrange && index > 0)
88915 +               index -= 1;
88916 +       run_consolidate(run, index);
88917 +       run_consolidate(run, index + 1);
88919 +       /*
88920 +        * a special case
88921 +        * I have to add extra range a tail.
88922 +        */
88923 +       if (should_add_tail &&
88924 +           !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
88925 +               return false;
88927 +       return true;
88930 +/*helper for attr_collapse_range, which is helper for fallocate(collapse_range)*/
88931 +bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
88933 +       size_t index, eat;
88934 +       struct ntfs_run *r, *e, *eat_start, *eat_end;
88935 +       CLST end;
88937 +       if (WARN_ON(!run_lookup(run, vcn, &index)))
88938 +               return true; /* should never be here */
88940 +       e = run->runs + run->count;
88941 +       r = run->runs + index;
88942 +       end = vcn + len;
88944 +       if (vcn > r->vcn) {
88945 +               if (r->vcn + r->len <= end) {
88946 +                       /* collapse tail of run */
88947 +                       r->len = vcn - r->vcn;
88948 +               } else if (r->lcn == SPARSE_LCN) {
88949 +                       /* collapse a middle part of sparsed run */
88950 +                       r->len -= len;
88951 +               } else {
88952 +                       /* collapse a middle part of normal run, split */
88953 +                       if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
88954 +                               return false;
88955 +                       return run_collapse_range(run, vcn, len);
88956 +               }
88958 +               r += 1;
88959 +       }
88961 +       eat_start = r;
88962 +       eat_end = r;
88964 +       for (; r < e; r++) {
88965 +               CLST d;
88967 +               if (r->vcn >= end) {
88968 +                       r->vcn -= len;
88969 +                       continue;
88970 +               }
88972 +               if (r->vcn + r->len <= end) {
88973 +                       /* eat this run */
88974 +                       eat_end = r + 1;
88975 +                       continue;
88976 +               }
88978 +               d = end - r->vcn;
88979 +               if (r->lcn != SPARSE_LCN)
88980 +                       r->lcn += d;
88981 +               r->len -= d;
88982 +               r->vcn -= len - d;
88983 +       }
88985 +       eat = eat_end - eat_start;
88986 +       memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
88987 +       run->count -= eat;
88989 +       return true;
88993 + * run_get_entry
88994 + *
88995 + * returns index-th mapped region
88996 + */
88997 +bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
88998 +                  CLST *lcn, CLST *len)
89000 +       const struct ntfs_run *r;
89002 +       if (index >= run->count)
89003 +               return false;
89005 +       r = run->runs + index;
89007 +       if (!r->len)
89008 +               return false;
89010 +       if (vcn)
89011 +               *vcn = r->vcn;
89012 +       if (lcn)
89013 +               *lcn = r->lcn;
89014 +       if (len)
89015 +               *len = r->len;
89016 +       return true;
89020 + * run_packed_size
89021 + *
89022 + * calculates the size of packed int64
89023 + */
89024 +#ifdef __BIG_ENDIAN
89025 +static inline int run_packed_size(const s64 n)
89027 +       const u8 *p = (const u8 *)&n + sizeof(n) - 1;
89029 +       if (n >= 0) {
89030 +               if (p[-7] || p[-6] || p[-5] || p[-4])
89031 +                       p -= 4;
89032 +               if (p[-3] || p[-2])
89033 +                       p -= 2;
89034 +               if (p[-1])
89035 +                       p -= 1;
89036 +               if (p[0] & 0x80)
89037 +                       p -= 1;
89038 +       } else {
89039 +               if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
89040 +                   p[-4] != 0xff)
89041 +                       p -= 4;
89042 +               if (p[-3] != 0xff || p[-2] != 0xff)
89043 +                       p -= 2;
89044 +               if (p[-1] != 0xff)
89045 +                       p -= 1;
89046 +               if (!(p[0] & 0x80))
89047 +                       p -= 1;
89048 +       }
89049 +       return (const u8 *)&n + sizeof(n) - p;
89052 +/* full trusted function. It does not check 'size' for errors */
89053 +static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
89055 +       const u8 *p = (u8 *)&v;
89057 +       switch (size) {
89058 +       case 8:
89059 +               run_buf[7] = p[0];
89060 +               fallthrough;
89061 +       case 7:
89062 +               run_buf[6] = p[1];
89063 +               fallthrough;
89064 +       case 6:
89065 +               run_buf[5] = p[2];
89066 +               fallthrough;
89067 +       case 5:
89068 +               run_buf[4] = p[3];
89069 +               fallthrough;
89070 +       case 4:
89071 +               run_buf[3] = p[4];
89072 +               fallthrough;
89073 +       case 3:
89074 +               run_buf[2] = p[5];
89075 +               fallthrough;
89076 +       case 2:
89077 +               run_buf[1] = p[6];
89078 +               fallthrough;
89079 +       case 1:
89080 +               run_buf[0] = p[7];
89081 +       }
89084 +/* full trusted function. It does not check 'size' for errors */
89085 +static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
89087 +       u8 *p = (u8 *)&v;
89089 +       switch (size) {
89090 +       case 8:
89091 +               p[0] = run_buf[7];
89092 +               fallthrough;
89093 +       case 7:
89094 +               p[1] = run_buf[6];
89095 +               fallthrough;
89096 +       case 6:
89097 +               p[2] = run_buf[5];
89098 +               fallthrough;
89099 +       case 5:
89100 +               p[3] = run_buf[4];
89101 +               fallthrough;
89102 +       case 4:
89103 +               p[4] = run_buf[3];
89104 +               fallthrough;
89105 +       case 3:
89106 +               p[5] = run_buf[2];
89107 +               fallthrough;
89108 +       case 2:
89109 +               p[6] = run_buf[1];
89110 +               fallthrough;
89111 +       case 1:
89112 +               p[7] = run_buf[0];
89113 +       }
89114 +       return v;
89117 +#else
89119 +static inline int run_packed_size(const s64 n)
89121 +       const u8 *p = (const u8 *)&n;
89123 +       if (n >= 0) {
89124 +               if (p[7] || p[6] || p[5] || p[4])
89125 +                       p += 4;
89126 +               if (p[3] || p[2])
89127 +                       p += 2;
89128 +               if (p[1])
89129 +                       p += 1;
89130 +               if (p[0] & 0x80)
89131 +                       p += 1;
89132 +       } else {
89133 +               if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
89134 +                   p[4] != 0xff)
89135 +                       p += 4;
89136 +               if (p[3] != 0xff || p[2] != 0xff)
89137 +                       p += 2;
89138 +               if (p[1] != 0xff)
89139 +                       p += 1;
89140 +               if (!(p[0] & 0x80))
89141 +                       p += 1;
89142 +       }
89144 +       return 1 + p - (const u8 *)&n;
89147 +/* full trusted function. It does not check 'size' for errors */
89148 +static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
89150 +       const u8 *p = (u8 *)&v;
89152 +       /* memcpy( run_buf, &v, size); is it faster? */
89153 +       switch (size) {
89154 +       case 8:
89155 +               run_buf[7] = p[7];
89156 +               fallthrough;
89157 +       case 7:
89158 +               run_buf[6] = p[6];
89159 +               fallthrough;
89160 +       case 6:
89161 +               run_buf[5] = p[5];
89162 +               fallthrough;
89163 +       case 5:
89164 +               run_buf[4] = p[4];
89165 +               fallthrough;
89166 +       case 4:
89167 +               run_buf[3] = p[3];
89168 +               fallthrough;
89169 +       case 3:
89170 +               run_buf[2] = p[2];
89171 +               fallthrough;
89172 +       case 2:
89173 +               run_buf[1] = p[1];
89174 +               fallthrough;
89175 +       case 1:
89176 +               run_buf[0] = p[0];
89177 +       }
89180 +/* full trusted function. It does not check 'size' for errors */
89181 +static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
89183 +       u8 *p = (u8 *)&v;
89185 +       /* memcpy( &v, run_buf, size); is it faster? */
89186 +       switch (size) {
89187 +       case 8:
89188 +               p[7] = run_buf[7];
89189 +               fallthrough;
89190 +       case 7:
89191 +               p[6] = run_buf[6];
89192 +               fallthrough;
89193 +       case 6:
89194 +               p[5] = run_buf[5];
89195 +               fallthrough;
89196 +       case 5:
89197 +               p[4] = run_buf[4];
89198 +               fallthrough;
89199 +       case 4:
89200 +               p[3] = run_buf[3];
89201 +               fallthrough;
89202 +       case 3:
89203 +               p[2] = run_buf[2];
89204 +               fallthrough;
89205 +       case 2:
89206 +               p[1] = run_buf[1];
89207 +               fallthrough;
89208 +       case 1:
89209 +               p[0] = run_buf[0];
89210 +       }
89211 +       return v;
89213 +#endif
89216 + * run_pack
89217 + *
89218 + * packs runs into buffer
89219 + * packed_vcns - how much runs we have packed
89220 + * packed_size - how much bytes we have used run_buf
89221 + */
89222 +int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
89223 +            u32 run_buf_size, CLST *packed_vcns)
89225 +       CLST next_vcn, vcn, lcn;
89226 +       CLST prev_lcn = 0;
89227 +       CLST evcn1 = svcn + len;
89228 +       int packed_size = 0;
89229 +       size_t i;
89230 +       bool ok;
89231 +       s64 dlcn;
89232 +       int offset_size, size_size, tmp;
89234 +       next_vcn = vcn = svcn;
89236 +       *packed_vcns = 0;
89238 +       if (!len)
89239 +               goto out;
89241 +       ok = run_lookup_entry(run, vcn, &lcn, &len, &i);
89243 +       if (!ok)
89244 +               goto error;
89246 +       if (next_vcn != vcn)
89247 +               goto error;
89249 +       for (;;) {
89250 +               next_vcn = vcn + len;
89251 +               if (next_vcn > evcn1)
89252 +                       len = evcn1 - vcn;
89254 +               /* how much bytes required to pack len */
89255 +               size_size = run_packed_size(len);
89257 +               /* offset_size - how much bytes is packed dlcn */
89258 +               if (lcn == SPARSE_LCN) {
89259 +                       offset_size = 0;
89260 +                       dlcn = 0;
89261 +               } else {
89262 +                       /* NOTE: lcn can be less than prev_lcn! */
89263 +                       dlcn = (s64)lcn - prev_lcn;
89264 +                       offset_size = run_packed_size(dlcn);
89265 +                       prev_lcn = lcn;
89266 +               }
89268 +               tmp = run_buf_size - packed_size - 2 - offset_size;
89269 +               if (tmp <= 0)
89270 +                       goto out;
89272 +               /* can we store this entire run */
89273 +               if (tmp < size_size)
89274 +                       goto out;
89276 +               if (run_buf) {
89277 +                       /* pack run header */
89278 +                       run_buf[0] = ((u8)(size_size | (offset_size << 4)));
89279 +                       run_buf += 1;
89281 +                       /* Pack the length of run */
89282 +                       run_pack_s64(run_buf, size_size, len);
89284 +                       run_buf += size_size;
89285 +                       /* Pack the offset from previous lcn */
89286 +                       run_pack_s64(run_buf, offset_size, dlcn);
89287 +                       run_buf += offset_size;
89288 +               }
89290 +               packed_size += 1 + offset_size + size_size;
89291 +               *packed_vcns += len;
89293 +               if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
89294 +                       goto out;
89296 +               ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
89297 +               if (!ok)
89298 +                       goto error;
89300 +               if (next_vcn != vcn)
89301 +                       goto error;
89302 +       }
89304 +out:
89305 +       /* Store last zero */
89306 +       if (run_buf)
89307 +               run_buf[0] = 0;
89309 +       return packed_size + 1;
89311 +error:
89312 +       return -EOPNOTSUPP;
89316 + * run_unpack
89317 + *
89318 + * unpacks packed runs from "run_buf"
89319 + * returns error, if negative, or real used bytes
89320 + */
89321 +int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
89322 +              CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
89323 +              u32 run_buf_size)
89325 +       u64 prev_lcn, vcn64, lcn, next_vcn;
89326 +       const u8 *run_last, *run_0;
89327 +       bool is_mft = ino == MFT_REC_MFT;
89329 +       /* Check for empty */
89330 +       if (evcn + 1 == svcn)
89331 +               return 0;
89333 +       if (evcn < svcn)
89334 +               return -EINVAL;
89336 +       run_0 = run_buf;
89337 +       run_last = run_buf + run_buf_size;
89338 +       prev_lcn = 0;
89339 +       vcn64 = svcn;
89341 +       /* Read all runs the chain */
89342 +       /* size_size - how much bytes is packed len */
89343 +       while (run_buf < run_last) {
89344 +               /* size_size - how much bytes is packed len */
89345 +               u8 size_size = *run_buf & 0xF;
89346 +               /* offset_size - how much bytes is packed dlcn */
89347 +               u8 offset_size = *run_buf++ >> 4;
89348 +               u64 len;
89350 +               if (!size_size)
89351 +                       break;
89353 +               /*
89354 +                * Unpack runs.
89355 +                * NOTE: runs are stored little endian order
89356 +                * "len" is unsigned value, "dlcn" is signed
89357 +                * Large positive number requires to store 5 bytes
89358 +                * e.g.: 05 FF 7E FF FF 00 00 00
89359 +                */
89360 +               if (size_size > 8)
89361 +                       return -EINVAL;
89363 +               len = run_unpack_s64(run_buf, size_size, 0);
89364 +               /* skip size_size */
89365 +               run_buf += size_size;
89367 +               if (!len)
89368 +                       return -EINVAL;
89370 +               if (!offset_size)
89371 +                       lcn = SPARSE_LCN64;
89372 +               else if (offset_size <= 8) {
89373 +                       s64 dlcn;
89375 +                       /* initial value of dlcn is -1 or 0 */
89376 +                       dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
89377 +                       dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
89378 +                       /* skip offset_size */
89379 +                       run_buf += offset_size;
89381 +                       if (!dlcn)
89382 +                               return -EINVAL;
89383 +                       lcn = prev_lcn + dlcn;
89384 +                       prev_lcn = lcn;
89385 +               } else
89386 +                       return -EINVAL;
89388 +               next_vcn = vcn64 + len;
89389 +               /* check boundary */
89390 +               if (next_vcn > evcn + 1)
89391 +                       return -EINVAL;
89393 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
89394 +               if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
89395 +                       ntfs_err(
89396 +                               sbi->sb,
89397 +                               "This driver is compiled whitout CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
89398 +                               "Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
89399 +                               "Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
89400 +                               vcn64, lcn, len);
89401 +                       return -EOPNOTSUPP;
89402 +               }
89403 +#endif
89404 +               if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
89405 +                       /* lcn range is out of volume */
89406 +                       return -EINVAL;
89407 +               }
89409 +               if (!run)
89410 +                       ; /* called from check_attr(fslog.c) to check run */
89411 +               else if (run == RUN_DEALLOCATE) {
89412 +                       /* called from ni_delete_all to free clusters without storing in run */
89413 +                       if (lcn != SPARSE_LCN64)
89414 +                               mark_as_free_ex(sbi, lcn, len, true);
89415 +               } else if (vcn64 >= vcn) {
89416 +                       if (!run_add_entry(run, vcn64, lcn, len, is_mft))
89417 +                               return -ENOMEM;
89418 +               } else if (next_vcn > vcn) {
89419 +                       u64 dlen = vcn - vcn64;
89421 +                       if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
89422 +                                          is_mft))
89423 +                               return -ENOMEM;
89424 +               }
89426 +               vcn64 = next_vcn;
89427 +       }
89429 +       if (vcn64 != evcn + 1) {
89430 +               /* not expected length of unpacked runs */
89431 +               return -EINVAL;
89432 +       }
89434 +       return run_buf - run_0;
89437 +#ifdef NTFS3_CHECK_FREE_CLST
89439 + * run_unpack_ex
89440 + *
89441 + * unpacks packed runs from "run_buf"
89442 + * checks unpacked runs to be used in bitmap
89443 + * returns error, if negative, or real used bytes
89444 + */
89445 +int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
89446 +                 CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
89447 +                 u32 run_buf_size)
89449 +       int ret, err;
89450 +       CLST next_vcn, lcn, len;
89451 +       size_t index;
89452 +       bool ok;
89453 +       struct wnd_bitmap *wnd;
89455 +       ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
89456 +       if (ret <= 0)
89457 +               return ret;
89459 +       if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
89460 +               return ret;
89462 +       if (ino == MFT_REC_BADCLUST)
89463 +               return ret;
89465 +       next_vcn = vcn = svcn;
89466 +       wnd = &sbi->used.bitmap;
89468 +       for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
89469 +            next_vcn <= evcn;
89470 +            ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
89471 +               if (!ok || next_vcn != vcn)
89472 +                       return -EINVAL;
89474 +               next_vcn = vcn + len;
89476 +               if (lcn == SPARSE_LCN)
89477 +                       continue;
89479 +               if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
89480 +                       continue;
89482 +               down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
89483 +               /* Check for free blocks */
89484 +               ok = wnd_is_used(wnd, lcn, len);
89485 +               up_read(&wnd->rw_lock);
89486 +               if (ok)
89487 +                       continue;
89489 +               /* Looks like volume is corrupted */
89490 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
89492 +               if (down_write_trylock(&wnd->rw_lock)) {
89493 +                       /* mark all zero bits as used in range [lcn, lcn+len) */
89494 +                       CLST i, lcn_f = 0, len_f = 0;
89496 +                       err = 0;
89497 +                       for (i = 0; i < len; i++) {
89498 +                               if (wnd_is_free(wnd, lcn + i, 1)) {
89499 +                                       if (!len_f)
89500 +                                               lcn_f = lcn + i;
89501 +                                       len_f += 1;
89502 +                               } else if (len_f) {
89503 +                                       err = wnd_set_used(wnd, lcn_f, len_f);
89504 +                                       len_f = 0;
89505 +                                       if (err)
89506 +                                               break;
89507 +                               }
89508 +                       }
89510 +                       if (len_f)
89511 +                               err = wnd_set_used(wnd, lcn_f, len_f);
89513 +                       up_write(&wnd->rw_lock);
89514 +                       if (err)
89515 +                               return err;
89516 +               }
89517 +       }
89519 +       return ret;
89521 +#endif
89524 + * run_get_highest_vcn
89525 + *
89526 + * returns the highest vcn from a mapping pairs array
89527 + * it used while replaying log file
89528 + */
89529 +int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
89531 +       u64 vcn64 = vcn;
89532 +       u8 size_size;
89534 +       while ((size_size = *run_buf & 0xF)) {
89535 +               u8 offset_size = *run_buf++ >> 4;
89536 +               u64 len;
89538 +               if (size_size > 8 || offset_size > 8)
89539 +                       return -EINVAL;
89541 +               len = run_unpack_s64(run_buf, size_size, 0);
89542 +               if (!len)
89543 +                       return -EINVAL;
89545 +               run_buf += size_size + offset_size;
89546 +               vcn64 += len;
89548 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
89549 +               if (vcn64 > 0x100000000ull)
89550 +                       return -EINVAL;
89551 +#endif
89552 +       }
89554 +       *highest_vcn = vcn64 - 1;
89555 +       return 0;
89557 diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
89558 new file mode 100644
89559 index 000000000000..c563431248bf
89560 --- /dev/null
89561 +++ b/fs/ntfs3/super.c
89562 @@ -0,0 +1,1500 @@
89563 +// SPDX-License-Identifier: GPL-2.0
89565 + *
89566 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
89567 + *
89568 + *
89569 + *                 terminology
89570 + *
89571 + * cluster - allocation unit     - 512,1K,2K,4K,...,2M
89572 + * vcn - virtual cluster number  - offset inside the file in clusters
89573 + * vbo - virtual byte offset     - offset inside the file in bytes
89574 + * lcn - logical cluster number  - 0 based cluster in clusters heap
89575 + * lbo - logical byte offset     - absolute position inside volume
89576 + * run - maps vcn to lcn         - stored in attributes in packed form
89577 + * attr - attribute segment      - std/name/data etc records inside MFT
89578 + * mi  - mft inode               - one MFT record(usually 1024 bytes or 4K), consists of attributes
89579 + * ni  - ntfs inode              - extends linux inode. consists of one or more mft inodes
89580 + * index - unit inside directory - 2K, 4K, <=page size, does not depend on cluster size
89581 + *
89582 + * TODO: Implement
89583 + * https://docs.microsoft.com/en-us/windows/wsl/file-permissions
89584 + */
89586 +#include <linux/backing-dev.h>
89587 +#include <linux/blkdev.h>
89588 +#include <linux/buffer_head.h>
89589 +#include <linux/exportfs.h>
89590 +#include <linux/fs.h>
89591 +#include <linux/iversion.h>
89592 +#include <linux/module.h>
89593 +#include <linux/nls.h>
89594 +#include <linux/parser.h>
89595 +#include <linux/seq_file.h>
89596 +#include <linux/statfs.h>
89598 +#include "debug.h"
89599 +#include "ntfs.h"
89600 +#include "ntfs_fs.h"
89601 +#ifdef CONFIG_NTFS3_LZX_XPRESS
89602 +#include "lib/lib.h"
89603 +#endif
89605 +#ifdef CONFIG_PRINTK
89607 + * Trace warnings/notices/errors
89608 + * Thanks Joe Perches <joe@perches.com> for implementation
89609 + */
89610 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
89612 +       struct va_format vaf;
89613 +       va_list args;
89614 +       int level;
89615 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
89617 +       /*should we use different ratelimits for warnings/notices/errors? */
89618 +       if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
89619 +               return;
89621 +       va_start(args, fmt);
89623 +       level = printk_get_level(fmt);
89624 +       vaf.fmt = printk_skip_level(fmt);
89625 +       vaf.va = &args;
89626 +       printk("%c%cntfs3: %s: %pV\n", KERN_SOH_ASCII, level, sb->s_id, &vaf);
89628 +       va_end(args);
89631 +static char s_name_buf[512];
89632 +static atomic_t s_name_buf_cnt = ATOMIC_INIT(1); // 1 means 'free s_name_buf'
89634 +/* print warnings/notices/errors about inode using name or inode number */
89635 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
89637 +       struct super_block *sb = inode->i_sb;
89638 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
89639 +       char *name;
89640 +       va_list args;
89641 +       struct va_format vaf;
89642 +       int level;
89644 +       if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
89645 +               return;
89647 +       /* use static allocated buffer, if possible */
89648 +       name = atomic_dec_and_test(&s_name_buf_cnt)
89649 +                      ? s_name_buf
89650 +                      : kmalloc(sizeof(s_name_buf), GFP_NOFS);
89652 +       if (name) {
89653 +               struct dentry *de = d_find_alias(inode);
89654 +               const u32 name_len = ARRAY_SIZE(s_name_buf) - 1;
89656 +               if (de) {
89657 +                       spin_lock(&de->d_lock);
89658 +                       snprintf(name, name_len, " \"%s\"", de->d_name.name);
89659 +                       spin_unlock(&de->d_lock);
89660 +                       name[name_len] = 0; /* to be sure*/
89661 +               } else {
89662 +                       name[0] = 0;
89663 +               }
89664 +               dput(de); /* cocci warns if placed in branch "if (de)" */
89665 +       }
89667 +       va_start(args, fmt);
89669 +       level = printk_get_level(fmt);
89670 +       vaf.fmt = printk_skip_level(fmt);
89671 +       vaf.va = &args;
89673 +       printk("%c%cntfs3: %s: ino=%lx,%s %pV\n", KERN_SOH_ASCII, level,
89674 +              sb->s_id, inode->i_ino, name ? name : "", &vaf);
89676 +       va_end(args);
89678 +       atomic_inc(&s_name_buf_cnt);
89679 +       if (name != s_name_buf)
89680 +               kfree(name);
89682 +#endif
89685 + * Shared memory struct.
89686 + *
89687 + * on-disk ntfs's upcase table is created by ntfs formater
89688 + * 'upcase' table is 128K bytes of memory
89689 + * we should read it into memory when mounting
89690 + * Several ntfs volumes likely use the same 'upcase' table
89691 + * It is good idea to share in-memory 'upcase' table between different volumes
89692 + * Unfortunately winxp/vista/win7 use different upcase tables
89693 + */
89694 +static DEFINE_SPINLOCK(s_shared_lock);
89696 +static struct {
89697 +       void *ptr;
89698 +       u32 len;
89699 +       int cnt;
89700 +} s_shared[8];
89703 + * ntfs_set_shared
89704 + *
89705 + * Returns 'ptr' if pointer was saved in shared memory
89706 + * Returns NULL if pointer was not shared
89707 + */
89708 +void *ntfs_set_shared(void *ptr, u32 bytes)
89710 +       void *ret = NULL;
89711 +       int i, j = -1;
89713 +       spin_lock(&s_shared_lock);
89714 +       for (i = 0; i < ARRAY_SIZE(s_shared); i++) {
89715 +               if (!s_shared[i].cnt) {
89716 +                       j = i;
89717 +               } else if (bytes == s_shared[i].len &&
89718 +                          !memcmp(s_shared[i].ptr, ptr, bytes)) {
89719 +                       s_shared[i].cnt += 1;
89720 +                       ret = s_shared[i].ptr;
89721 +                       break;
89722 +               }
89723 +       }
89725 +       if (!ret && j != -1) {
89726 +               s_shared[j].ptr = ptr;
89727 +               s_shared[j].len = bytes;
89728 +               s_shared[j].cnt = 1;
89729 +               ret = ptr;
89730 +       }
89731 +       spin_unlock(&s_shared_lock);
89733 +       return ret;
89737 + * ntfs_put_shared
89738 + *
89739 + * Returns 'ptr' if pointer is not shared anymore
89740 + * Returns NULL if pointer is still shared
89741 + */
89742 +void *ntfs_put_shared(void *ptr)
89744 +       void *ret = ptr;
89745 +       int i;
89747 +       spin_lock(&s_shared_lock);
89748 +       for (i = 0; i < ARRAY_SIZE(s_shared); i++) {
89749 +               if (s_shared[i].cnt && s_shared[i].ptr == ptr) {
89750 +                       if (--s_shared[i].cnt)
89751 +                               ret = NULL;
89752 +                       break;
89753 +               }
89754 +       }
89755 +       spin_unlock(&s_shared_lock);
89757 +       return ret;
89760 +static inline void clear_mount_options(struct ntfs_mount_options *options)
89762 +       unload_nls(options->nls);
89765 +enum Opt {
89766 +       Opt_uid,
89767 +       Opt_gid,
89768 +       Opt_umask,
89769 +       Opt_dmask,
89770 +       Opt_fmask,
89771 +       Opt_immutable,
89772 +       Opt_discard,
89773 +       Opt_force,
89774 +       Opt_sparse,
89775 +       Opt_nohidden,
89776 +       Opt_showmeta,
89777 +       Opt_acl,
89778 +       Opt_noatime,
89779 +       Opt_nls,
89780 +       Opt_prealloc,
89781 +       Opt_no_acs_rules,
89782 +       Opt_err,
89785 +static const match_table_t ntfs_tokens = {
89786 +       { Opt_uid, "uid=%u" },
89787 +       { Opt_gid, "gid=%u" },
89788 +       { Opt_umask, "umask=%o" },
89789 +       { Opt_dmask, "dmask=%o" },
89790 +       { Opt_fmask, "fmask=%o" },
89791 +       { Opt_immutable, "sys_immutable" },
89792 +       { Opt_discard, "discard" },
89793 +       { Opt_force, "force" },
89794 +       { Opt_sparse, "sparse" },
89795 +       { Opt_nohidden, "nohidden" },
89796 +       { Opt_acl, "acl" },
89797 +       { Opt_noatime, "noatime" },
89798 +       { Opt_showmeta, "showmeta" },
89799 +       { Opt_nls, "nls=%s" },
89800 +       { Opt_prealloc, "prealloc" },
89801 +       { Opt_no_acs_rules, "no_acs_rules" },
89802 +       { Opt_err, NULL },
89805 +static noinline int ntfs_parse_options(struct super_block *sb, char *options,
89806 +                                      int silent,
89807 +                                      struct ntfs_mount_options *opts)
89809 +       char *p;
89810 +       substring_t args[MAX_OPT_ARGS];
89811 +       int option;
89812 +       char nls_name[30];
89813 +       struct nls_table *nls;
89815 +       opts->fs_uid = current_uid();
89816 +       opts->fs_gid = current_gid();
89817 +       opts->fs_fmask_inv = opts->fs_dmask_inv = ~current_umask();
89818 +       nls_name[0] = 0;
89820 +       if (!options)
89821 +               goto out;
89823 +       while ((p = strsep(&options, ","))) {
89824 +               int token;
89826 +               if (!*p)
89827 +                       continue;
89829 +               token = match_token(p, ntfs_tokens, args);
89830 +               switch (token) {
89831 +               case Opt_immutable:
89832 +                       opts->sys_immutable = 1;
89833 +                       break;
89834 +               case Opt_uid:
89835 +                       if (match_int(&args[0], &option))
89836 +                               return -EINVAL;
89837 +                       opts->fs_uid = make_kuid(current_user_ns(), option);
89838 +                       if (!uid_valid(opts->fs_uid))
89839 +                               return -EINVAL;
89840 +                       opts->uid = 1;
89841 +                       break;
89842 +               case Opt_gid:
89843 +                       if (match_int(&args[0], &option))
89844 +                               return -EINVAL;
89845 +                       opts->fs_gid = make_kgid(current_user_ns(), option);
89846 +                       if (!gid_valid(opts->fs_gid))
89847 +                               return -EINVAL;
89848 +                       opts->gid = 1;
89849 +                       break;
89850 +               case Opt_umask:
89851 +                       if (match_octal(&args[0], &option))
89852 +                               return -EINVAL;
89853 +                       opts->fs_fmask_inv = opts->fs_dmask_inv = ~option;
89854 +                       opts->fmask = opts->dmask = 1;
89855 +                       break;
89856 +               case Opt_dmask:
89857 +                       if (match_octal(&args[0], &option))
89858 +                               return -EINVAL;
89859 +                       opts->fs_dmask_inv = ~option;
89860 +                       opts->dmask = 1;
89861 +                       break;
89862 +               case Opt_fmask:
89863 +                       if (match_octal(&args[0], &option))
89864 +                               return -EINVAL;
89865 +                       opts->fs_fmask_inv = ~option;
89866 +                       opts->fmask = 1;
89867 +                       break;
89868 +               case Opt_discard:
89869 +                       opts->discard = 1;
89870 +                       break;
89871 +               case Opt_force:
89872 +                       opts->force = 1;
89873 +                       break;
89874 +               case Opt_sparse:
89875 +                       opts->sparse = 1;
89876 +                       break;
89877 +               case Opt_nohidden:
89878 +                       opts->nohidden = 1;
89879 +                       break;
89880 +               case Opt_acl:
89881 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
89882 +                       sb->s_flags |= SB_POSIXACL;
89883 +                       break;
89884 +#else
89885 +                       ntfs_err(sb, "support for ACL not compiled in!");
89886 +                       return -EINVAL;
89887 +#endif
89888 +               case Opt_noatime:
89889 +                       sb->s_flags |= SB_NOATIME;
89890 +                       break;
89891 +               case Opt_showmeta:
89892 +                       opts->showmeta = 1;
89893 +                       break;
89894 +               case Opt_nls:
89895 +                       match_strlcpy(nls_name, &args[0], sizeof(nls_name));
89896 +                       break;
89897 +               case Opt_prealloc:
89898 +                       opts->prealloc = 1;
89899 +                       break;
89900 +               case Opt_no_acs_rules:
89901 +                       opts->no_acs_rules = 1;
89902 +                       break;
89903 +               default:
89904 +                       if (!silent)
89905 +                               ntfs_err(
89906 +                                       sb,
89907 +                                       "Unrecognized mount option \"%s\" or missing value",
89908 +                                       p);
89909 +                       //return -EINVAL;
89910 +               }
89911 +       }
89913 +out:
89914 +       if (!strcmp(nls_name[0] ? nls_name : CONFIG_NLS_DEFAULT, "utf8")) {
89915 +               /* For UTF-8 use utf16s_to_utf8s/utf8s_to_utf16s instead of nls */
89916 +               nls = NULL;
89917 +       } else if (nls_name[0]) {
89918 +               nls = load_nls(nls_name);
89919 +               if (!nls) {
89920 +                       ntfs_err(sb, "failed to load \"%s\"", nls_name);
89921 +                       return -EINVAL;
89922 +               }
89923 +       } else {
89924 +               nls = load_nls_default();
89925 +               if (!nls) {
89926 +                       ntfs_err(sb, "failed to load default nls");
89927 +                       return -EINVAL;
89928 +               }
89929 +       }
89930 +       opts->nls = nls;
89932 +       return 0;
89935 +static int ntfs_remount(struct super_block *sb, int *flags, char *data)
89937 +       int err, ro_rw;
89938 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
89939 +       struct ntfs_mount_options old_opts;
89940 +       char *orig_data = kstrdup(data, GFP_KERNEL);
89942 +       if (data && !orig_data)
89943 +               return -ENOMEM;
89945 +       /* Store  original options */
89946 +       memcpy(&old_opts, &sbi->options, sizeof(old_opts));
89947 +       clear_mount_options(&sbi->options);
89948 +       memset(&sbi->options, 0, sizeof(sbi->options));
89950 +       err = ntfs_parse_options(sb, data, 0, &sbi->options);
89951 +       if (err)
89952 +               goto restore_opts;
89954 +       ro_rw = sb_rdonly(sb) && !(*flags & SB_RDONLY);
89955 +       if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
89956 +               ntfs_warn(
89957 +                       sb,
89958 +                       "Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
89959 +               err = -EINVAL;
89960 +               goto restore_opts;
89961 +       }
89963 +       sync_filesystem(sb);
89965 +       if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
89966 +           !sbi->options.force) {
89967 +               ntfs_warn(sb, "volume is dirty and \"force\" flag is not set!");
89968 +               err = -EINVAL;
89969 +               goto restore_opts;
89970 +       }
89972 +       clear_mount_options(&old_opts);
89974 +       *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME) |
89975 +                SB_NODIRATIME | SB_NOATIME;
89976 +       ntfs_info(sb, "re-mounted. Opts: %s", orig_data);
89977 +       err = 0;
89978 +       goto out;
89980 +restore_opts:
89981 +       clear_mount_options(&sbi->options);
89982 +       memcpy(&sbi->options, &old_opts, sizeof(old_opts));
89984 +out:
89985 +       kfree(orig_data);
89986 +       return err;
89989 +static struct kmem_cache *ntfs_inode_cachep;
89991 +static struct inode *ntfs_alloc_inode(struct super_block *sb)
89993 +       struct ntfs_inode *ni = kmem_cache_alloc(ntfs_inode_cachep, GFP_NOFS);
89995 +       if (!ni)
89996 +               return NULL;
89998 +       memset(ni, 0, offsetof(struct ntfs_inode, vfs_inode));
90000 +       mutex_init(&ni->ni_lock);
90002 +       return &ni->vfs_inode;
90005 +static void ntfs_i_callback(struct rcu_head *head)
90007 +       struct inode *inode = container_of(head, struct inode, i_rcu);
90008 +       struct ntfs_inode *ni = ntfs_i(inode);
90010 +       mutex_destroy(&ni->ni_lock);
90012 +       kmem_cache_free(ntfs_inode_cachep, ni);
90015 +static void ntfs_destroy_inode(struct inode *inode)
90017 +       call_rcu(&inode->i_rcu, ntfs_i_callback);
90020 +static void init_once(void *foo)
90022 +       struct ntfs_inode *ni = foo;
90024 +       inode_init_once(&ni->vfs_inode);
90027 +/* noinline to reduce binary size*/
90028 +static noinline void put_ntfs(struct ntfs_sb_info *sbi)
90030 +       ntfs_free(sbi->new_rec);
90031 +       ntfs_vfree(ntfs_put_shared(sbi->upcase));
90032 +       ntfs_free(sbi->def_table);
90034 +       wnd_close(&sbi->mft.bitmap);
90035 +       wnd_close(&sbi->used.bitmap);
90037 +       if (sbi->mft.ni)
90038 +               iput(&sbi->mft.ni->vfs_inode);
90040 +       if (sbi->security.ni)
90041 +               iput(&sbi->security.ni->vfs_inode);
90043 +       if (sbi->reparse.ni)
90044 +               iput(&sbi->reparse.ni->vfs_inode);
90046 +       if (sbi->objid.ni)
90047 +               iput(&sbi->objid.ni->vfs_inode);
90049 +       if (sbi->volume.ni)
90050 +               iput(&sbi->volume.ni->vfs_inode);
90052 +       ntfs_update_mftmirr(sbi, 0);
90054 +       indx_clear(&sbi->security.index_sii);
90055 +       indx_clear(&sbi->security.index_sdh);
90056 +       indx_clear(&sbi->reparse.index_r);
90057 +       indx_clear(&sbi->objid.index_o);
90058 +       ntfs_free(sbi->compress.lznt);
90059 +#ifdef CONFIG_NTFS3_LZX_XPRESS
90060 +       xpress_free_decompressor(sbi->compress.xpress);
90061 +       lzx_free_decompressor(sbi->compress.lzx);
90062 +#endif
90063 +       clear_mount_options(&sbi->options);
90065 +       ntfs_free(sbi);
90068 +static void ntfs_put_super(struct super_block *sb)
90070 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
90072 +       /*mark rw ntfs as clear, if possible*/
90073 +       ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
90075 +       put_ntfs(sbi);
90077 +       sync_blockdev(sb->s_bdev);
90080 +static int ntfs_statfs(struct dentry *dentry, struct kstatfs *buf)
90082 +       struct super_block *sb = dentry->d_sb;
90083 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
90084 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
90086 +       buf->f_type = sb->s_magic;
90087 +       buf->f_bsize = sbi->cluster_size;
90088 +       buf->f_blocks = wnd->nbits;
90090 +       buf->f_bfree = buf->f_bavail = wnd_zeroes(wnd);
90091 +       buf->f_fsid.val[0] = sbi->volume.ser_num;
90092 +       buf->f_fsid.val[1] = (sbi->volume.ser_num >> 32);
90093 +       buf->f_namelen = NTFS_NAME_LEN;
90095 +       return 0;
90098 +static int ntfs_show_options(struct seq_file *m, struct dentry *root)
90100 +       struct super_block *sb = root->d_sb;
90101 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
90102 +       struct ntfs_mount_options *opts = &sbi->options;
90103 +       struct user_namespace *user_ns = seq_user_ns(m);
90105 +       if (opts->uid)
90106 +               seq_printf(m, ",uid=%u",
90107 +                          from_kuid_munged(user_ns, opts->fs_uid));
90108 +       if (opts->gid)
90109 +               seq_printf(m, ",gid=%u",
90110 +                          from_kgid_munged(user_ns, opts->fs_gid));
90111 +       if (opts->fmask)
90112 +               seq_printf(m, ",fmask=%04o", ~opts->fs_fmask_inv);
90113 +       if (opts->dmask)
90114 +               seq_printf(m, ",dmask=%04o", ~opts->fs_dmask_inv);
90115 +       if (opts->nls)
90116 +               seq_printf(m, ",nls=%s", opts->nls->charset);
90117 +       else
90118 +               seq_puts(m, ",nls=utf8");
90119 +       if (opts->sys_immutable)
90120 +               seq_puts(m, ",sys_immutable");
90121 +       if (opts->discard)
90122 +               seq_puts(m, ",discard");
90123 +       if (opts->sparse)
90124 +               seq_puts(m, ",sparse");
90125 +       if (opts->showmeta)
90126 +               seq_puts(m, ",showmeta");
90127 +       if (opts->nohidden)
90128 +               seq_puts(m, ",nohidden");
90129 +       if (opts->force)
90130 +               seq_puts(m, ",force");
90131 +       if (opts->no_acs_rules)
90132 +               seq_puts(m, ",no_acs_rules");
90133 +       if (opts->prealloc)
90134 +               seq_puts(m, ",prealloc");
90135 +       if (sb->s_flags & SB_POSIXACL)
90136 +               seq_puts(m, ",acl");
90137 +       if (sb->s_flags & SB_NOATIME)
90138 +               seq_puts(m, ",noatime");
90140 +       return 0;
90143 +/*super_operations::sync_fs*/
90144 +static int ntfs_sync_fs(struct super_block *sb, int wait)
90146 +       int err = 0, err2;
90147 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
90148 +       struct ntfs_inode *ni;
90149 +       struct inode *inode;
90151 +       ni = sbi->security.ni;
90152 +       if (ni) {
90153 +               inode = &ni->vfs_inode;
90154 +               err2 = _ni_write_inode(inode, wait);
90155 +               if (err2 && !err)
90156 +                       err = err2;
90157 +       }
90159 +       ni = sbi->objid.ni;
90160 +       if (ni) {
90161 +               inode = &ni->vfs_inode;
90162 +               err2 = _ni_write_inode(inode, wait);
90163 +               if (err2 && !err)
90164 +                       err = err2;
90165 +       }
90167 +       ni = sbi->reparse.ni;
90168 +       if (ni) {
90169 +               inode = &ni->vfs_inode;
90170 +               err2 = _ni_write_inode(inode, wait);
90171 +               if (err2 && !err)
90172 +                       err = err2;
90173 +       }
90175 +       if (!err)
90176 +               ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
90178 +       ntfs_update_mftmirr(sbi, wait);
90180 +       return err;
90183 +static const struct super_operations ntfs_sops = {
90184 +       .alloc_inode = ntfs_alloc_inode,
90185 +       .destroy_inode = ntfs_destroy_inode,
90186 +       .evict_inode = ntfs_evict_inode,
90187 +       .put_super = ntfs_put_super,
90188 +       .statfs = ntfs_statfs,
90189 +       .show_options = ntfs_show_options,
90190 +       .sync_fs = ntfs_sync_fs,
90191 +       .remount_fs = ntfs_remount,
90192 +       .write_inode = ntfs3_write_inode,
90195 +static struct inode *ntfs_export_get_inode(struct super_block *sb, u64 ino,
90196 +                                          u32 generation)
90198 +       struct MFT_REF ref;
90199 +       struct inode *inode;
90201 +       ref.low = cpu_to_le32(ino);
90202 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
90203 +       ref.high = cpu_to_le16(ino >> 32);
90204 +#else
90205 +       ref.high = 0;
90206 +#endif
90207 +       ref.seq = cpu_to_le16(generation);
90209 +       inode = ntfs_iget5(sb, &ref, NULL);
90210 +       if (!IS_ERR(inode) && is_bad_inode(inode)) {
90211 +               iput(inode);
90212 +               inode = ERR_PTR(-ESTALE);
90213 +       }
90215 +       return inode;
90218 +static struct dentry *ntfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
90219 +                                       int fh_len, int fh_type)
90221 +       return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
90222 +                                   ntfs_export_get_inode);
90225 +static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid,
90226 +                                       int fh_len, int fh_type)
90228 +       return generic_fh_to_parent(sb, fid, fh_len, fh_type,
90229 +                                   ntfs_export_get_inode);
90232 +/* TODO: == ntfs_sync_inode */
90233 +static int ntfs_nfs_commit_metadata(struct inode *inode)
90235 +       return _ni_write_inode(inode, 1);
90238 +static const struct export_operations ntfs_export_ops = {
90239 +       .fh_to_dentry = ntfs_fh_to_dentry,
90240 +       .fh_to_parent = ntfs_fh_to_parent,
90241 +       .get_parent = ntfs3_get_parent,
90242 +       .commit_metadata = ntfs_nfs_commit_metadata,
90245 +/* Returns Gb,Mb to print with "%u.%02u Gb" */
90246 +static u32 format_size_gb(const u64 bytes, u32 *mb)
90248 +       /* Do simple right 30 bit shift of 64 bit value */
90249 +       u64 kbytes = bytes >> 10;
90250 +       u32 kbytes32 = kbytes;
90252 +       *mb = (100 * (kbytes32 & 0xfffff) + 0x7ffff) >> 20;
90253 +       if (*mb >= 100)
90254 +               *mb = 99;
90256 +       return (kbytes32 >> 20) | (((u32)(kbytes >> 32)) << 12);
90259 +static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
90261 +       return boot->sectors_per_clusters <= 0x80
90262 +                      ? boot->sectors_per_clusters
90263 +                      : (1u << (0 - boot->sectors_per_clusters));
90266 +/* inits internal info from on-disk boot sector*/
90267 +static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
90268 +                              u64 dev_size)
90270 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
90271 +       int err;
90272 +       u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
90273 +       u64 sectors, clusters, fs_size, mlcn, mlcn2;
90274 +       struct NTFS_BOOT *boot;
90275 +       struct buffer_head *bh;
90276 +       struct MFT_REC *rec;
90277 +       u16 fn, ao;
90279 +       sbi->volume.blocks = dev_size >> PAGE_SHIFT;
90281 +       bh = ntfs_bread(sb, 0);
90282 +       if (!bh)
90283 +               return -EIO;
90285 +       err = -EINVAL;
90286 +       boot = (struct NTFS_BOOT *)bh->b_data;
90288 +       if (memcmp(boot->system_id, "NTFS    ", sizeof("NTFS    ") - 1))
90289 +               goto out;
90291 +       /* 0x55AA is not mandaroty. Thanks Maxim Suhanov*/
90292 +       /*if (0x55 != boot->boot_magic[0] || 0xAA != boot->boot_magic[1])
90293 +        *      goto out;
90294 +        */
90296 +       boot_sector_size = (u32)boot->bytes_per_sector[1] << 8;
90297 +       if (boot->bytes_per_sector[0] || boot_sector_size < SECTOR_SIZE ||
90298 +           !is_power_of2(boot_sector_size)) {
90299 +               goto out;
90300 +       }
90302 +       /* cluster size: 512, 1K, 2K, 4K, ... 2M */
90303 +       sct_per_clst = true_sectors_per_clst(boot);
90304 +       if (!is_power_of2(sct_per_clst))
90305 +               goto out;
90307 +       mlcn = le64_to_cpu(boot->mft_clst);
90308 +       mlcn2 = le64_to_cpu(boot->mft2_clst);
90309 +       sectors = le64_to_cpu(boot->sectors_per_volume);
90311 +       if (mlcn * sct_per_clst >= sectors)
90312 +               goto out;
90314 +       if (mlcn2 * sct_per_clst >= sectors)
90315 +               goto out;
90317 +       /* Check MFT record size */
90318 +       if ((boot->record_size < 0 &&
90319 +            SECTOR_SIZE > (2U << (-boot->record_size))) ||
90320 +           (boot->record_size >= 0 && !is_power_of2(boot->record_size))) {
90321 +               goto out;
90322 +       }
90324 +       /* Check index record size */
90325 +       if ((boot->index_size < 0 &&
90326 +            SECTOR_SIZE > (2U << (-boot->index_size))) ||
90327 +           (boot->index_size >= 0 && !is_power_of2(boot->index_size))) {
90328 +               goto out;
90329 +       }
90331 +       sbi->sector_size = boot_sector_size;
90332 +       sbi->sector_bits = blksize_bits(boot_sector_size);
90333 +       fs_size = (sectors + 1) << sbi->sector_bits;
90335 +       gb = format_size_gb(fs_size, &mb);
90337 +       /*
90338 +        * - Volume formatted and mounted with the same sector size
90339 +        * - Volume formatted 4K and mounted as 512
90340 +        * - Volume formatted 512 and mounted as 4K
90341 +        */
90342 +       if (sbi->sector_size != sector_size) {
90343 +               ntfs_warn(sb,
90344 +                         "Different NTFS' sector size and media sector size");
90345 +               dev_size += sector_size - 1;
90346 +       }
90348 +       sbi->cluster_size = boot_sector_size * sct_per_clst;
90349 +       sbi->cluster_bits = blksize_bits(sbi->cluster_size);
90351 +       sbi->mft.lbo = mlcn << sbi->cluster_bits;
90352 +       sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits;
90354 +       if (sbi->cluster_size < sbi->sector_size)
90355 +               goto out;
90357 +       sbi->cluster_mask = sbi->cluster_size - 1;
90358 +       sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask;
90359 +       sbi->record_size = record_size = boot->record_size < 0
90360 +                                                ? 1 << (-boot->record_size)
90361 +                                                : (u32)boot->record_size
90362 +                                                          << sbi->cluster_bits;
90364 +       if (record_size > MAXIMUM_BYTES_PER_MFT)
90365 +               goto out;
90367 +       sbi->record_bits = blksize_bits(record_size);
90368 +       sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes
90370 +       sbi->max_bytes_per_attr =
90371 +               record_size - QuadAlign(MFTRECORD_FIXUP_OFFSET_1) -
90372 +               QuadAlign(((record_size >> SECTOR_SHIFT) * sizeof(short))) -
90373 +               QuadAlign(sizeof(enum ATTR_TYPE));
90375 +       sbi->index_size = boot->index_size < 0
90376 +                                 ? 1u << (-boot->index_size)
90377 +                                 : (u32)boot->index_size << sbi->cluster_bits;
90379 +       sbi->volume.ser_num = le64_to_cpu(boot->serial_num);
90380 +       sbi->volume.size = sectors << sbi->sector_bits;
90382 +       /* warning if RAW volume */
90383 +       if (dev_size < fs_size) {
90384 +               u32 mb0, gb0;
90386 +               gb0 = format_size_gb(dev_size, &mb0);
90387 +               ntfs_warn(
90388 +                       sb,
90389 +                       "RAW NTFS volume: Filesystem size %u.%02u Gb > volume size %u.%02u Gb. Mount in read-only",
90390 +                       gb, mb, gb0, mb0);
90391 +               sb->s_flags |= SB_RDONLY;
90392 +       }
90394 +       clusters = sbi->volume.size >> sbi->cluster_bits;
90395 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
90396 +       /* 32 bits per cluster */
90397 +       if (clusters >> 32) {
90398 +               ntfs_notice(
90399 +                       sb,
90400 +                       "NTFS %u.%02u Gb is too big to use 32 bits per cluster",
90401 +                       gb, mb);
90402 +               goto out;
90403 +       }
90404 +#elif BITS_PER_LONG < 64
90405 +#error "CONFIG_NTFS3_64BIT_CLUSTER incompatible in 32 bit OS"
90406 +#endif
90408 +       sbi->used.bitmap.nbits = clusters;
90410 +       rec = ntfs_zalloc(record_size);
90411 +       if (!rec) {
90412 +               err = -ENOMEM;
90413 +               goto out;
90414 +       }
90416 +       sbi->new_rec = rec;
90417 +       rec->rhdr.sign = NTFS_FILE_SIGNATURE;
90418 +       rec->rhdr.fix_off = cpu_to_le16(MFTRECORD_FIXUP_OFFSET_1);
90419 +       fn = (sbi->record_size >> SECTOR_SHIFT) + 1;
90420 +       rec->rhdr.fix_num = cpu_to_le16(fn);
90421 +       ao = QuadAlign(MFTRECORD_FIXUP_OFFSET_1 + sizeof(short) * fn);
90422 +       rec->attr_off = cpu_to_le16(ao);
90423 +       rec->used = cpu_to_le32(ao + QuadAlign(sizeof(enum ATTR_TYPE)));
90424 +       rec->total = cpu_to_le32(sbi->record_size);
90425 +       ((struct ATTRIB *)Add2Ptr(rec, ao))->type = ATTR_END;
90427 +       if (sbi->cluster_size < PAGE_SIZE)
90428 +               sb_set_blocksize(sb, sbi->cluster_size);
90430 +       sbi->block_mask = sb->s_blocksize - 1;
90431 +       sbi->blocks_per_cluster = sbi->cluster_size >> sb->s_blocksize_bits;
90432 +       sbi->volume.blocks = sbi->volume.size >> sb->s_blocksize_bits;
90434 +       /* Maximum size for normal files */
90435 +       sbi->maxbytes = (clusters << sbi->cluster_bits) - 1;
90437 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
90438 +       if (clusters >= (1ull << (64 - sbi->cluster_bits)))
90439 +               sbi->maxbytes = -1;
90440 +       sbi->maxbytes_sparse = -1;
90441 +#else
90442 +       /* Maximum size for sparse file */
90443 +       sbi->maxbytes_sparse = (1ull << (sbi->cluster_bits + 32)) - 1;
90444 +#endif
90446 +       err = 0;
90448 +out:
90449 +       brelse(bh);
90451 +       return err;
90454 +/* try to mount*/
90455 +static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
90457 +       int err;
90458 +       struct ntfs_sb_info *sbi;
90459 +       struct block_device *bdev = sb->s_bdev;
90460 +       struct inode *bd_inode = bdev->bd_inode;
90461 +       struct request_queue *rq = bdev_get_queue(bdev);
90462 +       struct inode *inode = NULL;
90463 +       struct ntfs_inode *ni;
90464 +       size_t i, tt;
90465 +       CLST vcn, lcn, len;
90466 +       struct ATTRIB *attr;
90467 +       const struct VOLUME_INFO *info;
90468 +       u32 idx, done, bytes;
90469 +       struct ATTR_DEF_ENTRY *t;
90470 +       u16 *upcase = NULL;
90471 +       u16 *shared;
90472 +       bool is_ro;
90473 +       struct MFT_REF ref;
90475 +       ref.high = 0;
90477 +       sbi = ntfs_zalloc(sizeof(struct ntfs_sb_info));
90478 +       if (!sbi)
90479 +               return -ENOMEM;
90481 +       sb->s_fs_info = sbi;
90482 +       sbi->sb = sb;
90483 +       sb->s_flags |= SB_NODIRATIME;
90484 +       sb->s_magic = 0x7366746e; // "ntfs"
90485 +       sb->s_op = &ntfs_sops;
90486 +       sb->s_export_op = &ntfs_export_ops;
90487 +       sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
90488 +       sb->s_xattr = ntfs_xattr_handlers;
90490 +       ratelimit_state_init(&sbi->msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL,
90491 +                            DEFAULT_RATELIMIT_BURST);
90493 +       err = ntfs_parse_options(sb, data, silent, &sbi->options);
90494 +       if (err)
90495 +               goto out;
90497 +       if (!rq || !blk_queue_discard(rq) || !rq->limits.discard_granularity) {
90498 +               ;
90499 +       } else {
90500 +               sbi->discard_granularity = rq->limits.discard_granularity;
90501 +               sbi->discard_granularity_mask_inv =
90502 +                       ~(u64)(sbi->discard_granularity - 1);
90503 +       }
90505 +       sb_set_blocksize(sb, PAGE_SIZE);
90507 +       /* parse boot */
90508 +       err = ntfs_init_from_boot(sb, rq ? queue_logical_block_size(rq) : 512,
90509 +                                 bd_inode->i_size);
90510 +       if (err)
90511 +               goto out;
90513 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
90514 +       sb->s_maxbytes = MAX_LFS_FILESIZE;
90515 +#else
90516 +       sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
90517 +#endif
90519 +       mutex_init(&sbi->compress.mtx_lznt);
90520 +#ifdef CONFIG_NTFS3_LZX_XPRESS
90521 +       mutex_init(&sbi->compress.mtx_xpress);
90522 +       mutex_init(&sbi->compress.mtx_lzx);
90523 +#endif
90525 +       /*
90526 +        * Load $Volume. This should be done before LogFile
90527 +        * because 'sbi->volume.ni' is used by 'ntfs_set_state'
90528 +        */
90529 +       ref.low = cpu_to_le32(MFT_REC_VOL);
90530 +       ref.seq = cpu_to_le16(MFT_REC_VOL);
90531 +       inode = ntfs_iget5(sb, &ref, &NAME_VOLUME);
90532 +       if (IS_ERR(inode)) {
90533 +               err = PTR_ERR(inode);
90534 +               ntfs_err(sb, "Failed to load $Volume.");
90535 +               inode = NULL;
90536 +               goto out;
90537 +       }
90539 +       ni = ntfs_i(inode);
90541 +       /* Load and save label (not necessary) */
90542 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_LABEL, NULL, 0, NULL, NULL);
90544 +       if (!attr) {
90545 +               /* It is ok if no ATTR_LABEL */
90546 +       } else if (!attr->non_res && !is_attr_ext(attr)) {
90547 +               /* $AttrDef allows labels to be up to 128 symbols */
90548 +               err = utf16s_to_utf8s(resident_data(attr),
90549 +                                     le32_to_cpu(attr->res.data_size) >> 1,
90550 +                                     UTF16_LITTLE_ENDIAN, sbi->volume.label,
90551 +                                     sizeof(sbi->volume.label));
90552 +               if (err < 0)
90553 +                       sbi->volume.label[0] = 0;
90554 +       } else {
90555 +               /* should we break mounting here? */
90556 +               //err = -EINVAL;
90557 +               //goto out;
90558 +       }
90560 +       attr = ni_find_attr(ni, attr, NULL, ATTR_VOL_INFO, NULL, 0, NULL, NULL);
90561 +       if (!attr || is_attr_ext(attr)) {
90562 +               err = -EINVAL;
90563 +               goto out;
90564 +       }
90566 +       info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
90567 +       if (!info) {
90568 +               err = -EINVAL;
90569 +               goto out;
90570 +       }
90572 +       sbi->volume.major_ver = info->major_ver;
90573 +       sbi->volume.minor_ver = info->minor_ver;
90574 +       sbi->volume.flags = info->flags;
90576 +       sbi->volume.ni = ni;
90577 +       inode = NULL;
90579 +       /* Load $MFTMirr to estimate recs_mirr */
90580 +       ref.low = cpu_to_le32(MFT_REC_MIRR);
90581 +       ref.seq = cpu_to_le16(MFT_REC_MIRR);
90582 +       inode = ntfs_iget5(sb, &ref, &NAME_MIRROR);
90583 +       if (IS_ERR(inode)) {
90584 +               err = PTR_ERR(inode);
90585 +               ntfs_err(sb, "Failed to load $MFTMirr.");
90586 +               inode = NULL;
90587 +               goto out;
90588 +       }
90590 +       sbi->mft.recs_mirr =
90591 +               ntfs_up_cluster(sbi, inode->i_size) >> sbi->record_bits;
90593 +       iput(inode);
90595 +       /* Load LogFile to replay */
90596 +       ref.low = cpu_to_le32(MFT_REC_LOG);
90597 +       ref.seq = cpu_to_le16(MFT_REC_LOG);
90598 +       inode = ntfs_iget5(sb, &ref, &NAME_LOGFILE);
90599 +       if (IS_ERR(inode)) {
90600 +               err = PTR_ERR(inode);
90601 +               ntfs_err(sb, "Failed to load \x24LogFile.");
90602 +               inode = NULL;
90603 +               goto out;
90604 +       }
90606 +       ni = ntfs_i(inode);
90608 +       err = ntfs_loadlog_and_replay(ni, sbi);
90609 +       if (err)
90610 +               goto out;
90612 +       iput(inode);
90613 +       inode = NULL;
90615 +       is_ro = sb_rdonly(sbi->sb);
90617 +       if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
90618 +               if (!is_ro) {
90619 +                       ntfs_warn(sb,
90620 +                                 "failed to replay log file. Can't mount rw!");
90621 +                       err = -EINVAL;
90622 +                       goto out;
90623 +               }
90624 +       } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) {
90625 +               if (!is_ro && !sbi->options.force) {
90626 +                       ntfs_warn(
90627 +                               sb,
90628 +                               "volume is dirty and \"force\" flag is not set!");
90629 +                       err = -EINVAL;
90630 +                       goto out;
90631 +               }
90632 +       }
90634 +       /* Load $MFT */
90635 +       ref.low = cpu_to_le32(MFT_REC_MFT);
90636 +       ref.seq = cpu_to_le16(1);
90638 +       inode = ntfs_iget5(sb, &ref, &NAME_MFT);
90639 +       if (IS_ERR(inode)) {
90640 +               err = PTR_ERR(inode);
90641 +               ntfs_err(sb, "Failed to load $MFT.");
90642 +               inode = NULL;
90643 +               goto out;
90644 +       }
90646 +       ni = ntfs_i(inode);
90648 +       sbi->mft.used = ni->i_valid >> sbi->record_bits;
90649 +       tt = inode->i_size >> sbi->record_bits;
90650 +       sbi->mft.next_free = MFT_REC_USER;
90652 +       err = wnd_init(&sbi->mft.bitmap, sb, tt);
90653 +       if (err)
90654 +               goto out;
90656 +       err = ni_load_all_mi(ni);
90657 +       if (err)
90658 +               goto out;
90660 +       sbi->mft.ni = ni;
90662 +       /* Load $BadClus */
90663 +       ref.low = cpu_to_le32(MFT_REC_BADCLUST);
90664 +       ref.seq = cpu_to_le16(MFT_REC_BADCLUST);
90665 +       inode = ntfs_iget5(sb, &ref, &NAME_BADCLUS);
90666 +       if (IS_ERR(inode)) {
90667 +               err = PTR_ERR(inode);
90668 +               ntfs_err(sb, "Failed to load $BadClus.");
90669 +               inode = NULL;
90670 +               goto out;
90671 +       }
90673 +       ni = ntfs_i(inode);
90675 +       for (i = 0; run_get_entry(&ni->file.run, i, &vcn, &lcn, &len); i++) {
90676 +               if (lcn == SPARSE_LCN)
90677 +                       continue;
90679 +               if (!sbi->bad_clusters)
90680 +                       ntfs_notice(sb, "Volume contains bad blocks");
90682 +               sbi->bad_clusters += len;
90683 +       }
90685 +       iput(inode);
90687 +       /* Load $Bitmap */
90688 +       ref.low = cpu_to_le32(MFT_REC_BITMAP);
90689 +       ref.seq = cpu_to_le16(MFT_REC_BITMAP);
90690 +       inode = ntfs_iget5(sb, &ref, &NAME_BITMAP);
90691 +       if (IS_ERR(inode)) {
90692 +               err = PTR_ERR(inode);
90693 +               ntfs_err(sb, "Failed to load $Bitmap.");
90694 +               inode = NULL;
90695 +               goto out;
90696 +       }
90698 +       ni = ntfs_i(inode);
90700 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
90701 +       if (inode->i_size >> 32) {
90702 +               err = -EINVAL;
90703 +               goto out;
90704 +       }
90705 +#endif
90707 +       /* Check bitmap boundary */
90708 +       tt = sbi->used.bitmap.nbits;
90709 +       if (inode->i_size < bitmap_size(tt)) {
90710 +               err = -EINVAL;
90711 +               goto out;
90712 +       }
90714 +       /* Not necessary */
90715 +       sbi->used.bitmap.set_tail = true;
90716 +       err = wnd_init(&sbi->used.bitmap, sbi->sb, tt);
90717 +       if (err)
90718 +               goto out;
90720 +       iput(inode);
90722 +       /* Compute the mft zone */
90723 +       err = ntfs_refresh_zone(sbi);
90724 +       if (err)
90725 +               goto out;
90727 +       /* Load $AttrDef */
90728 +       ref.low = cpu_to_le32(MFT_REC_ATTR);
90729 +       ref.seq = cpu_to_le16(MFT_REC_ATTR);
90730 +       inode = ntfs_iget5(sbi->sb, &ref, &NAME_ATTRDEF);
90731 +       if (IS_ERR(inode)) {
90732 +               err = PTR_ERR(inode);
90733 +               ntfs_err(sb, "Failed to load $AttrDef -> %d", err);
90734 +               inode = NULL;
90735 +               goto out;
90736 +       }
90738 +       if (inode->i_size < sizeof(struct ATTR_DEF_ENTRY)) {
90739 +               err = -EINVAL;
90740 +               goto out;
90741 +       }
90742 +       bytes = inode->i_size;
90743 +       sbi->def_table = t = ntfs_malloc(bytes);
90744 +       if (!t) {
90745 +               err = -ENOMEM;
90746 +               goto out;
90747 +       }
90749 +       for (done = idx = 0; done < bytes; done += PAGE_SIZE, idx++) {
90750 +               unsigned long tail = bytes - done;
90751 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
90753 +               if (IS_ERR(page)) {
90754 +                       err = PTR_ERR(page);
90755 +                       goto out;
90756 +               }
90757 +               memcpy(Add2Ptr(t, done), page_address(page),
90758 +                      min(PAGE_SIZE, tail));
90759 +               ntfs_unmap_page(page);
90761 +               if (!idx && ATTR_STD != t->type) {
90762 +                       err = -EINVAL;
90763 +                       goto out;
90764 +               }
90765 +       }
90767 +       t += 1;
90768 +       sbi->def_entries = 1;
90769 +       done = sizeof(struct ATTR_DEF_ENTRY);
90770 +       sbi->reparse.max_size = MAXIMUM_REPARSE_DATA_BUFFER_SIZE;
90771 +       sbi->ea_max_size = 0x10000; /* default formatter value */
90773 +       while (done + sizeof(struct ATTR_DEF_ENTRY) <= bytes) {
90774 +               u32 t32 = le32_to_cpu(t->type);
90775 +               u64 sz = le64_to_cpu(t->max_sz);
90777 +               if ((t32 & 0xF) || le32_to_cpu(t[-1].type) >= t32)
90778 +                       break;
90780 +               if (t->type == ATTR_REPARSE)
90781 +                       sbi->reparse.max_size = sz;
90782 +               else if (t->type == ATTR_EA)
90783 +                       sbi->ea_max_size = sz;
90785 +               done += sizeof(struct ATTR_DEF_ENTRY);
90786 +               t += 1;
90787 +               sbi->def_entries += 1;
90788 +       }
90789 +       iput(inode);
90791 +       /* Load $UpCase */
90792 +       ref.low = cpu_to_le32(MFT_REC_UPCASE);
90793 +       ref.seq = cpu_to_le16(MFT_REC_UPCASE);
90794 +       inode = ntfs_iget5(sb, &ref, &NAME_UPCASE);
90795 +       if (IS_ERR(inode)) {
90796 +               err = PTR_ERR(inode);
90797 +               ntfs_err(sb, "Failed to load \x24LogFile.");
90798 +               inode = NULL;
90799 +               goto out;
90800 +       }
90802 +       ni = ntfs_i(inode);
90804 +       if (inode->i_size != 0x10000 * sizeof(short)) {
90805 +               err = -EINVAL;
90806 +               goto out;
90807 +       }
90809 +       sbi->upcase = upcase = ntfs_vmalloc(0x10000 * sizeof(short));
90810 +       if (!upcase) {
90811 +               err = -ENOMEM;
90812 +               goto out;
90813 +       }
90815 +       for (idx = 0; idx < (0x10000 * sizeof(short) >> PAGE_SHIFT); idx++) {
90816 +               const __le16 *src;
90817 +               u16 *dst = Add2Ptr(upcase, idx << PAGE_SHIFT);
90818 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
90820 +               if (IS_ERR(page)) {
90821 +                       err = PTR_ERR(page);
90822 +                       goto out;
90823 +               }
90825 +               src = page_address(page);
90827 +#ifdef __BIG_ENDIAN
90828 +               for (i = 0; i < PAGE_SIZE / sizeof(u16); i++)
90829 +                       *dst++ = le16_to_cpu(*src++);
90830 +#else
90831 +               memcpy(dst, src, PAGE_SIZE);
90832 +#endif
90833 +               ntfs_unmap_page(page);
90834 +       }
90836 +       shared = ntfs_set_shared(upcase, 0x10000 * sizeof(short));
90837 +       if (shared && upcase != shared) {
90838 +               sbi->upcase = shared;
90839 +               ntfs_vfree(upcase);
90840 +       }
90842 +       iput(inode);
90843 +       inode = NULL;
90845 +       if (is_ntfs3(sbi)) {
90846 +               /* Load $Secure */
90847 +               err = ntfs_security_init(sbi);
90848 +               if (err)
90849 +                       goto out;
90851 +               /* Load $Extend */
90852 +               err = ntfs_extend_init(sbi);
90853 +               if (err)
90854 +                       goto load_root;
90856 +               /* Load $Extend\$Reparse */
90857 +               err = ntfs_reparse_init(sbi);
90858 +               if (err)
90859 +                       goto load_root;
90861 +               /* Load $Extend\$ObjId */
90862 +               err = ntfs_objid_init(sbi);
90863 +               if (err)
90864 +                       goto load_root;
90865 +       }
90867 +load_root:
90868 +       /* Load root */
90869 +       ref.low = cpu_to_le32(MFT_REC_ROOT);
90870 +       ref.seq = cpu_to_le16(MFT_REC_ROOT);
90871 +       inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
90872 +       if (IS_ERR(inode)) {
90873 +               err = PTR_ERR(inode);
90874 +               ntfs_err(sb, "Failed to load root.");
90875 +               inode = NULL;
90876 +               goto out;
90877 +       }
90879 +       ni = ntfs_i(inode);
90881 +       sb->s_root = d_make_root(inode);
90883 +       if (!sb->s_root) {
90884 +               err = -EINVAL;
90885 +               goto out;
90886 +       }
90888 +       return 0;
90890 +out:
90891 +       iput(inode);
90893 +       if (sb->s_root) {
90894 +               d_drop(sb->s_root);
90895 +               sb->s_root = NULL;
90896 +       }
90898 +       put_ntfs(sbi);
90900 +       sb->s_fs_info = NULL;
90901 +       return err;
90904 +void ntfs_unmap_meta(struct super_block *sb, CLST lcn, CLST len)
90906 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
90907 +       struct block_device *bdev = sb->s_bdev;
90908 +       sector_t devblock = (u64)lcn * sbi->blocks_per_cluster;
90909 +       unsigned long blocks = (u64)len * sbi->blocks_per_cluster;
90910 +       unsigned long cnt = 0;
90911 +       unsigned long limit = global_zone_page_state(NR_FREE_PAGES)
90912 +                             << (PAGE_SHIFT - sb->s_blocksize_bits);
90914 +       if (limit >= 0x2000)
90915 +               limit -= 0x1000;
90916 +       else if (limit < 32)
90917 +               limit = 32;
90918 +       else
90919 +               limit >>= 1;
90921 +       while (blocks--) {
90922 +               clean_bdev_aliases(bdev, devblock++, 1);
90923 +               if (cnt++ >= limit) {
90924 +                       sync_blockdev(bdev);
90925 +                       cnt = 0;
90926 +               }
90927 +       }
90931 + * ntfs_discard
90932 + *
90933 + * issue a discard request (trim for SSD)
90934 + */
90935 +int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
90937 +       int err;
90938 +       u64 lbo, bytes, start, end;
90939 +       struct super_block *sb;
90941 +       if (sbi->used.next_free_lcn == lcn + len)
90942 +               sbi->used.next_free_lcn = lcn;
90944 +       if (sbi->flags & NTFS_FLAGS_NODISCARD)
90945 +               return -EOPNOTSUPP;
90947 +       if (!sbi->options.discard)
90948 +               return -EOPNOTSUPP;
90950 +       lbo = (u64)lcn << sbi->cluster_bits;
90951 +       bytes = (u64)len << sbi->cluster_bits;
90953 +       /* Align up 'start' on discard_granularity */
90954 +       start = (lbo + sbi->discard_granularity - 1) &
90955 +               sbi->discard_granularity_mask_inv;
90956 +       /* Align down 'end' on discard_granularity */
90957 +       end = (lbo + bytes) & sbi->discard_granularity_mask_inv;
90959 +       sb = sbi->sb;
90960 +       if (start >= end)
90961 +               return 0;
90963 +       err = blkdev_issue_discard(sb->s_bdev, start >> 9, (end - start) >> 9,
90964 +                                  GFP_NOFS, 0);
90966 +       if (err == -EOPNOTSUPP)
90967 +               sbi->flags |= NTFS_FLAGS_NODISCARD;
90969 +       return err;
90972 +static struct dentry *ntfs_mount(struct file_system_type *fs_type, int flags,
90973 +                                const char *dev_name, void *data)
90975 +       return mount_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
90978 +static struct file_system_type ntfs_fs_type = {
90979 +       .owner = THIS_MODULE,
90980 +       .name = "ntfs3",
90981 +       .mount = ntfs_mount,
90982 +       .kill_sb = kill_block_super,
90983 +       .fs_flags = FS_REQUIRES_DEV,
90986 +static int __init init_ntfs_fs(void)
90988 +       int err;
90990 +       pr_notice("ntfs3: Index binary search\n");
90991 +       pr_notice("ntfs3: Hot fix free clusters\n");
90992 +       pr_notice("ntfs3: Max link count %u\n", NTFS_LINK_MAX);
90994 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
90995 +       pr_notice("ntfs3: Enabled Linux POSIX ACLs support\n");
90996 +#endif
90997 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
90998 +       pr_notice("ntfs3: Activated 64 bits per cluster\n");
90999 +#else
91000 +       pr_notice("ntfs3: Activated 32 bits per cluster\n");
91001 +#endif
91002 +#ifdef CONFIG_NTFS3_LZX_XPRESS
91003 +       pr_notice("ntfs3: Read-only lzx/xpress compression included\n");
91004 +#endif
91006 +       err = ntfs3_init_bitmap();
91007 +       if (err)
91008 +               return err;
91010 +       ntfs_inode_cachep = kmem_cache_create(
91011 +               "ntfs_inode_cache", sizeof(struct ntfs_inode), 0,
91012 +               (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
91013 +               init_once);
91014 +       if (!ntfs_inode_cachep) {
91015 +               err = -ENOMEM;
91016 +               goto out1;
91017 +       }
91019 +       err = register_filesystem(&ntfs_fs_type);
91020 +       if (err)
91021 +               goto out;
91023 +       return 0;
91024 +out:
91025 +       kmem_cache_destroy(ntfs_inode_cachep);
91026 +out1:
91027 +       ntfs3_exit_bitmap();
91028 +       return err;
91031 +static void __exit exit_ntfs_fs(void)
91033 +       if (ntfs_inode_cachep) {
91034 +               rcu_barrier();
91035 +               kmem_cache_destroy(ntfs_inode_cachep);
91036 +       }
91038 +       unregister_filesystem(&ntfs_fs_type);
91039 +       ntfs3_exit_bitmap();
91042 +MODULE_LICENSE("GPL");
91043 +MODULE_DESCRIPTION("ntfs3 read/write filesystem");
91044 +MODULE_INFO(behaviour, "Index binary search");
91045 +MODULE_INFO(behaviour, "Hot fix free clusters");
91046 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
91047 +MODULE_INFO(behaviour, "Enabled Linux POSIX ACLs support");
91048 +#endif
91049 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
91050 +MODULE_INFO(cluster, "Activated 64 bits per cluster");
91051 +#else
91052 +MODULE_INFO(cluster, "Activated 32 bits per cluster");
91053 +#endif
91054 +#ifdef CONFIG_NTFS3_LZX_XPRESS
91055 +MODULE_INFO(compression, "Read-only lzx/xpress compression included");
91056 +#endif
91058 +MODULE_AUTHOR("Konstantin Komarov");
91059 +MODULE_ALIAS_FS("ntfs3");
91061 +module_init(init_ntfs_fs);
91062 +module_exit(exit_ntfs_fs);
91063 diff --git a/fs/ntfs3/upcase.c b/fs/ntfs3/upcase.c
91064 new file mode 100644
91065 index 000000000000..9617382aca64
91066 --- /dev/null
91067 +++ b/fs/ntfs3/upcase.c
91068 @@ -0,0 +1,105 @@
91069 +// SPDX-License-Identifier: GPL-2.0
91071 + *
91072 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
91073 + *
91074 + */
91075 +#include <linux/blkdev.h>
91076 +#include <linux/buffer_head.h>
91077 +#include <linux/module.h>
91078 +#include <linux/nls.h>
91080 +#include "debug.h"
91081 +#include "ntfs.h"
91082 +#include "ntfs_fs.h"
91084 +static inline u16 upcase_unicode_char(const u16 *upcase, u16 chr)
91086 +       if (chr < 'a')
91087 +               return chr;
91089 +       if (chr <= 'z')
91090 +               return chr - ('a' - 'A');
91092 +       return upcase[chr];
91096 + * Thanks Kari Argillander <kari.argillander@gmail.com> for idea and implementation 'bothcase'
91097 + *
91098 + * Straight way to compare names:
91099 + * - case insensitive
91100 + * - if name equals and 'bothcases' then
91101 + * - case sensitive
91102 + * 'Straight way' code scans input names twice in worst case
91103 + * Optimized code scans input names only once
91104 + */
91105 +int ntfs_cmp_names(const __le16 *s1, size_t l1, const __le16 *s2, size_t l2,
91106 +                  const u16 *upcase, bool bothcase)
91108 +       int diff1 = 0;
91109 +       int diff2;
91110 +       size_t len = min(l1, l2);
91112 +       if (!bothcase && upcase)
91113 +               goto case_insentive;
91115 +       for (; len; s1++, s2++, len--) {
91116 +               diff1 = le16_to_cpu(*s1) - le16_to_cpu(*s2);
91117 +               if (diff1) {
91118 +                       if (bothcase && upcase)
91119 +                               goto case_insentive;
91121 +                       return diff1;
91122 +               }
91123 +       }
91124 +       return l1 - l2;
91126 +case_insentive:
91127 +       for (; len; s1++, s2++, len--) {
91128 +               diff2 = upcase_unicode_char(upcase, le16_to_cpu(*s1)) -
91129 +                       upcase_unicode_char(upcase, le16_to_cpu(*s2));
91130 +               if (diff2)
91131 +                       return diff2;
91132 +       }
91134 +       diff2 = l1 - l2;
91135 +       return diff2 ? diff2 : diff1;
91138 +int ntfs_cmp_names_cpu(const struct cpu_str *uni1, const struct le_str *uni2,
91139 +                      const u16 *upcase, bool bothcase)
91141 +       const u16 *s1 = uni1->name;
91142 +       const __le16 *s2 = uni2->name;
91143 +       size_t l1 = uni1->len;
91144 +       size_t l2 = uni2->len;
91145 +       size_t len = min(l1, l2);
91146 +       int diff1 = 0;
91147 +       int diff2;
91149 +       if (!bothcase && upcase)
91150 +               goto case_insentive;
91152 +       for (; len; s1++, s2++, len--) {
91153 +               diff1 = *s1 - le16_to_cpu(*s2);
91154 +               if (diff1) {
91155 +                       if (bothcase && upcase)
91156 +                               goto case_insentive;
91158 +                       return diff1;
91159 +               }
91160 +       }
91161 +       return l1 - l2;
91163 +case_insentive:
91164 +       for (; len; s1++, s2++, len--) {
91165 +               diff2 = upcase_unicode_char(upcase, *s1) -
91166 +                       upcase_unicode_char(upcase, le16_to_cpu(*s2));
91167 +               if (diff2)
91168 +                       return diff2;
91169 +       }
91171 +       diff2 = l1 - l2;
91172 +       return diff2 ? diff2 : diff1;
91174 diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
91175 new file mode 100644
91176 index 000000000000..759df507c92c
91177 --- /dev/null
91178 +++ b/fs/ntfs3/xattr.c
91179 @@ -0,0 +1,1046 @@
91180 +// SPDX-License-Identifier: GPL-2.0
91182 + *
91183 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
91184 + *
91185 + */
91187 +#include <linux/blkdev.h>
91188 +#include <linux/buffer_head.h>
91189 +#include <linux/fs.h>
91190 +#include <linux/nls.h>
91191 +#include <linux/posix_acl.h>
91192 +#include <linux/posix_acl_xattr.h>
91193 +#include <linux/xattr.h>
91195 +#include "debug.h"
91196 +#include "ntfs.h"
91197 +#include "ntfs_fs.h"
91199 +// clang-format off
91200 +#define SYSTEM_DOS_ATTRIB    "system.dos_attrib"
91201 +#define SYSTEM_NTFS_ATTRIB   "system.ntfs_attrib"
91202 +#define SYSTEM_NTFS_SECURITY "system.ntfs_security"
91203 +// clang-format on
91205 +static inline size_t unpacked_ea_size(const struct EA_FULL *ea)
91207 +       return ea->size ? le32_to_cpu(ea->size)
91208 +                       : DwordAlign(struct_size(
91209 +                                 ea, name,
91210 +                                 1 + ea->name_len + le16_to_cpu(ea->elength)));
91213 +static inline size_t packed_ea_size(const struct EA_FULL *ea)
91215 +       return struct_size(ea, name,
91216 +                          1 + ea->name_len + le16_to_cpu(ea->elength)) -
91217 +              offsetof(struct EA_FULL, flags);
91221 + * find_ea
91222 + *
91223 + * assume there is at least one xattr in the list
91224 + */
91225 +static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes,
91226 +                          const char *name, u8 name_len, u32 *off)
91228 +       *off = 0;
91230 +       if (!ea_all || !bytes)
91231 +               return false;
91233 +       for (;;) {
91234 +               const struct EA_FULL *ea = Add2Ptr(ea_all, *off);
91235 +               u32 next_off = *off + unpacked_ea_size(ea);
91237 +               if (next_off > bytes)
91238 +                       return false;
91240 +               if (ea->name_len == name_len &&
91241 +                   !memcmp(ea->name, name, name_len))
91242 +                       return true;
91244 +               *off = next_off;
91245 +               if (next_off >= bytes)
91246 +                       return false;
91247 +       }
91251 + * ntfs_read_ea
91252 + *
91253 + * reads all extended attributes
91254 + * ea - new allocated memory
91255 + * info - pointer into resident data
91256 + */
91257 +static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
91258 +                       size_t add_bytes, const struct EA_INFO **info)
91260 +       int err;
91261 +       struct ATTR_LIST_ENTRY *le = NULL;
91262 +       struct ATTRIB *attr_info, *attr_ea;
91263 +       void *ea_p;
91264 +       u32 size;
91266 +       static_assert(le32_to_cpu(ATTR_EA_INFO) < le32_to_cpu(ATTR_EA));
91268 +       *ea = NULL;
91269 +       *info = NULL;
91271 +       attr_info =
91272 +               ni_find_attr(ni, NULL, &le, ATTR_EA_INFO, NULL, 0, NULL, NULL);
91273 +       attr_ea =
91274 +               ni_find_attr(ni, attr_info, &le, ATTR_EA, NULL, 0, NULL, NULL);
91276 +       if (!attr_ea || !attr_info)
91277 +               return 0;
91279 +       *info = resident_data_ex(attr_info, sizeof(struct EA_INFO));
91280 +       if (!*info)
91281 +               return -EINVAL;
91283 +       /* Check Ea limit */
91284 +       size = le32_to_cpu((*info)->size);
91285 +       if (size > ni->mi.sbi->ea_max_size)
91286 +               return -EFBIG;
91288 +       if (attr_size(attr_ea) > ni->mi.sbi->ea_max_size)
91289 +               return -EFBIG;
91291 +       /* Allocate memory for packed Ea */
91292 +       ea_p = ntfs_malloc(size + add_bytes);
91293 +       if (!ea_p)
91294 +               return -ENOMEM;
91296 +       if (attr_ea->non_res) {
91297 +               struct runs_tree run;
91299 +               run_init(&run);
91301 +               err = attr_load_runs(attr_ea, ni, &run, NULL);
91302 +               if (!err)
91303 +                       err = ntfs_read_run_nb(ni->mi.sbi, &run, 0, ea_p, size,
91304 +                                              NULL);
91305 +               run_close(&run);
91307 +               if (err)
91308 +                       goto out;
91309 +       } else {
91310 +               void *p = resident_data_ex(attr_ea, size);
91312 +               if (!p) {
91313 +                       err = -EINVAL;
91314 +                       goto out;
91315 +               }
91316 +               memcpy(ea_p, p, size);
91317 +       }
91319 +       memset(Add2Ptr(ea_p, size), 0, add_bytes);
91320 +       *ea = ea_p;
91321 +       return 0;
91323 +out:
91324 +       ntfs_free(ea_p);
91325 +       *ea = NULL;
91326 +       return err;
91330 + * ntfs_list_ea
91331 + *
91332 + * copy a list of xattrs names into the buffer
91333 + * provided, or compute the buffer size required
91334 + *
91335 + * Returns a negative error number on failure, or the number of bytes
91336 + * used / required on success.
91337 + */
91338 +static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
91339 +                           size_t bytes_per_buffer)
91341 +       const struct EA_INFO *info;
91342 +       struct EA_FULL *ea_all = NULL;
91343 +       const struct EA_FULL *ea;
91344 +       u32 off, size;
91345 +       int err;
91346 +       size_t ret;
91348 +       err = ntfs_read_ea(ni, &ea_all, 0, &info);
91349 +       if (err)
91350 +               return err;
91352 +       if (!info || !ea_all)
91353 +               return 0;
91355 +       size = le32_to_cpu(info->size);
91357 +       /* Enumerate all xattrs */
91358 +       for (ret = 0, off = 0; off < size; off += unpacked_ea_size(ea)) {
91359 +               ea = Add2Ptr(ea_all, off);
91361 +               if (buffer) {
91362 +                       if (ret + ea->name_len + 1 > bytes_per_buffer) {
91363 +                               err = -ERANGE;
91364 +                               goto out;
91365 +                       }
91367 +                       memcpy(buffer + ret, ea->name, ea->name_len);
91368 +                       buffer[ret + ea->name_len] = 0;
91369 +               }
91371 +               ret += ea->name_len + 1;
91372 +       }
91374 +out:
91375 +       ntfs_free(ea_all);
91376 +       return err ? err : ret;
91379 +static int ntfs_get_ea(struct inode *inode, const char *name, size_t name_len,
91380 +                      void *buffer, size_t size, size_t *required)
91382 +       struct ntfs_inode *ni = ntfs_i(inode);
91383 +       const struct EA_INFO *info;
91384 +       struct EA_FULL *ea_all = NULL;
91385 +       const struct EA_FULL *ea;
91386 +       u32 off, len;
91387 +       int err;
91389 +       if (!(ni->ni_flags & NI_FLAG_EA))
91390 +               return -ENODATA;
91392 +       if (!required)
91393 +               ni_lock(ni);
91395 +       len = 0;
91397 +       if (name_len > 255) {
91398 +               err = -ENAMETOOLONG;
91399 +               goto out;
91400 +       }
91402 +       err = ntfs_read_ea(ni, &ea_all, 0, &info);
91403 +       if (err)
91404 +               goto out;
91406 +       if (!info)
91407 +               goto out;
91409 +       /* Enumerate all xattrs */
91410 +       if (!find_ea(ea_all, le32_to_cpu(info->size), name, name_len, &off)) {
91411 +               err = -ENODATA;
91412 +               goto out;
91413 +       }
91414 +       ea = Add2Ptr(ea_all, off);
91416 +       len = le16_to_cpu(ea->elength);
91417 +       if (!buffer) {
91418 +               err = 0;
91419 +               goto out;
91420 +       }
91422 +       if (len > size) {
91423 +               err = -ERANGE;
91424 +               if (required)
91425 +                       *required = len;
91426 +               goto out;
91427 +       }
91429 +       memcpy(buffer, ea->name + ea->name_len + 1, len);
91430 +       err = 0;
91432 +out:
91433 +       ntfs_free(ea_all);
91434 +       if (!required)
91435 +               ni_unlock(ni);
91437 +       return err ? err : len;
91440 +static noinline int ntfs_set_ea(struct inode *inode, const char *name,
91441 +                               size_t name_len, const void *value,
91442 +                               size_t val_size, int flags, int locked)
91444 +       struct ntfs_inode *ni = ntfs_i(inode);
91445 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
91446 +       int err;
91447 +       struct EA_INFO ea_info;
91448 +       const struct EA_INFO *info;
91449 +       struct EA_FULL *new_ea;
91450 +       struct EA_FULL *ea_all = NULL;
91451 +       size_t add, new_pack;
91452 +       u32 off, size;
91453 +       __le16 size_pack;
91454 +       struct ATTRIB *attr;
91455 +       struct ATTR_LIST_ENTRY *le;
91456 +       struct mft_inode *mi;
91457 +       struct runs_tree ea_run;
91458 +       u64 new_sz;
91459 +       void *p;
91461 +       if (!locked)
91462 +               ni_lock(ni);
91464 +       run_init(&ea_run);
91466 +       if (name_len > 255) {
91467 +               err = -ENAMETOOLONG;
91468 +               goto out;
91469 +       }
91471 +       add = DwordAlign(struct_size(ea_all, name, 1 + name_len + val_size));
91473 +       err = ntfs_read_ea(ni, &ea_all, add, &info);
91474 +       if (err)
91475 +               goto out;
91477 +       if (!info) {
91478 +               memset(&ea_info, 0, sizeof(ea_info));
91479 +               size = 0;
91480 +               size_pack = 0;
91481 +       } else {
91482 +               memcpy(&ea_info, info, sizeof(ea_info));
91483 +               size = le32_to_cpu(ea_info.size);
91484 +               size_pack = ea_info.size_pack;
91485 +       }
91487 +       if (info && find_ea(ea_all, size, name, name_len, &off)) {
91488 +               struct EA_FULL *ea;
91489 +               size_t ea_sz;
91491 +               if (flags & XATTR_CREATE) {
91492 +                       err = -EEXIST;
91493 +                       goto out;
91494 +               }
91496 +               /* Remove current xattr */
91497 +               ea = Add2Ptr(ea_all, off);
91498 +               if (ea->flags & FILE_NEED_EA)
91499 +                       le16_add_cpu(&ea_info.count, -1);
91501 +               ea_sz = unpacked_ea_size(ea);
91503 +               le16_add_cpu(&ea_info.size_pack, 0 - packed_ea_size(ea));
91505 +               memmove(ea, Add2Ptr(ea, ea_sz), size - off - ea_sz);
91507 +               size -= ea_sz;
91508 +               memset(Add2Ptr(ea_all, size), 0, ea_sz);
91510 +               ea_info.size = cpu_to_le32(size);
91512 +               if ((flags & XATTR_REPLACE) && !val_size)
91513 +                       goto update_ea;
91514 +       } else {
91515 +               if (flags & XATTR_REPLACE) {
91516 +                       err = -ENODATA;
91517 +                       goto out;
91518 +               }
91520 +               if (!ea_all) {
91521 +                       ea_all = ntfs_zalloc(add);
91522 +                       if (!ea_all) {
91523 +                               err = -ENOMEM;
91524 +                               goto out;
91525 +                       }
91526 +               }
91527 +       }
91529 +       /* append new xattr */
91530 +       new_ea = Add2Ptr(ea_all, size);
91531 +       new_ea->size = cpu_to_le32(add);
91532 +       new_ea->flags = 0;
91533 +       new_ea->name_len = name_len;
91534 +       new_ea->elength = cpu_to_le16(val_size);
91535 +       memcpy(new_ea->name, name, name_len);
91536 +       new_ea->name[name_len] = 0;
91537 +       memcpy(new_ea->name + name_len + 1, value, val_size);
91538 +       new_pack = le16_to_cpu(ea_info.size_pack) + packed_ea_size(new_ea);
91540 +       /* should fit into 16 bits */
91541 +       if (new_pack > 0xffff) {
91542 +               err = -EFBIG; // -EINVAL?
91543 +               goto out;
91544 +       }
91545 +       ea_info.size_pack = cpu_to_le16(new_pack);
91547 +       /* new size of ATTR_EA */
91548 +       size += add;
91549 +       if (size > sbi->ea_max_size) {
91550 +               err = -EFBIG; // -EINVAL?
91551 +               goto out;
91552 +       }
91553 +       ea_info.size = cpu_to_le32(size);
91555 +update_ea:
91557 +       if (!info) {
91558 +               /* Create xattr */
91559 +               if (!size) {
91560 +                       err = 0;
91561 +                       goto out;
91562 +               }
91564 +               err = ni_insert_resident(ni, sizeof(struct EA_INFO),
91565 +                                        ATTR_EA_INFO, NULL, 0, NULL, NULL);
91566 +               if (err)
91567 +                       goto out;
91569 +               err = ni_insert_resident(ni, 0, ATTR_EA, NULL, 0, NULL, NULL);
91570 +               if (err)
91571 +                       goto out;
91572 +       }
91574 +       new_sz = size;
91575 +       err = attr_set_size(ni, ATTR_EA, NULL, 0, &ea_run, new_sz, &new_sz,
91576 +                           false, NULL);
91577 +       if (err)
91578 +               goto out;
91580 +       le = NULL;
91581 +       attr = ni_find_attr(ni, NULL, &le, ATTR_EA_INFO, NULL, 0, NULL, &mi);
91582 +       if (!attr) {
91583 +               err = -EINVAL;
91584 +               goto out;
91585 +       }
91587 +       if (!size) {
91588 +               /* delete xattr, ATTR_EA_INFO */
91589 +               err = ni_remove_attr_le(ni, attr, le);
91590 +               if (err)
91591 +                       goto out;
91592 +       } else {
91593 +               p = resident_data_ex(attr, sizeof(struct EA_INFO));
91594 +               if (!p) {
91595 +                       err = -EINVAL;
91596 +                       goto out;
91597 +               }
91598 +               memcpy(p, &ea_info, sizeof(struct EA_INFO));
91599 +               mi->dirty = true;
91600 +       }
91602 +       le = NULL;
91603 +       attr = ni_find_attr(ni, NULL, &le, ATTR_EA, NULL, 0, NULL, &mi);
91604 +       if (!attr) {
91605 +               err = -EINVAL;
91606 +               goto out;
91607 +       }
91609 +       if (!size) {
91610 +               /* delete xattr, ATTR_EA */
91611 +               err = ni_remove_attr_le(ni, attr, le);
91612 +               if (err)
91613 +                       goto out;
91614 +       } else if (attr->non_res) {
91615 +               err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size);
91616 +               if (err)
91617 +                       goto out;
91618 +       } else {
91619 +               p = resident_data_ex(attr, size);
91620 +               if (!p) {
91621 +                       err = -EINVAL;
91622 +                       goto out;
91623 +               }
91624 +               memcpy(p, ea_all, size);
91625 +               mi->dirty = true;
91626 +       }
91628 +       /* Check if we delete the last xattr */
91629 +       if (size)
91630 +               ni->ni_flags |= NI_FLAG_EA;
91631 +       else
91632 +               ni->ni_flags &= ~NI_FLAG_EA;
91634 +       if (ea_info.size_pack != size_pack)
91635 +               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
91636 +       mark_inode_dirty(&ni->vfs_inode);
91638 +out:
91639 +       if (!locked)
91640 +               ni_unlock(ni);
91642 +       run_close(&ea_run);
91643 +       ntfs_free(ea_all);
91645 +       return err;
91648 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
91649 +static inline void ntfs_posix_acl_release(struct posix_acl *acl)
91651 +       if (acl && refcount_dec_and_test(&acl->a_refcount))
91652 +               kfree(acl);
91655 +static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
91656 +                                        struct inode *inode, int type,
91657 +                                        int locked)
91659 +       struct ntfs_inode *ni = ntfs_i(inode);
91660 +       const char *name;
91661 +       size_t name_len;
91662 +       struct posix_acl *acl;
91663 +       size_t req;
91664 +       int err;
91665 +       void *buf;
91667 +       /* allocate PATH_MAX bytes */
91668 +       buf = __getname();
91669 +       if (!buf)
91670 +               return ERR_PTR(-ENOMEM);
91672 +       /* Possible values of 'type' was already checked above */
91673 +       if (type == ACL_TYPE_ACCESS) {
91674 +               name = XATTR_NAME_POSIX_ACL_ACCESS;
91675 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
91676 +       } else {
91677 +               name = XATTR_NAME_POSIX_ACL_DEFAULT;
91678 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1;
91679 +       }
91681 +       if (!locked)
91682 +               ni_lock(ni);
91684 +       err = ntfs_get_ea(inode, name, name_len, buf, PATH_MAX, &req);
91686 +       if (!locked)
91687 +               ni_unlock(ni);
91689 +       /* Translate extended attribute to acl */
91690 +       if (err > 0) {
91691 +               acl = posix_acl_from_xattr(mnt_userns, buf, err);
91692 +               if (!IS_ERR(acl))
91693 +                       set_cached_acl(inode, type, acl);
91694 +       } else {
91695 +               acl = err == -ENODATA ? NULL : ERR_PTR(err);
91696 +       }
91698 +       __putname(buf);
91700 +       return acl;
91704 + * ntfs_get_acl
91705 + *
91706 + * inode_operations::get_acl
91707 + */
91708 +struct posix_acl *ntfs_get_acl(struct inode *inode, int type)
91710 +       /* TODO: init_user_ns? */
91711 +       return ntfs_get_acl_ex(&init_user_ns, inode, type, 0);
91714 +static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
91715 +                                   struct inode *inode, struct posix_acl *acl,
91716 +                                   int type, int locked)
91718 +       const char *name;
91719 +       size_t size, name_len;
91720 +       void *value = NULL;
91721 +       int err = 0;
91723 +       if (S_ISLNK(inode->i_mode))
91724 +               return -EOPNOTSUPP;
91726 +       switch (type) {
91727 +       case ACL_TYPE_ACCESS:
91728 +               if (acl) {
91729 +                       umode_t mode = inode->i_mode;
91731 +                       err = posix_acl_equiv_mode(acl, &mode);
91732 +                       if (err < 0)
91733 +                               return err;
91735 +                       if (inode->i_mode != mode) {
91736 +                               inode->i_mode = mode;
91737 +                               mark_inode_dirty(inode);
91738 +                       }
91740 +                       if (!err) {
91741 +                               /*
91742 +                                * acl can be exactly represented in the
91743 +                                * traditional file mode permission bits
91744 +                                */
91745 +                               acl = NULL;
91746 +                               goto out;
91747 +                       }
91748 +               }
91749 +               name = XATTR_NAME_POSIX_ACL_ACCESS;
91750 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
91751 +               break;
91753 +       case ACL_TYPE_DEFAULT:
91754 +               if (!S_ISDIR(inode->i_mode))
91755 +                       return acl ? -EACCES : 0;
91756 +               name = XATTR_NAME_POSIX_ACL_DEFAULT;
91757 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1;
91758 +               break;
91760 +       default:
91761 +               return -EINVAL;
91762 +       }
91764 +       if (!acl)
91765 +               goto out;
91767 +       size = posix_acl_xattr_size(acl->a_count);
91768 +       value = ntfs_malloc(size);
91769 +       if (!value)
91770 +               return -ENOMEM;
91772 +       err = posix_acl_to_xattr(mnt_userns, acl, value, size);
91773 +       if (err)
91774 +               goto out;
91776 +       err = ntfs_set_ea(inode, name, name_len, value, size, 0, locked);
91777 +       if (err)
91778 +               goto out;
91780 +       inode->i_flags &= ~S_NOSEC;
91782 +out:
91783 +       if (!err)
91784 +               set_cached_acl(inode, type, acl);
91786 +       kfree(value);
91788 +       return err;
91792 + * ntfs_set_acl
91793 + *
91794 + * inode_operations::set_acl
91795 + */
91796 +int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
91797 +                struct posix_acl *acl, int type)
91799 +       return ntfs_set_acl_ex(mnt_userns, inode, acl, type, 0);
91802 +static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
91803 +                             struct inode *inode, int type, void *buffer,
91804 +                             size_t size)
91806 +       struct posix_acl *acl;
91807 +       int err;
91809 +       if (!(inode->i_sb->s_flags & SB_POSIXACL))
91810 +               return -EOPNOTSUPP;
91812 +       acl = ntfs_get_acl(inode, type);
91813 +       if (IS_ERR(acl))
91814 +               return PTR_ERR(acl);
91816 +       if (!acl)
91817 +               return -ENODATA;
91819 +       err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
91820 +       ntfs_posix_acl_release(acl);
91822 +       return err;
91825 +static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
91826 +                             struct inode *inode, int type, const void *value,
91827 +                             size_t size)
91829 +       struct posix_acl *acl;
91830 +       int err;
91832 +       if (!(inode->i_sb->s_flags & SB_POSIXACL))
91833 +               return -EOPNOTSUPP;
91835 +       if (!inode_owner_or_capable(mnt_userns, inode))
91836 +               return -EPERM;
91838 +       if (!value)
91839 +               return 0;
91841 +       acl = posix_acl_from_xattr(mnt_userns, value, size);
91842 +       if (IS_ERR(acl))
91843 +               return PTR_ERR(acl);
91845 +       if (acl) {
91846 +               err = posix_acl_valid(mnt_userns, acl);
91847 +               if (err)
91848 +                       goto release_and_out;
91849 +       }
91851 +       err = ntfs_set_acl(mnt_userns, inode, acl, type);
91853 +release_and_out:
91854 +       ntfs_posix_acl_release(acl);
91855 +       return err;
91859 + * Initialize the ACLs of a new inode. Called from ntfs_create_inode.
91860 + */
91861 +int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
91862 +                 struct inode *dir)
91864 +       struct posix_acl *default_acl, *acl;
91865 +       int err;
91867 +       /*
91868 +        * TODO refactoring lock
91869 +        * ni_lock(dir) ... -> posix_acl_create(dir,...) -> ntfs_get_acl -> ni_lock(dir)
91870 +        */
91871 +       inode->i_default_acl = NULL;
91873 +       default_acl = ntfs_get_acl_ex(mnt_userns, dir, ACL_TYPE_DEFAULT, 1);
91875 +       if (!default_acl || default_acl == ERR_PTR(-EOPNOTSUPP)) {
91876 +               inode->i_mode &= ~current_umask();
91877 +               err = 0;
91878 +               goto out;
91879 +       }
91881 +       if (IS_ERR(default_acl)) {
91882 +               err = PTR_ERR(default_acl);
91883 +               goto out;
91884 +       }
91886 +       acl = default_acl;
91887 +       err = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
91888 +       if (err < 0)
91889 +               goto out1;
91890 +       if (!err) {
91891 +               posix_acl_release(acl);
91892 +               acl = NULL;
91893 +       }
91895 +       if (!S_ISDIR(inode->i_mode)) {
91896 +               posix_acl_release(default_acl);
91897 +               default_acl = NULL;
91898 +       }
91900 +       if (default_acl)
91901 +               err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
91902 +                                     ACL_TYPE_DEFAULT, 1);
91904 +       if (!acl)
91905 +               inode->i_acl = NULL;
91906 +       else if (!err)
91907 +               err = ntfs_set_acl_ex(mnt_userns, inode, acl, ACL_TYPE_ACCESS,
91908 +                                     1);
91910 +       posix_acl_release(acl);
91911 +out1:
91912 +       posix_acl_release(default_acl);
91914 +out:
91915 +       return err;
91917 +#endif
91920 + * ntfs_acl_chmod
91921 + *
91922 + * helper for 'ntfs3_setattr'
91923 + */
91924 +int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode)
91926 +       struct super_block *sb = inode->i_sb;
91928 +       if (!(sb->s_flags & SB_POSIXACL))
91929 +               return 0;
91931 +       if (S_ISLNK(inode->i_mode))
91932 +               return -EOPNOTSUPP;
91934 +       return posix_acl_chmod(mnt_userns, inode, inode->i_mode);
91938 + * ntfs_permission
91939 + *
91940 + * inode_operations::permission
91941 + */
91942 +int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
91943 +                   int mask)
91945 +       if (ntfs_sb(inode->i_sb)->options.no_acs_rules) {
91946 +               /* "no access rules" mode - allow all changes */
91947 +               return 0;
91948 +       }
91950 +       return generic_permission(mnt_userns, inode, mask);
91954 + * ntfs_listxattr
91955 + *
91956 + * inode_operations::listxattr
91957 + */
91958 +ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
91960 +       struct inode *inode = d_inode(dentry);
91961 +       struct ntfs_inode *ni = ntfs_i(inode);
91962 +       ssize_t ret;
91964 +       if (!(ni->ni_flags & NI_FLAG_EA)) {
91965 +               /* no xattr in file */
91966 +               return 0;
91967 +       }
91969 +       ni_lock(ni);
91971 +       ret = ntfs_list_ea(ni, buffer, size);
91973 +       ni_unlock(ni);
91975 +       return ret;
91978 +static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
91979 +                        struct inode *inode, const char *name, void *buffer,
91980 +                        size_t size)
91982 +       int err;
91983 +       struct ntfs_inode *ni = ntfs_i(inode);
91984 +       size_t name_len = strlen(name);
91986 +       /* Dispatch request */
91987 +       if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
91988 +           !memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
91989 +               /* system.dos_attrib */
91990 +               if (!buffer) {
91991 +                       err = sizeof(u8);
91992 +               } else if (size < sizeof(u8)) {
91993 +                       err = -ENODATA;
91994 +               } else {
91995 +                       err = sizeof(u8);
91996 +                       *(u8 *)buffer = le32_to_cpu(ni->std_fa);
91997 +               }
91998 +               goto out;
91999 +       }
92001 +       if (name_len == sizeof(SYSTEM_NTFS_ATTRIB) - 1 &&
92002 +           !memcmp(name, SYSTEM_NTFS_ATTRIB, sizeof(SYSTEM_NTFS_ATTRIB))) {
92003 +               /* system.ntfs_attrib */
92004 +               if (!buffer) {
92005 +                       err = sizeof(u32);
92006 +               } else if (size < sizeof(u32)) {
92007 +                       err = -ENODATA;
92008 +               } else {
92009 +                       err = sizeof(u32);
92010 +                       *(u32 *)buffer = le32_to_cpu(ni->std_fa);
92011 +               }
92012 +               goto out;
92013 +       }
92015 +       if (name_len == sizeof(SYSTEM_NTFS_SECURITY) - 1 &&
92016 +           !memcmp(name, SYSTEM_NTFS_SECURITY, sizeof(SYSTEM_NTFS_SECURITY))) {
92017 +               /* system.ntfs_security*/
92018 +               struct SECURITY_DESCRIPTOR_RELATIVE *sd = NULL;
92019 +               size_t sd_size = 0;
92021 +               if (!is_ntfs3(ni->mi.sbi)) {
92022 +                       /* we should get nt4 security */
92023 +                       err = -EINVAL;
92024 +                       goto out;
92025 +               } else if (le32_to_cpu(ni->std_security_id) <
92026 +                          SECURITY_ID_FIRST) {
92027 +                       err = -ENOENT;
92028 +                       goto out;
92029 +               }
92031 +               err = ntfs_get_security_by_id(ni->mi.sbi, ni->std_security_id,
92032 +                                             &sd, &sd_size);
92033 +               if (err)
92034 +                       goto out;
92036 +               if (!is_sd_valid(sd, sd_size)) {
92037 +                       ntfs_inode_warn(
92038 +                               inode,
92039 +                               "looks like you get incorrect security descriptor id=%u",
92040 +                               ni->std_security_id);
92041 +               }
92043 +               if (!buffer) {
92044 +                       err = sd_size;
92045 +               } else if (size < sd_size) {
92046 +                       err = -ENODATA;
92047 +               } else {
92048 +                       err = sd_size;
92049 +                       memcpy(buffer, sd, sd_size);
92050 +               }
92051 +               ntfs_free(sd);
92052 +               goto out;
92053 +       }
92055 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
92056 +       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
92057 +            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
92058 +                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
92059 +           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
92060 +            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
92061 +                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
92062 +               /* TODO: init_user_ns? */
92063 +               err = ntfs_xattr_get_acl(
92064 +                       &init_user_ns, inode,
92065 +                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
92066 +                               ? ACL_TYPE_ACCESS
92067 +                               : ACL_TYPE_DEFAULT,
92068 +                       buffer, size);
92069 +               goto out;
92070 +       }
92071 +#endif
92072 +       /* deal with ntfs extended attribute */
92073 +       err = ntfs_get_ea(inode, name, name_len, buffer, size, NULL);
92075 +out:
92076 +       return err;
92080 + * ntfs_setxattr
92081 + *
92082 + * inode_operations::setxattr
92083 + */
92084 +static noinline int ntfs_setxattr(const struct xattr_handler *handler,
92085 +                                 struct user_namespace *mnt_userns,
92086 +                                 struct dentry *de, struct inode *inode,
92087 +                                 const char *name, const void *value,
92088 +                                 size_t size, int flags)
92090 +       int err = -EINVAL;
92091 +       struct ntfs_inode *ni = ntfs_i(inode);
92092 +       size_t name_len = strlen(name);
92093 +       enum FILE_ATTRIBUTE new_fa;
92095 +       /* Dispatch request */
92096 +       if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
92097 +           !memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
92098 +               if (sizeof(u8) != size)
92099 +                       goto out;
92100 +               new_fa = cpu_to_le32(*(u8 *)value);
92101 +               goto set_new_fa;
92102 +       }
92104 +       if (name_len == sizeof(SYSTEM_NTFS_ATTRIB) - 1 &&
92105 +           !memcmp(name, SYSTEM_NTFS_ATTRIB, sizeof(SYSTEM_NTFS_ATTRIB))) {
92106 +               if (size != sizeof(u32))
92107 +                       goto out;
92108 +               new_fa = cpu_to_le32(*(u32 *)value);
92110 +               if (S_ISREG(inode->i_mode)) {
92111 +                       /* Process compressed/sparsed in special way*/
92112 +                       ni_lock(ni);
92113 +                       err = ni_new_attr_flags(ni, new_fa);
92114 +                       ni_unlock(ni);
92115 +                       if (err)
92116 +                               goto out;
92117 +               }
92118 +set_new_fa:
92119 +               /*
92120 +                * Thanks Mark Harmstone:
92121 +                * keep directory bit consistency
92122 +                */
92123 +               if (S_ISDIR(inode->i_mode))
92124 +                       new_fa |= FILE_ATTRIBUTE_DIRECTORY;
92125 +               else
92126 +                       new_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
92128 +               if (ni->std_fa != new_fa) {
92129 +                       ni->std_fa = new_fa;
92130 +                       if (new_fa & FILE_ATTRIBUTE_READONLY)
92131 +                               inode->i_mode &= ~0222;
92132 +                       else
92133 +                               inode->i_mode |= 0222;
92134 +                       /* std attribute always in primary record */
92135 +                       ni->mi.dirty = true;
92136 +                       mark_inode_dirty(inode);
92137 +               }
92138 +               err = 0;
92140 +               goto out;
92141 +       }
92143 +       if (name_len == sizeof(SYSTEM_NTFS_SECURITY) - 1 &&
92144 +           !memcmp(name, SYSTEM_NTFS_SECURITY, sizeof(SYSTEM_NTFS_SECURITY))) {
92145 +               /* system.ntfs_security*/
92146 +               __le32 security_id;
92147 +               bool inserted;
92148 +               struct ATTR_STD_INFO5 *std;
92150 +               if (!is_ntfs3(ni->mi.sbi)) {
92151 +                       /*
92152 +                        * we should replace ATTR_SECURE
92153 +                        * Skip this way cause it is nt4 feature
92154 +                        */
92155 +                       err = -EINVAL;
92156 +                       goto out;
92157 +               }
92159 +               if (!is_sd_valid(value, size)) {
92160 +                       err = -EINVAL;
92161 +                       ntfs_inode_warn(
92162 +                               inode,
92163 +                               "you try to set invalid security descriptor");
92164 +                       goto out;
92165 +               }
92167 +               err = ntfs_insert_security(ni->mi.sbi, value, size,
92168 +                                          &security_id, &inserted);
92169 +               if (err)
92170 +                       goto out;
92172 +               ni_lock(ni);
92173 +               std = ni_std5(ni);
92174 +               if (!std) {
92175 +                       err = -EINVAL;
92176 +               } else if (std->security_id != security_id) {
92177 +                       std->security_id = ni->std_security_id = security_id;
92178 +                       /* std attribute always in primary record */
92179 +                       ni->mi.dirty = true;
92180 +                       mark_inode_dirty(&ni->vfs_inode);
92181 +               }
92182 +               ni_unlock(ni);
92183 +               goto out;
92184 +       }
92186 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
92187 +       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
92188 +            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
92189 +                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
92190 +           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
92191 +            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
92192 +                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
92193 +               /* TODO: init_user_ns? */
92194 +               err = ntfs_xattr_set_acl(
92195 +                       &init_user_ns, inode,
92196 +                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
92197 +                               ? ACL_TYPE_ACCESS
92198 +                               : ACL_TYPE_DEFAULT,
92199 +                       value, size);
92200 +               goto out;
92201 +       }
92202 +#endif
92203 +       /* deal with ntfs extended attribute */
92204 +       err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
92206 +out:
92207 +       return err;
92210 +static bool ntfs_xattr_user_list(struct dentry *dentry)
92212 +       return true;
92215 +static const struct xattr_handler ntfs_xattr_handler = {
92216 +       .prefix = "",
92217 +       .get = ntfs_getxattr,
92218 +       .set = ntfs_setxattr,
92219 +       .list = ntfs_xattr_user_list,
92222 +const struct xattr_handler *ntfs_xattr_handlers[] = {
92223 +       &ntfs_xattr_handler,
92224 +       NULL,
92226 diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
92227 index 0b2891c6c71e..2846b943e80c 100644
92228 --- a/fs/overlayfs/copy_up.c
92229 +++ b/fs/overlayfs/copy_up.c
92230 @@ -932,7 +932,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
92231  static int ovl_copy_up_flags(struct dentry *dentry, int flags)
92233         int err = 0;
92234 -       const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
92235 +       const struct cred *old_cred;
92236         bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);
92238         /*
92239 @@ -943,6 +943,7 @@ static int ovl_copy_up_flags(struct dentry *dentry, int flags)
92240         if (WARN_ON(disconnected && d_is_dir(dentry)))
92241                 return -EIO;
92243 +       old_cred = ovl_override_creds(dentry->d_sb);
92244         while (!err) {
92245                 struct dentry *next;
92246                 struct dentry *parent = NULL;
92247 diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
92248 index 3fe05fb5d145..71e264e2f16b 100644
92249 --- a/fs/overlayfs/namei.c
92250 +++ b/fs/overlayfs/namei.c
92251 @@ -919,6 +919,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
92252                         continue;
92254                 if ((uppermetacopy || d.metacopy) && !ofs->config.metacopy) {
92255 +                       dput(this);
92256                         err = -EPERM;
92257                         pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", dentry);
92258                         goto out_put;
92259 diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
92260 index 95cff83786a5..2322f854533c 100644
92261 --- a/fs/overlayfs/overlayfs.h
92262 +++ b/fs/overlayfs/overlayfs.h
92263 @@ -319,9 +319,6 @@ int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
92264                        enum ovl_xattr ox, const void *value, size_t size,
92265                        int xerr);
92266  int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
92267 -void ovl_set_flag(unsigned long flag, struct inode *inode);
92268 -void ovl_clear_flag(unsigned long flag, struct inode *inode);
92269 -bool ovl_test_flag(unsigned long flag, struct inode *inode);
92270  bool ovl_inuse_trylock(struct dentry *dentry);
92271  void ovl_inuse_unlock(struct dentry *dentry);
92272  bool ovl_is_inuse(struct dentry *dentry);
92273 @@ -335,6 +332,21 @@ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
92274                              int padding);
92275  int ovl_sync_status(struct ovl_fs *ofs);
92277 +static inline void ovl_set_flag(unsigned long flag, struct inode *inode)
92279 +       set_bit(flag, &OVL_I(inode)->flags);
92282 +static inline void ovl_clear_flag(unsigned long flag, struct inode *inode)
92284 +       clear_bit(flag, &OVL_I(inode)->flags);
92287 +static inline bool ovl_test_flag(unsigned long flag, struct inode *inode)
92289 +       return test_bit(flag, &OVL_I(inode)->flags);
92292  static inline bool ovl_is_impuredir(struct super_block *sb,
92293                                     struct dentry *dentry)
92295 @@ -439,6 +451,18 @@ int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
92296                         struct dentry *dentry, int level);
92297  int ovl_indexdir_cleanup(struct ovl_fs *ofs);
92300 + * Can we iterate real dir directly?
92301 + *
92302 + * Non-merge dir may contain whiteouts from a time it was a merge upper, before
92303 + * lower dir was removed under it and possibly before it was rotated from upper
92304 + * to lower layer.
92305 + */
92306 +static inline bool ovl_dir_is_real(struct dentry *dir)
92308 +       return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
92311  /* inode.c */
92312  int ovl_set_nlink_upper(struct dentry *dentry);
92313  int ovl_set_nlink_lower(struct dentry *dentry);
92314 diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
92315 index f404a78e6b60..cc1e80257064 100644
92316 --- a/fs/overlayfs/readdir.c
92317 +++ b/fs/overlayfs/readdir.c
92318 @@ -319,18 +319,6 @@ static inline int ovl_dir_read(struct path *realpath,
92319         return err;
92323 - * Can we iterate real dir directly?
92324 - *
92325 - * Non-merge dir may contain whiteouts from a time it was a merge upper, before
92326 - * lower dir was removed under it and possibly before it was rotated from upper
92327 - * to lower layer.
92328 - */
92329 -static bool ovl_dir_is_real(struct dentry *dir)
92331 -       return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
92334  static void ovl_dir_reset(struct file *file)
92336         struct ovl_dir_file *od = file->private_data;
92337 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
92338 index fdd72f1a9c5e..787ce7c38fba 100644
92339 --- a/fs/overlayfs/super.c
92340 +++ b/fs/overlayfs/super.c
92341 @@ -380,6 +380,8 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
92342                            ofs->config.metacopy ? "on" : "off");
92343         if (ofs->config.ovl_volatile)
92344                 seq_puts(m, ",volatile");
92345 +       if (ofs->config.userxattr)
92346 +               seq_puts(m, ",userxattr");
92347         return 0;
92350 @@ -1826,7 +1828,8 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
92351   * - upper/work dir of any overlayfs instance
92352   */
92353  static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
92354 -                          struct dentry *dentry, const char *name)
92355 +                          struct dentry *dentry, const char *name,
92356 +                          bool is_lower)
92358         struct dentry *next = dentry, *parent;
92359         int err = 0;
92360 @@ -1838,7 +1841,7 @@ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
92362         /* Walk back ancestors to root (inclusive) looking for traps */
92363         while (!err && parent != next) {
92364 -               if (ovl_lookup_trap_inode(sb, parent)) {
92365 +               if (is_lower && ovl_lookup_trap_inode(sb, parent)) {
92366                         err = -ELOOP;
92367                         pr_err("overlapping %s path\n", name);
92368                 } else if (ovl_is_inuse(parent)) {
92369 @@ -1864,7 +1867,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
92371         if (ovl_upper_mnt(ofs)) {
92372                 err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root,
92373 -                                     "upperdir");
92374 +                                     "upperdir", false);
92375                 if (err)
92376                         return err;
92378 @@ -1875,7 +1878,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
92379                  * workbasedir.  In that case, we already have their traps in
92380                  * inode cache and we will catch that case on lookup.
92381                  */
92382 -               err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
92383 +               err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir",
92384 +                                     false);
92385                 if (err)
92386                         return err;
92387         }
92388 @@ -1883,7 +1887,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
92389         for (i = 1; i < ofs->numlayer; i++) {
92390                 err = ovl_check_layer(sb, ofs,
92391                                       ofs->layers[i].mnt->mnt_root,
92392 -                                     "lowerdir");
92393 +                                     "lowerdir", true);
92394                 if (err)
92395                         return err;
92396         }
92397 diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
92398 index 7f5a01a11f97..404a0a32ddf6 100644
92399 --- a/fs/overlayfs/util.c
92400 +++ b/fs/overlayfs/util.c
92401 @@ -422,18 +422,20 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
92402         }
92405 -static void ovl_dentry_version_inc(struct dentry *dentry, bool impurity)
92406 +static void ovl_dir_version_inc(struct dentry *dentry, bool impurity)
92408         struct inode *inode = d_inode(dentry);
92410         WARN_ON(!inode_is_locked(inode));
92411 +       WARN_ON(!d_is_dir(dentry));
92412         /*
92413 -        * Version is used by readdir code to keep cache consistent.  For merge
92414 -        * dirs all changes need to be noted.  For non-merge dirs, cache only
92415 -        * contains impure (ones which have been copied up and have origins)
92416 -        * entries, so only need to note changes to impure entries.
92417 +        * Version is used by readdir code to keep cache consistent.
92418 +        * For merge dirs (or dirs with origin) all changes need to be noted.
92419 +        * For non-merge dirs, cache contains only impure entries (i.e. ones
92420 +        * which have been copied up and have origins), so only need to note
92421 +        * changes to impure entries.
92422          */
92423 -       if (OVL_TYPE_MERGE(ovl_path_type(dentry)) || impurity)
92424 +       if (!ovl_dir_is_real(dentry) || impurity)
92425                 OVL_I(inode)->version++;
92428 @@ -442,7 +444,7 @@ void ovl_dir_modified(struct dentry *dentry, bool impurity)
92429         /* Copy mtime/ctime */
92430         ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry));
92432 -       ovl_dentry_version_inc(dentry, impurity);
92433 +       ovl_dir_version_inc(dentry, impurity);
92436  u64 ovl_dentry_version_get(struct dentry *dentry)
92437 @@ -638,21 +640,6 @@ int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
92438         return err;
92441 -void ovl_set_flag(unsigned long flag, struct inode *inode)
92443 -       set_bit(flag, &OVL_I(inode)->flags);
92446 -void ovl_clear_flag(unsigned long flag, struct inode *inode)
92448 -       clear_bit(flag, &OVL_I(inode)->flags);
92451 -bool ovl_test_flag(unsigned long flag, struct inode *inode)
92453 -       return test_bit(flag, &OVL_I(inode)->flags);
92456  /**
92457   * Caller must hold a reference to inode to prevent it from being freed while
92458   * it is marked inuse.
92459 diff --git a/fs/proc/array.c b/fs/proc/array.c
92460 index bb87e4d89cd8..7ec59171f197 100644
92461 --- a/fs/proc/array.c
92462 +++ b/fs/proc/array.c
92463 @@ -342,8 +342,10 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
92464         seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p));
92465  #ifdef CONFIG_SECCOMP
92466         seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
92467 +#ifdef CONFIG_SECCOMP_FILTER
92468         seq_put_decimal_ull(m, "\nSeccomp_filters:\t",
92469                             atomic_read(&p->seccomp.filter_count));
92470 +#endif
92471  #endif
92472         seq_puts(m, "\nSpeculation_Store_Bypass:\t");
92473         switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
92474 diff --git a/fs/proc/generic.c b/fs/proc/generic.c
92475 index bc86aa87cc41..5600da30e289 100644
92476 --- a/fs/proc/generic.c
92477 +++ b/fs/proc/generic.c
92478 @@ -756,7 +756,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
92479         while (1) {
92480                 next = pde_subdir_first(de);
92481                 if (next) {
92482 -                       if (unlikely(pde_is_permanent(root))) {
92483 +                       if (unlikely(pde_is_permanent(next))) {
92484                                 write_unlock(&proc_subdir_lock);
92485                                 WARN(1, "removing permanent /proc entry '%s/%s'",
92486                                         next->parent->name, next->name);
92487 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
92488 index e862cab69583..d292f20c4e3d 100644
92489 --- a/fs/proc/task_mmu.c
92490 +++ b/fs/proc/task_mmu.c
92491 @@ -19,6 +19,7 @@
92492  #include <linux/shmem_fs.h>
92493  #include <linux/uaccess.h>
92494  #include <linux/pkeys.h>
92495 +#include <linux/mm_inline.h>
92497  #include <asm/elf.h>
92498  #include <asm/tlb.h>
92499 @@ -1718,7 +1719,7 @@ static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
92500         if (PageSwapCache(page))
92501                 md->swapcache += nr_pages;
92503 -       if (PageActive(page) || PageUnevictable(page))
92504 +       if (PageUnevictable(page) || page_is_active(compound_head(page), NULL))
92505                 md->active += nr_pages;
92507         if (PageWriteback(page))
92508 diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
92509 index d963ae7902f9..67b194ba1b03 100644
92510 --- a/fs/pstore/platform.c
92511 +++ b/fs/pstore/platform.c
92512 @@ -218,7 +218,7 @@ static int zbufsize_842(size_t size)
92513  #if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
92514  static int zbufsize_zstd(size_t size)
92516 -       return ZSTD_compressBound(size);
92517 +       return zstd_compress_bound(size);
92519  #endif
92521 diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
92522 index 7b1128398976..89d492916dea 100644
92523 --- a/fs/squashfs/file.c
92524 +++ b/fs/squashfs/file.c
92525 @@ -211,11 +211,11 @@ static long long read_indexes(struct super_block *sb, int n,
92526   * If the skip factor is limited in this way then the file will use multiple
92527   * slots.
92528   */
92529 -static inline int calculate_skip(int blocks)
92530 +static inline int calculate_skip(u64 blocks)
92532 -       int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
92533 +       u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
92534                  * SQUASHFS_META_INDEXES);
92535 -       return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
92536 +       return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
92540 diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
92541 index b7cb1faa652d..6967c0aae801 100644
92542 --- a/fs/squashfs/zstd_wrapper.c
92543 +++ b/fs/squashfs/zstd_wrapper.c
92544 @@ -34,7 +34,7 @@ static void *zstd_init(struct squashfs_sb_info *msblk, void *buff)
92545                 goto failed;
92546         wksp->window_size = max_t(size_t,
92547                         msblk->block_size, SQUASHFS_METADATA_SIZE);
92548 -       wksp->mem_size = ZSTD_DStreamWorkspaceBound(wksp->window_size);
92549 +       wksp->mem_size = zstd_dstream_workspace_bound(wksp->window_size);
92550         wksp->mem = vmalloc(wksp->mem_size);
92551         if (wksp->mem == NULL)
92552                 goto failed;
92553 @@ -63,15 +63,15 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
92554         struct squashfs_page_actor *output)
92556         struct workspace *wksp = strm;
92557 -       ZSTD_DStream *stream;
92558 +       zstd_dstream *stream;
92559         size_t total_out = 0;
92560         int error = 0;
92561 -       ZSTD_inBuffer in_buf = { NULL, 0, 0 };
92562 -       ZSTD_outBuffer out_buf = { NULL, 0, 0 };
92563 +       zstd_in_buffer in_buf = { NULL, 0, 0 };
92564 +       zstd_out_buffer out_buf = { NULL, 0, 0 };
92565         struct bvec_iter_all iter_all = {};
92566         struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
92568 -       stream = ZSTD_initDStream(wksp->window_size, wksp->mem, wksp->mem_size);
92569 +       stream = zstd_init_dstream(wksp->window_size, wksp->mem, wksp->mem_size);
92571         if (!stream) {
92572                 ERROR("Failed to initialize zstd decompressor\n");
92573 @@ -116,14 +116,14 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
92574                 }
92576                 total_out -= out_buf.pos;
92577 -               zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf);
92578 +               zstd_err = zstd_decompress_stream(stream, &out_buf, &in_buf);
92579                 total_out += out_buf.pos; /* add the additional data produced */
92580                 if (zstd_err == 0)
92581                         break;
92583 -               if (ZSTD_isError(zstd_err)) {
92584 +               if (zstd_is_error(zstd_err)) {
92585                         ERROR("zstd decompression error: %d\n",
92586 -                                       (int)ZSTD_getErrorCode(zstd_err));
92587 +                                       (int)zstd_get_error_code(zstd_err));
92588                         error = -EIO;
92589                         break;
92590                 }
92591 diff --git a/fs/stat.c b/fs/stat.c
92592 index fbc171d038aa..1fa38bdec1a6 100644
92593 --- a/fs/stat.c
92594 +++ b/fs/stat.c
92595 @@ -86,12 +86,20 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
92596         /* SB_NOATIME means filesystem supplies dummy atime value */
92597         if (inode->i_sb->s_flags & SB_NOATIME)
92598                 stat->result_mask &= ~STATX_ATIME;
92600 +       /*
92601 +        * Note: If you add another clause to set an attribute flag, please
92602 +        * update attributes_mask below.
92603 +        */
92604         if (IS_AUTOMOUNT(inode))
92605                 stat->attributes |= STATX_ATTR_AUTOMOUNT;
92607         if (IS_DAX(inode))
92608                 stat->attributes |= STATX_ATTR_DAX;
92610 +       stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
92611 +                                 STATX_ATTR_DAX);
92613         mnt_userns = mnt_user_ns(path->mnt);
92614         if (inode->i_op->getattr)
92615                 return inode->i_op->getattr(mnt_userns, path, stat,
92616 diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
92617 index 0f8a6a16421b..1929ec63a0cb 100644
92618 --- a/fs/ubifs/replay.c
92619 +++ b/fs/ubifs/replay.c
92620 @@ -223,7 +223,8 @@ static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
92621          */
92622         list_for_each_entry_reverse(r, &c->replay_list, list) {
92623                 ubifs_assert(c, r->sqnum >= rino->sqnum);
92624 -               if (key_inum(c, &r->key) == key_inum(c, &rino->key))
92625 +               if (key_inum(c, &r->key) == key_inum(c, &rino->key) &&
92626 +                   key_type(c, &r->key) == UBIFS_INO_KEY)
92627                         return r->deletion == 0;
92629         }
92630 diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
92631 index 472b3039eabb..902e5f7e6642 100644
92632 --- a/fs/xfs/libxfs/xfs_attr.c
92633 +++ b/fs/xfs/libxfs/xfs_attr.c
92634 @@ -928,6 +928,7 @@ xfs_attr_node_addname(
92635          * Search to see if name already exists, and get back a pointer
92636          * to where it should go.
92637          */
92638 +       error = 0;
92639         retval = xfs_attr_node_hasname(args, &state);
92640         if (retval != -ENOATTR && retval != -EEXIST)
92641                 goto out;
92642 diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
92643 index fcde59c65a81..cb3d6b1c655d 100644
92644 --- a/include/crypto/acompress.h
92645 +++ b/include/crypto/acompress.h
92646 @@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
92647   * crypto_free_acomp() -- free ACOMPRESS tfm handle
92648   *
92649   * @tfm:       ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
92650 + *
92651 + * If @tfm is a NULL or error pointer, this function does nothing.
92652   */
92653  static inline void crypto_free_acomp(struct crypto_acomp *tfm)
92655 diff --git a/include/crypto/aead.h b/include/crypto/aead.h
92656 index fcc12c593ef8..e728469c4ccc 100644
92657 --- a/include/crypto/aead.h
92658 +++ b/include/crypto/aead.h
92659 @@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
92660  /**
92661   * crypto_free_aead() - zeroize and free aead handle
92662   * @tfm: cipher handle to be freed
92663 + *
92664 + * If @tfm is a NULL or error pointer, this function does nothing.
92665   */
92666  static inline void crypto_free_aead(struct crypto_aead *tfm)
92668 diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
92669 index 1d3aa252caba..5764b46bd1ec 100644
92670 --- a/include/crypto/akcipher.h
92671 +++ b/include/crypto/akcipher.h
92672 @@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
92673   * crypto_free_akcipher() - free AKCIPHER tfm handle
92674   *
92675   * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
92676 + *
92677 + * If @tfm is a NULL or error pointer, this function does nothing.
92678   */
92679  static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
92681 diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
92682 index 3a1c72fdb7cf..dabaee698718 100644
92683 --- a/include/crypto/chacha.h
92684 +++ b/include/crypto/chacha.h
92685 @@ -47,13 +47,18 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
92686                 hchacha_block_generic(state, out, nrounds);
92689 -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
92690 -static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
92691 +static inline void chacha_init_consts(u32 *state)
92693         state[0]  = 0x61707865; /* "expa" */
92694         state[1]  = 0x3320646e; /* "nd 3" */
92695         state[2]  = 0x79622d32; /* "2-by" */
92696         state[3]  = 0x6b206574; /* "te k" */
92699 +void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
92700 +static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
92702 +       chacha_init_consts(state);
92703         state[4]  = key[0];
92704         state[5]  = key[1];
92705         state[6]  = key[2];
92706 diff --git a/include/crypto/hash.h b/include/crypto/hash.h
92707 index 13f8a6a54ca8..b2bc1e46e86a 100644
92708 --- a/include/crypto/hash.h
92709 +++ b/include/crypto/hash.h
92710 @@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
92711  /**
92712   * crypto_free_ahash() - zeroize and free the ahash handle
92713   * @tfm: cipher handle to be freed
92714 + *
92715 + * If @tfm is a NULL or error pointer, this function does nothing.
92716   */
92717  static inline void crypto_free_ahash(struct crypto_ahash *tfm)
92719 @@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
92720  /**
92721   * crypto_free_shash() - zeroize and free the message digest handle
92722   * @tfm: cipher handle to be freed
92723 + *
92724 + * If @tfm is a NULL or error pointer, this function does nothing.
92725   */
92726  static inline void crypto_free_shash(struct crypto_shash *tfm)
92728 diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
92729 index 064e52ca5248..196aa769f296 100644
92730 --- a/include/crypto/internal/poly1305.h
92731 +++ b/include/crypto/internal/poly1305.h
92732 @@ -18,7 +18,8 @@
92733   * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
92734   */
92736 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
92737 +void poly1305_core_setkey(struct poly1305_core_key *key,
92738 +                         const u8 raw_key[POLY1305_BLOCK_SIZE]);
92739  static inline void poly1305_core_init(struct poly1305_state *state)
92741         *state = (struct poly1305_state){};
92742 diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
92743 index 88b591215d5c..cccceadc164b 100644
92744 --- a/include/crypto/kpp.h
92745 +++ b/include/crypto/kpp.h
92746 @@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
92747   * crypto_free_kpp() - free KPP tfm handle
92748   *
92749   * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
92750 + *
92751 + * If @tfm is a NULL or error pointer, this function does nothing.
92752   */
92753  static inline void crypto_free_kpp(struct crypto_kpp *tfm)
92755 diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
92756 index f1f67fc749cf..090692ec3bc7 100644
92757 --- a/include/crypto/poly1305.h
92758 +++ b/include/crypto/poly1305.h
92759 @@ -58,8 +58,10 @@ struct poly1305_desc_ctx {
92760         };
92761  };
92763 -void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
92764 -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
92765 +void poly1305_init_arch(struct poly1305_desc_ctx *desc,
92766 +                       const u8 key[POLY1305_KEY_SIZE]);
92767 +void poly1305_init_generic(struct poly1305_desc_ctx *desc,
92768 +                          const u8 key[POLY1305_KEY_SIZE]);
92770  static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
92772 diff --git a/include/crypto/rng.h b/include/crypto/rng.h
92773 index 8b4b844b4eef..17bb3673d3c1 100644
92774 --- a/include/crypto/rng.h
92775 +++ b/include/crypto/rng.h
92776 @@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
92777  /**
92778   * crypto_free_rng() - zeroize and free RNG handle
92779   * @tfm: cipher handle to be freed
92780 + *
92781 + * If @tfm is a NULL or error pointer, this function does nothing.
92782   */
92783  static inline void crypto_free_rng(struct crypto_rng *tfm)
92785 diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
92786 index 6a733b171a5d..ef0fc9ed4342 100644
92787 --- a/include/crypto/skcipher.h
92788 +++ b/include/crypto/skcipher.h
92789 @@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm(
92790  /**
92791   * crypto_free_skcipher() - zeroize and free cipher handle
92792   * @tfm: cipher handle to be freed
92793 + *
92794 + * If @tfm is a NULL or error pointer, this function does nothing.
92795   */
92796  static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
92798 diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
92799 index a94c03a61d8f..b2ed3481c6a0 100644
92800 --- a/include/keys/trusted-type.h
92801 +++ b/include/keys/trusted-type.h
92802 @@ -30,6 +30,7 @@ struct trusted_key_options {
92803         uint16_t keytype;
92804         uint32_t keyhandle;
92805         unsigned char keyauth[TPM_DIGEST_SIZE];
92806 +       uint32_t blobauth_len;
92807         unsigned char blobauth[TPM_DIGEST_SIZE];
92808         uint32_t pcrinfo_len;
92809         unsigned char pcrinfo[MAX_PCRINFO_SIZE];
92810 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
92811 index 158aefae1030..40c48e30f1eb 100644
92812 --- a/include/linux/blkdev.h
92813 +++ b/include/linux/blkdev.h
92814 @@ -620,6 +620,7 @@ struct request_queue {
92816  #define QUEUE_FLAG_MQ_DEFAULT  ((1 << QUEUE_FLAG_IO_STAT) |            \
92817                                  (1 << QUEUE_FLAG_SAME_COMP) |          \
92818 +                                (1 << QUEUE_FLAG_SAME_FORCE) |         \
92819                                  (1 << QUEUE_FLAG_NOWAIT))
92821  void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
92822 diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
92823 index 971b33aca13d..99bc82342ca0 100644
92824 --- a/include/linux/bpf_verifier.h
92825 +++ b/include/linux/bpf_verifier.h
92826 @@ -299,10 +299,11 @@ struct bpf_verifier_state_list {
92827  };
92829  /* Possible states for alu_state member. */
92830 -#define BPF_ALU_SANITIZE_SRC           1U
92831 -#define BPF_ALU_SANITIZE_DST           2U
92832 +#define BPF_ALU_SANITIZE_SRC           (1U << 0)
92833 +#define BPF_ALU_SANITIZE_DST           (1U << 1)
92834  #define BPF_ALU_NEG_VALUE              (1U << 2)
92835  #define BPF_ALU_NON_POINTER            (1U << 3)
92836 +#define BPF_ALU_IMMEDIATE              (1U << 4)
92837  #define BPF_ALU_SANITIZE               (BPF_ALU_SANITIZE_SRC | \
92838                                          BPF_ALU_SANITIZE_DST)
92840 diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
92841 index 4f2f79de083e..bd5744360cfa 100644
92842 --- a/include/linux/cgroup.h
92843 +++ b/include/linux/cgroup.h
92844 @@ -432,6 +432,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
92845         css_put(&cgrp->self);
92848 +extern struct mutex cgroup_mutex;
92850 +static inline void cgroup_lock(void)
92852 +       mutex_lock(&cgroup_mutex);
92855 +static inline void cgroup_unlock(void)
92857 +       mutex_unlock(&cgroup_mutex);
92860  /**
92861   * task_css_set_check - obtain a task's css_set with extra access conditions
92862   * @task: the task to obtain css_set for
92863 @@ -446,7 +458,6 @@ static inline void cgroup_put(struct cgroup *cgrp)
92864   * as locks used during the cgroup_subsys::attach() methods.
92865   */
92866  #ifdef CONFIG_PROVE_RCU
92867 -extern struct mutex cgroup_mutex;
92868  extern spinlock_t css_set_lock;
92869  #define task_css_set_check(task, __c)                                  \
92870         rcu_dereference_check((task)->cgroups,                          \
92871 @@ -704,6 +715,8 @@ struct cgroup;
92872  static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
92873  static inline void css_get(struct cgroup_subsys_state *css) {}
92874  static inline void css_put(struct cgroup_subsys_state *css) {}
92875 +static inline void cgroup_lock(void) {}
92876 +static inline void cgroup_unlock(void) {}
92877  static inline int cgroup_attach_task_all(struct task_struct *from,
92878                                          struct task_struct *t) { return 0; }
92879  static inline int cgroupstats_build(struct cgroupstats *stats,
92880 diff --git a/include/linux/compat.h b/include/linux/compat.h
92881 index 6e65be753603..d4c1b402b962 100644
92882 --- a/include/linux/compat.h
92883 +++ b/include/linux/compat.h
92884 @@ -365,6 +365,17 @@ struct compat_robust_list_head {
92885         compat_uptr_t                   list_op_pending;
92886  };
92888 +struct compat_futex_waitv {
92889 +       compat_uptr_t uaddr;
92890 +       compat_uint_t val;
92891 +       compat_uint_t flags;
92894 +struct compat_futex_requeue {
92895 +       compat_uptr_t uaddr;
92896 +       compat_uint_t flags;
92899  #ifdef CONFIG_COMPAT_OLD_SIGACTION
92900  struct compat_old_sigaction {
92901         compat_uptr_t                   sa_handler;
92902 @@ -654,6 +665,18 @@ asmlinkage long
92903  compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
92904                            compat_size_t __user *len_ptr);
92906 +/* kernel/futex2.c */
92907 +asmlinkage long compat_sys_futex_waitv(struct compat_futex_waitv *waiters,
92908 +                                      compat_uint_t nr_futexes, compat_uint_t flags,
92909 +                                      struct __kernel_timespec __user *timo);
92911 +asmlinkage long compat_sys_futex_requeue(struct compat_futex_requeue *uaddr1,
92912 +                                        struct compat_futex_requeue *uaddr2,
92913 +                                        compat_uint_t nr_wake,
92914 +                                        compat_uint_t nr_requeue,
92915 +                                        compat_uint_t cmpval,
92916 +                                        compat_uint_t flags);
92918  /* kernel/itimer.c */
92919  asmlinkage long compat_sys_getitimer(int which,
92920                                      struct old_itimerval32 __user *it);
92921 diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
92922 index f14adb882338..cc7c3fda2aa6 100644
92923 --- a/include/linux/cpuhotplug.h
92924 +++ b/include/linux/cpuhotplug.h
92925 @@ -135,6 +135,7 @@ enum cpuhp_state {
92926         CPUHP_AP_RISCV_TIMER_STARTING,
92927         CPUHP_AP_CLINT_TIMER_STARTING,
92928         CPUHP_AP_CSKY_TIMER_STARTING,
92929 +       CPUHP_AP_TI_GP_TIMER_STARTING,
92930         CPUHP_AP_HYPERV_TIMER_STARTING,
92931         CPUHP_AP_KVM_STARTING,
92932         CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
92933 diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
92934 index 706b68d1359b..13d1f4c14d7b 100644
92935 --- a/include/linux/dma-iommu.h
92936 +++ b/include/linux/dma-iommu.h
92937 @@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
92938  void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
92939                 struct iommu_domain *domain);
92941 +extern bool iommu_dma_forcedac;
92943  #else /* CONFIG_IOMMU_DMA */
92945  struct iommu_domain;
92946 diff --git a/include/linux/elevator.h b/include/linux/elevator.h
92947 index 1fe8e105b83b..dcb2f9022c1d 100644
92948 --- a/include/linux/elevator.h
92949 +++ b/include/linux/elevator.h
92950 @@ -34,7 +34,7 @@ struct elevator_mq_ops {
92951         void (*depth_updated)(struct blk_mq_hw_ctx *);
92953         bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
92954 -       bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
92955 +       bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
92956         int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
92957         void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
92958         void (*requests_merged)(struct request_queue *, struct request *, struct request *);
92959 diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
92960 index 71177b17eee5..66e2423d9feb 100644
92961 --- a/include/linux/firmware/xlnx-zynqmp.h
92962 +++ b/include/linux/firmware/xlnx-zynqmp.h
92963 @@ -354,11 +354,6 @@ int zynqmp_pm_read_pggs(u32 index, u32 *value);
92964  int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
92965  int zynqmp_pm_set_boot_health_status(u32 value);
92966  #else
92967 -static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
92969 -       return ERR_PTR(-ENODEV);
92972  static inline int zynqmp_pm_get_api_version(u32 *version)
92974         return -ENODEV;
92975 diff --git a/include/linux/freezer.h b/include/linux/freezer.h
92976 index 27828145ca09..504cc97bf475 100644
92977 --- a/include/linux/freezer.h
92978 +++ b/include/linux/freezer.h
92979 @@ -311,6 +311,7 @@ static inline void set_freezable(void) {}
92980  #define wait_event_freezekillable_unsafe(wq, condition)                        \
92981                 wait_event_killable(wq, condition)
92983 +#define pm_freezing (false)
92984  #endif /* !CONFIG_FREEZER */
92986  #endif /* FREEZER_H_INCLUDED */
92987 diff --git a/include/linux/fs.h b/include/linux/fs.h
92988 index ec8f3ddf4a6a..33683ff94cb3 100644
92989 --- a/include/linux/fs.h
92990 +++ b/include/linux/fs.h
92991 @@ -683,6 +683,7 @@ struct inode {
92992         };
92993         atomic64_t              i_version;
92994         atomic64_t              i_sequence; /* see futex */
92995 +       atomic64_t              i_sequence2; /* see futex2 */
92996         atomic_t                i_count;
92997         atomic_t                i_dio_count;
92998         atomic_t                i_writecount;
92999 diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
93000 index 286de0520574..ecf0032a0995 100644
93001 --- a/include/linux/gpio/driver.h
93002 +++ b/include/linux/gpio/driver.h
93003 @@ -624,8 +624,17 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
93004  bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
93005                                 unsigned int offset);
93007 +#ifdef CONFIG_GPIOLIB_IRQCHIP
93008  int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
93009                                 struct irq_domain *domain);
93010 +#else
93011 +static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
93012 +                                             struct irq_domain *domain)
93014 +       WARN_ON(1);
93015 +       return -EINVAL;
93017 +#endif
93019  int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
93020  void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
93021 diff --git a/include/linux/hid.h b/include/linux/hid.h
93022 index ef702b3f56e3..3e33eb14118c 100644
93023 --- a/include/linux/hid.h
93024 +++ b/include/linux/hid.h
93025 @@ -262,6 +262,8 @@ struct hid_item {
93026  #define HID_CP_SELECTION       0x000c0080
93027  #define HID_CP_MEDIASELECTION  0x000c0087
93028  #define HID_CP_SELECTDISC      0x000c00ba
93029 +#define HID_CP_VOLUMEUP                0x000c00e9
93030 +#define HID_CP_VOLUMEDOWN      0x000c00ea
93031  #define HID_CP_PLAYBACKSPEED   0x000c00f1
93032  #define HID_CP_PROXIMITY       0x000c0109
93033  #define HID_CP_SPEAKERSYSTEM   0x000c0160
93034 diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
93035 index ba973efcd369..0ba7b3f9029c 100644
93036 --- a/include/linux/huge_mm.h
93037 +++ b/include/linux/huge_mm.h
93038 @@ -443,6 +443,11 @@ static inline bool is_huge_zero_page(struct page *page)
93039         return false;
93042 +static inline bool is_huge_zero_pmd(pmd_t pmd)
93044 +       return false;
93047  static inline bool is_huge_zero_pud(pud_t pud)
93049         return false;
93050 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
93051 index 56622658b215..a670ae129f4b 100644
93052 --- a/include/linux/i2c.h
93053 +++ b/include/linux/i2c.h
93054 @@ -687,6 +687,8 @@ struct i2c_adapter_quirks {
93055  #define I2C_AQ_NO_ZERO_LEN_READ                BIT(5)
93056  #define I2C_AQ_NO_ZERO_LEN_WRITE       BIT(6)
93057  #define I2C_AQ_NO_ZERO_LEN             (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
93058 +/* adapter cannot do repeated START */
93059 +#define I2C_AQ_NO_REP_START            BIT(7)
93061  /*
93062   * i2c_adapter is the structure used to identify a physical i2c bus along
93063 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
93064 index 1bc46b88711a..d1f32b33415a 100644
93065 --- a/include/linux/intel-iommu.h
93066 +++ b/include/linux/intel-iommu.h
93067 @@ -372,6 +372,7 @@ enum {
93068  /* PASID cache invalidation granu */
93069  #define QI_PC_ALL_PASIDS       0
93070  #define QI_PC_PASID_SEL                1
93071 +#define QI_PC_GLOBAL           3
93073  #define QI_EIOTLB_ADDR(addr)   ((u64)(addr) & VTD_PAGE_MASK)
93074  #define QI_EIOTLB_IH(ih)       (((u64)ih) << 6)
93075 diff --git a/include/linux/iommu.h b/include/linux/iommu.h
93076 index 5e7fe519430a..9ca6e6b8084d 100644
93077 --- a/include/linux/iommu.h
93078 +++ b/include/linux/iommu.h
93079 @@ -547,7 +547,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
93080          * structure can be rewritten.
93081          */
93082         if (gather->pgsize != size ||
93083 -           end < gather->start || start > gather->end) {
93084 +           end + 1 < gather->start || start > gather->end + 1) {
93085                 if (gather->pgsize)
93086                         iommu_iotlb_sync(domain, gather);
93087                 gather->pgsize = size;
93088 diff --git a/include/linux/ioport.h b/include/linux/ioport.h
93089 index 55de385c839c..647744d8514e 100644
93090 --- a/include/linux/ioport.h
93091 +++ b/include/linux/ioport.h
93092 @@ -331,7 +331,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq)
93094         res->start = irq;
93095         res->end = irq;
93096 -       res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
93097 +       res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
93100  extern struct address_space *iomem_get_mapping(void);
93101 diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
93102 index 05e22770af51..3ccd19f13f5c 100644
93103 --- a/include/linux/ipc_namespace.h
93104 +++ b/include/linux/ipc_namespace.h
93105 @@ -120,6 +120,9 @@ extern int mq_init_ns(struct ipc_namespace *ns);
93106  static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
93107  #endif
93109 +extern struct ipc_namespace *get_ipc_ns_exported(struct ipc_namespace *ns);
93110 +extern struct ipc_namespace *show_init_ipc_ns(void);
93112  #if defined(CONFIG_IPC_NS)
93113  extern struct ipc_namespace *copy_ipcs(unsigned long flags,
93114         struct user_namespace *user_ns, struct ipc_namespace *ns);
93115 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
93116 index 1b65e7204344..99dccea4293c 100644
93117 --- a/include/linux/kvm_host.h
93118 +++ b/include/linux/kvm_host.h
93119 @@ -192,8 +192,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
93120                     int len, void *val);
93121  int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
93122                             int len, struct kvm_io_device *dev);
93123 -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
93124 -                              struct kvm_io_device *dev);
93125 +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
93126 +                             struct kvm_io_device *dev);
93127  struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
93128                                          gpa_t addr);
93130 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
93131 index 0c04d39a7967..cff95ed1ee2b 100644
93132 --- a/include/linux/memcontrol.h
93133 +++ b/include/linux/memcontrol.h
93134 @@ -212,6 +212,8 @@ struct obj_cgroup {
93135         };
93136  };
93138 +struct lru_gen_mm_list;
93140  /*
93141   * The memory controller data structure. The memory controller controls both
93142   * page cache and RSS per cgroup. We would eventually like to provide
93143 @@ -335,6 +337,10 @@ struct mem_cgroup {
93144         struct deferred_split deferred_split_queue;
93145  #endif
93147 +#ifdef CONFIG_LRU_GEN
93148 +       struct lru_gen_mm_list *mm_list;
93149 +#endif
93151         struct mem_cgroup_per_node *nodeinfo[0];
93152         /* WARNING: nodeinfo must be the last member here */
93153  };
93154 @@ -1077,7 +1083,6 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
93156  static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
93158 -       WARN_ON_ONCE(!rcu_read_lock_held());
93159         return NULL;
93162 diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
93163 index 1dbabf1b3cb8..6e0f66a2e727 100644
93164 --- a/include/linux/mfd/da9063/registers.h
93165 +++ b/include/linux/mfd/da9063/registers.h
93166 @@ -1037,6 +1037,9 @@
93167  #define                DA9063_NONKEY_PIN_AUTODOWN      0x02
93168  #define                DA9063_NONKEY_PIN_AUTOFLPRT     0x03
93170 +/* DA9063_REG_CONFIG_J (addr=0x10F) */
93171 +#define DA9063_TWOWIRE_TO                      0x40
93173  /* DA9063_REG_MON_REG_5 (addr=0x116) */
93174  #define DA9063_MON_A8_IDX_MASK                 0x07
93175  #define                DA9063_MON_A8_IDX_NONE          0x00
93176 diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
93177 index 74d4e193966a..9b54ca13eac3 100644
93178 --- a/include/linux/mfd/intel-m10-bmc.h
93179 +++ b/include/linux/mfd/intel-m10-bmc.h
93180 @@ -11,7 +11,7 @@
93182  #define M10BMC_LEGACY_SYS_BASE         0x300400
93183  #define M10BMC_SYS_BASE                        0x300800
93184 -#define M10BMC_MEM_END                 0x200000fc
93185 +#define M10BMC_MEM_END                 0x1fffffff
93187  /* Register offset of system registers */
93188  #define NIOS2_FW_VERSION               0x0
93189 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
93190 index 53b89631a1d9..ab07f09f2bad 100644
93191 --- a/include/linux/mlx5/driver.h
93192 +++ b/include/linux/mlx5/driver.h
93193 @@ -1226,7 +1226,7 @@ enum {
93194         MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
93195  };
93197 -static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
93198 +static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
93200         struct devlink *devlink = priv_to_devlink(dev);
93201         union devlink_param_value val;
93202 diff --git a/include/linux/mm.h b/include/linux/mm.h
93203 index 8ba434287387..c0ecb207198c 100644
93204 --- a/include/linux/mm.h
93205 +++ b/include/linux/mm.h
93206 @@ -203,6 +203,9 @@ static inline void __mm_zero_struct_page(struct page *page)
93208  extern int sysctl_max_map_count;
93210 +extern unsigned long sysctl_clean_low_kbytes;
93211 +extern unsigned long sysctl_clean_min_kbytes;
93213  extern unsigned long sysctl_user_reserve_kbytes;
93214  extern unsigned long sysctl_admin_reserve_kbytes;
93216 @@ -1070,6 +1073,8 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
93217  #define ZONES_PGOFF            (NODES_PGOFF - ZONES_WIDTH)
93218  #define LAST_CPUPID_PGOFF      (ZONES_PGOFF - LAST_CPUPID_WIDTH)
93219  #define KASAN_TAG_PGOFF                (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
93220 +#define LRU_GEN_PGOFF          (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
93221 +#define LRU_USAGE_PGOFF                (LRU_GEN_PGOFF - LRU_USAGE_WIDTH)
93223  /*
93224   * Define the bit shifts to access each section.  For non-existent
93225 @@ -3170,5 +3175,37 @@ extern int sysctl_nr_trim_pages;
93227  void mem_dump_obj(void *object);
93229 +/**
93230 + * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
93231 + * @seals: the seals to check
93232 + * @vma: the vma to operate on
93233 + *
93234 + * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
93235 + * the vma flags.  Return 0 if check pass, or <0 for errors.
93236 + */
93237 +static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
93239 +       if (seals & F_SEAL_FUTURE_WRITE) {
93240 +               /*
93241 +                * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
93242 +                * "future write" seal active.
93243 +                */
93244 +               if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
93245 +                       return -EPERM;
93247 +               /*
93248 +                * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
93249 +                * MAP_SHARED and read-only, take care to not allow mprotect to
93250 +                * revert protections on such mappings. Do this only for shared
93251 +                * mappings. For private mappings, don't need to mask
93252 +                * VM_MAYWRITE as we still want them to be COW-writable.
93253 +                */
93254 +               if (vma->vm_flags & VM_SHARED)
93255 +                       vma->vm_flags &= ~(VM_MAYWRITE);
93256 +       }
93258 +       return 0;
93261  #endif /* __KERNEL__ */
93262  #endif /* _LINUX_MM_H */
93263 diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
93264 index 355ea1ee32bd..5eb4b12972ec 100644
93265 --- a/include/linux/mm_inline.h
93266 +++ b/include/linux/mm_inline.h
93267 @@ -79,11 +79,299 @@ static __always_inline enum lru_list page_lru(struct page *page)
93268         return lru;
93271 +#ifdef CONFIG_LRU_GEN
93273 +#ifdef CONFIG_LRU_GEN_ENABLED
93274 +DECLARE_STATIC_KEY_TRUE(lru_gen_static_key);
93275 +#define lru_gen_enabled() static_branch_likely(&lru_gen_static_key)
93276 +#else
93277 +DECLARE_STATIC_KEY_FALSE(lru_gen_static_key);
93278 +#define lru_gen_enabled() static_branch_unlikely(&lru_gen_static_key)
93279 +#endif
93281 +/* We track at most MAX_NR_GENS generations using the sliding window technique. */
93282 +static inline int lru_gen_from_seq(unsigned long seq)
93284 +       return seq % MAX_NR_GENS;
93287 +/* Convert the level of usage to a tier. See the comment on MAX_NR_TIERS. */
93288 +static inline int lru_tier_from_usage(int usage)
93290 +       return order_base_2(usage + 1);
93293 +/* Return a proper index regardless whether we keep a full history of stats. */
93294 +static inline int sid_from_seq_or_gen(int seq_or_gen)
93296 +       return seq_or_gen % NR_STAT_GENS;
93299 +/* The youngest and the second youngest generations are considered active. */
93300 +static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
93302 +       unsigned long max_seq = READ_ONCE(lruvec->evictable.max_seq);
93304 +       VM_BUG_ON(!max_seq);
93305 +       VM_BUG_ON(gen >= MAX_NR_GENS);
93307 +       return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
93310 +/* Update the sizes of the multigenerational lru. */
93311 +static inline void lru_gen_update_size(struct page *page, struct lruvec *lruvec,
93312 +                                      int old_gen, int new_gen)
93314 +       int file = page_is_file_lru(page);
93315 +       int zone = page_zonenum(page);
93316 +       int delta = thp_nr_pages(page);
93317 +       enum lru_list lru = LRU_FILE * file;
93318 +       struct lrugen *lrugen = &lruvec->evictable;
93320 +       lockdep_assert_held(&lruvec->lru_lock);
93321 +       VM_BUG_ON(old_gen != -1 && old_gen >= MAX_NR_GENS);
93322 +       VM_BUG_ON(new_gen != -1 && new_gen >= MAX_NR_GENS);
93323 +       VM_BUG_ON(old_gen == -1 && new_gen == -1);
93325 +       if (old_gen >= 0)
93326 +               WRITE_ONCE(lrugen->sizes[old_gen][file][zone],
93327 +                          lrugen->sizes[old_gen][file][zone] - delta);
93328 +       if (new_gen >= 0)
93329 +               WRITE_ONCE(lrugen->sizes[new_gen][file][zone],
93330 +                          lrugen->sizes[new_gen][file][zone] + delta);
93332 +       if (old_gen < 0) {
93333 +               if (lru_gen_is_active(lruvec, new_gen))
93334 +                       lru += LRU_ACTIVE;
93335 +               update_lru_size(lruvec, lru, zone, delta);
93336 +               return;
93337 +       }
93339 +       if (new_gen < 0) {
93340 +               if (lru_gen_is_active(lruvec, old_gen))
93341 +                       lru += LRU_ACTIVE;
93342 +               update_lru_size(lruvec, lru, zone, -delta);
93343 +               return;
93344 +       }
93346 +       if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
93347 +               update_lru_size(lruvec, lru, zone, -delta);
93348 +               update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
93349 +       }
93351 +       VM_BUG_ON(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
93354 +/* Add a page to a list of the multigenerational lru. Return true on success. */
93355 +static inline bool lru_gen_addition(struct page *page, struct lruvec *lruvec, bool front)
93357 +       int gen;
93358 +       unsigned long old_flags, new_flags;
93359 +       int file = page_is_file_lru(page);
93360 +       int zone = page_zonenum(page);
93361 +       struct lrugen *lrugen = &lruvec->evictable;
93363 +       if (PageUnevictable(page) || !lrugen->enabled[file])
93364 +               return false;
93365 +       /*
93366 +        * If a page is being faulted in, add it to the youngest generation.
93367 +        * try_walk_mm_list() may look at the size of the youngest generation to
93368 +        * determine if the aging is due.
93369 +        *
93370 +        * If a page can't be evicted immediately, i.e., a shmem page not in
93371 +        * swap cache, a dirty page waiting on writeback, or a page rejected by
93372 +        * evict_lru_gen_pages() due to races, dirty buffer heads, etc., add it
93373 +        * to the second oldest generation.
93374 +        *
93375 +        * If a page could be evicted immediately, i.e., deactivated, rotated by
93376 +        * writeback, or allocated for buffered io, add it to the oldest
93377 +        * generation.
93378 +        */
93379 +       if (PageActive(page))
93380 +               gen = lru_gen_from_seq(lrugen->max_seq);
93381 +       else if ((!file && !PageSwapCache(page)) ||
93382 +                (PageReclaim(page) && (PageDirty(page) || PageWriteback(page))) ||
93383 +                (!PageReferenced(page) && PageWorkingset(page)))
93384 +               gen = lru_gen_from_seq(lrugen->min_seq[file] + 1);
93385 +       else
93386 +               gen = lru_gen_from_seq(lrugen->min_seq[file]);
93388 +       do {
93389 +               old_flags = READ_ONCE(page->flags);
93390 +               VM_BUG_ON_PAGE(old_flags & LRU_GEN_MASK, page);
93392 +               new_flags = (old_flags & ~(LRU_GEN_MASK | BIT(PG_active))) |
93393 +                           ((gen + 1UL) << LRU_GEN_PGOFF);
93394 +               /* see the comment in evict_lru_gen_pages() */
93395 +               if (!(old_flags & BIT(PG_referenced)))
93396 +                       new_flags &= ~(LRU_USAGE_MASK | LRU_TIER_FLAGS);
93397 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
93399 +       lru_gen_update_size(page, lruvec, -1, gen);
93400 +       if (front)
93401 +               list_add(&page->lru, &lrugen->lists[gen][file][zone]);
93402 +       else
93403 +               list_add_tail(&page->lru, &lrugen->lists[gen][file][zone]);
93405 +       return true;
93408 +/* Delete a page from a list of the multigenerational lru. Return true on success. */
93409 +static inline bool lru_gen_deletion(struct page *page, struct lruvec *lruvec)
93411 +       int gen;
93412 +       unsigned long old_flags, new_flags;
93414 +       do {
93415 +               old_flags = READ_ONCE(page->flags);
93416 +               if (!(old_flags & LRU_GEN_MASK))
93417 +                       return false;
93419 +               VM_BUG_ON_PAGE(PageActive(page), page);
93420 +               VM_BUG_ON_PAGE(PageUnevictable(page), page);
93422 +               gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
93424 +               new_flags = old_flags & ~LRU_GEN_MASK;
93425 +               /* mark page active accordingly */
93426 +               if (lru_gen_is_active(lruvec, gen))
93427 +                       new_flags |= BIT(PG_active);
93428 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
93430 +       lru_gen_update_size(page, lruvec, gen, -1);
93431 +       list_del(&page->lru);
93433 +       return true;
93436 +/* Activate a page from page cache or swap cache after it's mapped. */
93437 +static inline void lru_gen_activation(struct page *page, struct vm_area_struct *vma)
93439 +       if (!lru_gen_enabled())
93440 +               return;
93442 +       if (PageActive(page) || PageUnevictable(page) || vma_is_dax(vma) ||
93443 +           (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)))
93444 +               return;
93445 +       /*
93446 +        * TODO: pass vm_fault to add_to_page_cache_lru() and
93447 +        * __read_swap_cache_async() so they can activate pages directly when in
93448 +        * the page fault path.
93449 +        */
93450 +       activate_page(page);
93453 +/* Return -1 when a page is not on a list of the multigenerational lru. */
93454 +static inline int page_lru_gen(struct page *page)
93456 +       return ((READ_ONCE(page->flags) & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
93459 +/* This function works regardless whether the multigenerational lru is enabled. */
93460 +static inline bool page_is_active(struct page *page, struct lruvec *lruvec)
93462 +       struct mem_cgroup *memcg;
93463 +       int gen = page_lru_gen(page);
93464 +       bool active = false;
93466 +       VM_BUG_ON_PAGE(PageTail(page), page);
93468 +       if (gen < 0)
93469 +               return PageActive(page);
93471 +       if (lruvec) {
93472 +               VM_BUG_ON_PAGE(PageUnevictable(page), page);
93473 +               VM_BUG_ON_PAGE(PageActive(page), page);
93474 +               lockdep_assert_held(&lruvec->lru_lock);
93476 +               return lru_gen_is_active(lruvec, gen);
93477 +       }
93479 +       rcu_read_lock();
93481 +       memcg = page_memcg_rcu(page);
93482 +       lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
93483 +       active = lru_gen_is_active(lruvec, gen);
93485 +       rcu_read_unlock();
93487 +       return active;
93490 +/* Return the level of usage of a page. See the comment on MAX_NR_TIERS. */
93491 +static inline int page_tier_usage(struct page *page)
93493 +       unsigned long flags = READ_ONCE(page->flags);
93495 +       return flags & BIT(PG_workingset) ?
93496 +              ((flags & LRU_USAGE_MASK) >> LRU_USAGE_PGOFF) + 1 : 0;
93499 +/* Increment the usage counter after a page is accessed via file descriptors. */
93500 +static inline bool page_inc_usage(struct page *page)
93502 +       unsigned long old_flags, new_flags;
93504 +       if (!lru_gen_enabled())
93505 +               return PageActive(page);
93507 +       do {
93508 +               old_flags = READ_ONCE(page->flags);
93510 +               if (!(old_flags & BIT(PG_workingset)))
93511 +                       new_flags = old_flags | BIT(PG_workingset);
93512 +               else
93513 +                       new_flags = (old_flags & ~LRU_USAGE_MASK) | min(LRU_USAGE_MASK,
93514 +                                   (old_flags & LRU_USAGE_MASK) + BIT(LRU_USAGE_PGOFF));
93516 +               if (old_flags == new_flags)
93517 +                       break;
93518 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
93520 +       return true;
93523 +#else /* CONFIG_LRU_GEN */
93525 +static inline bool lru_gen_enabled(void)
93527 +       return false;
93530 +static inline bool lru_gen_addition(struct page *page, struct lruvec *lruvec, bool front)
93532 +       return false;
93535 +static inline bool lru_gen_deletion(struct page *page, struct lruvec *lruvec)
93537 +       return false;
93540 +static inline void lru_gen_activation(struct page *page, struct vm_area_struct *vma)
93544 +static inline bool page_is_active(struct page *page, struct lruvec *lruvec)
93546 +       return PageActive(page);
93549 +static inline bool page_inc_usage(struct page *page)
93551 +       return PageActive(page);
93554 +#endif /* CONFIG_LRU_GEN */
93556  static __always_inline void add_page_to_lru_list(struct page *page,
93557                                 struct lruvec *lruvec)
93559         enum lru_list lru = page_lru(page);
93561 +       if (lru_gen_addition(page, lruvec, true))
93562 +               return;
93564         update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
93565         list_add(&page->lru, &lruvec->lists[lru]);
93567 @@ -93,6 +381,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
93569         enum lru_list lru = page_lru(page);
93571 +       if (lru_gen_addition(page, lruvec, false))
93572 +               return;
93574         update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
93575         list_add_tail(&page->lru, &lruvec->lists[lru]);
93577 @@ -100,6 +391,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
93578  static __always_inline void del_page_from_lru_list(struct page *page,
93579                                 struct lruvec *lruvec)
93581 +       if (lru_gen_deletion(page, lruvec))
93582 +               return;
93584         list_del(&page->lru);
93585         update_lru_size(lruvec, page_lru(page), page_zonenum(page),
93586                         -thp_nr_pages(page));
93587 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
93588 index 6613b26a8894..b936703a39a2 100644
93589 --- a/include/linux/mm_types.h
93590 +++ b/include/linux/mm_types.h
93591 @@ -15,6 +15,8 @@
93592  #include <linux/page-flags-layout.h>
93593  #include <linux/workqueue.h>
93594  #include <linux/seqlock.h>
93595 +#include <linux/nodemask.h>
93596 +#include <linux/mmdebug.h>
93598  #include <asm/mmu.h>
93600 @@ -97,10 +99,10 @@ struct page {
93601                 };
93602                 struct {        /* page_pool used by netstack */
93603                         /**
93604 -                        * @dma_addr: might require a 64-bit value even on
93605 +                        * @dma_addr: might require a 64-bit value on
93606                          * 32-bit architectures.
93607                          */
93608 -                       dma_addr_t dma_addr;
93609 +                       unsigned long dma_addr[2];
93610                 };
93611                 struct {        /* slab, slob and slub */
93612                         union {
93613 @@ -383,6 +385,8 @@ struct core_state {
93614         struct completion startup;
93615  };
93617 +#define ANON_AND_FILE 2
93619  struct kioctx_table;
93620  struct mm_struct {
93621         struct {
93622 @@ -561,6 +565,22 @@ struct mm_struct {
93624  #ifdef CONFIG_IOMMU_SUPPORT
93625                 u32 pasid;
93626 +#endif
93627 +#ifdef CONFIG_LRU_GEN
93628 +               struct {
93629 +                       /* the node of a global or per-memcg mm_struct list */
93630 +                       struct list_head list;
93631 +#ifdef CONFIG_MEMCG
93632 +                       /* points to memcg of the owner task above */
93633 +                       struct mem_cgroup *memcg;
93634 +#endif
93635 +                       /* whether this mm_struct has been used since the last walk */
93636 +                       nodemask_t nodes[ANON_AND_FILE];
93637 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
93638 +                       /* the number of CPUs using this mm_struct */
93639 +                       atomic_t nr_cpus;
93640 +#endif
93641 +               } lrugen;
93642  #endif
93643         } __randomize_layout;
93645 @@ -588,6 +608,103 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
93646         return (struct cpumask *)&mm->cpu_bitmap;
93649 +#ifdef CONFIG_LRU_GEN
93651 +void lru_gen_init_mm(struct mm_struct *mm);
93652 +void lru_gen_add_mm(struct mm_struct *mm);
93653 +void lru_gen_del_mm(struct mm_struct *mm);
93654 +#ifdef CONFIG_MEMCG
93655 +int lru_gen_alloc_mm_list(struct mem_cgroup *memcg);
93656 +void lru_gen_free_mm_list(struct mem_cgroup *memcg);
93657 +void lru_gen_migrate_mm(struct mm_struct *mm);
93658 +#endif
93661 + * Track the usage so mm_struct's that haven't been used since the last walk can
93662 + * be skipped. This function adds a theoretical overhead to each context switch,
93663 + * which hasn't been measurable.
93664 + */
93665 +static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
93667 +       int file;
93669 +       /* exclude init_mm, efi_mm, etc. */
93670 +       if (!core_kernel_data((unsigned long)old)) {
93671 +               VM_BUG_ON(old == &init_mm);
93673 +               for (file = 0; file < ANON_AND_FILE; file++)
93674 +                       nodes_setall(old->lrugen.nodes[file]);
93676 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
93677 +               atomic_dec(&old->lrugen.nr_cpus);
93678 +               VM_BUG_ON_MM(atomic_read(&old->lrugen.nr_cpus) < 0, old);
93679 +#endif
93680 +       } else
93681 +               VM_BUG_ON_MM(READ_ONCE(old->lrugen.list.prev) ||
93682 +                            READ_ONCE(old->lrugen.list.next), old);
93684 +       if (!core_kernel_data((unsigned long)new)) {
93685 +               VM_BUG_ON(new == &init_mm);
93687 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
93688 +               atomic_inc(&new->lrugen.nr_cpus);
93689 +               VM_BUG_ON_MM(atomic_read(&new->lrugen.nr_cpus) < 0, new);
93690 +#endif
93691 +       } else
93692 +               VM_BUG_ON_MM(READ_ONCE(new->lrugen.list.prev) ||
93693 +                            READ_ONCE(new->lrugen.list.next), new);
93696 +/* Return whether this mm_struct is being used on any CPUs. */
93697 +static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
93699 +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
93700 +       return !cpumask_empty(mm_cpumask(mm));
93701 +#else
93702 +       return atomic_read(&mm->lrugen.nr_cpus);
93703 +#endif
93706 +#else /* CONFIG_LRU_GEN */
93708 +static inline void lru_gen_init_mm(struct mm_struct *mm)
93712 +static inline void lru_gen_add_mm(struct mm_struct *mm)
93716 +static inline void lru_gen_del_mm(struct mm_struct *mm)
93720 +#ifdef CONFIG_MEMCG
93721 +static inline int lru_gen_alloc_mm_list(struct mem_cgroup *memcg)
93723 +       return 0;
93726 +static inline void lru_gen_free_mm_list(struct mem_cgroup *memcg)
93730 +static inline void lru_gen_migrate_mm(struct mm_struct *mm)
93733 +#endif
93735 +static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
93739 +static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
93741 +       return false;
93744 +#endif /* CONFIG_LRU_GEN */
93746  struct mmu_gather;
93747  extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
93748  extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
93749 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
93750 index 26a3c7bc29ae..a3a4e374f802 100644
93751 --- a/include/linux/mmc/host.h
93752 +++ b/include/linux/mmc/host.h
93753 @@ -302,9 +302,6 @@ struct mmc_host {
93754         u32                     ocr_avail_sdio; /* SDIO-specific OCR */
93755         u32                     ocr_avail_sd;   /* SD-specific OCR */
93756         u32                     ocr_avail_mmc;  /* MMC-specific OCR */
93757 -#ifdef CONFIG_PM_SLEEP
93758 -       struct notifier_block   pm_notify;
93759 -#endif
93760         struct wakeup_source    *ws;            /* Enable consume of uevents */
93761         u32                     max_current_330;
93762         u32                     max_current_300;
93763 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
93764 index 47946cec7584..a22e9e40083f 100644
93765 --- a/include/linux/mmzone.h
93766 +++ b/include/linux/mmzone.h
93767 @@ -285,14 +285,124 @@ static inline bool is_active_lru(enum lru_list lru)
93768         return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
93771 -#define ANON_AND_FILE 2
93773  enum lruvec_flags {
93774         LRUVEC_CONGESTED,               /* lruvec has many dirty pages
93775                                          * backed by a congested BDI
93776                                          */
93777  };
93779 +struct lruvec;
93780 +struct page_vma_mapped_walk;
93782 +#define LRU_GEN_MASK           ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
93783 +#define LRU_USAGE_MASK         ((BIT(LRU_USAGE_WIDTH) - 1) << LRU_USAGE_PGOFF)
93785 +#ifdef CONFIG_LRU_GEN
93788 + * For each lruvec, evictable pages are divided into multiple generations. The
93789 + * youngest and the oldest generation numbers, AKA max_seq and min_seq, are
93790 + * monotonically increasing. The sliding window technique is used to track at
93791 + * most MAX_NR_GENS and at least MIN_NR_GENS generations. An offset within the
93792 + * window, AKA gen, indexes an array of per-type and per-zone lists for the
93793 + * corresponding generation. All pages from this array of lists have gen+1
93794 + * stored in page->flags. 0 is reserved to indicate that pages are not on the
93795 + * lists.
93796 + */
93797 +#define MAX_NR_GENS            ((unsigned int)CONFIG_NR_LRU_GENS)
93800 + * Each generation is then divided into multiple tiers. Tiers represent levels
93801 + * of usage from file descriptors, i.e., mark_page_accessed(). In contrast to
93802 + * moving across generations which requires the lru lock, moving across tiers
93803 + * only involves an atomic operation on page->flags and therefore has a
93804 + * negligible cost.
93805 + *
93806 + * The purposes of tiers are to:
93807 + *   1) estimate whether pages accessed multiple times via file descriptors are
93808 + *   more active than pages accessed only via page tables by separating the two
93809 + *   access types into upper tiers and the base tier and comparing refault rates
93810 + *   across tiers.
93811 + *   2) improve buffered io performance by deferring activations of pages
93812 + *   accessed multiple times until the eviction. That is activations happen in
93813 + *   the reclaim path, not the access path.
93814 + *
93815 + * Pages accessed N times via file descriptors belong to tier order_base_2(N).
93816 + * The base tier uses the following page flag:
93817 + *   !PageReferenced() -- readahead pages
93818 + *   PageReferenced() -- single-access pages
93819 + * All upper tiers use the following page flags:
93820 + *   PageReferenced() && PageWorkingset() -- multi-access pages
93821 + * in addition to the bits storing N-2 accesses. Therefore, we can support one
93822 + * upper tier without using additional bits in page->flags.
93823 + *
93824 + * Note that
93825 + *   1) PageWorkingset() is always set for upper tiers because we want to
93826 + *    maintain the existing psi behavior.
93827 + *   2) !PageReferenced() && PageWorkingset() is not a valid tier. See the
93828 + *   comment in evict_lru_gen_pages().
93829 + *   3) pages accessed only via page tables belong to the base tier.
93830 + *
93831 + * Pages from the base tier are evicted regardless of the refault rate. Pages
93832 + * from upper tiers will be moved to the next generation, if their refault rates
93833 + * are higher than that of the base tier.
93834 + */
93835 +#define MAX_NR_TIERS           ((unsigned int)CONFIG_TIERS_PER_GEN)
93836 +#define LRU_TIER_FLAGS         (BIT(PG_referenced) | BIT(PG_workingset))
93837 +#define LRU_USAGE_SHIFT                (CONFIG_TIERS_PER_GEN - 1)
93839 +/* Whether to keep historical stats for each generation. */
93840 +#ifdef CONFIG_LRU_GEN_STATS
93841 +#define NR_STAT_GENS           ((unsigned int)CONFIG_NR_LRU_GENS)
93842 +#else
93843 +#define NR_STAT_GENS           1U
93844 +#endif
93846 +struct lrugen {
93847 +       /* the aging increments the max generation number */
93848 +       unsigned long max_seq;
93849 +       /* the eviction increments the min generation numbers */
93850 +       unsigned long min_seq[ANON_AND_FILE];
93851 +       /* the birth time of each generation in jiffies */
93852 +       unsigned long timestamps[MAX_NR_GENS];
93853 +       /* the lists of the multigenerational lru */
93854 +       struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
93855 +       /* the sizes of the multigenerational lru in pages */
93856 +       unsigned long sizes[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
93857 +       /* to determine which type and its tiers to evict */
93858 +       atomic_long_t evicted[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS];
93859 +       atomic_long_t refaulted[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS];
93860 +       /* the base tier is inactive and won't be activated */
93861 +       unsigned long activated[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
93862 +       /* arithmetic mean weighted by geometric series 1/2, 1/4, ... */
93863 +       unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
93864 +       unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
93865 +       /* reclaim priority to compare across memcgs */
93866 +       atomic_t priority;
93867 +       /* whether the multigenerational lru is enabled */
93868 +       bool enabled[ANON_AND_FILE];
93871 +void lru_gen_init_lruvec(struct lruvec *lruvec);
93872 +void lru_gen_set_state(bool enable, bool main, bool swap);
93873 +void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw);
93875 +#else /* CONFIG_LRU_GEN */
93877 +static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
93881 +static inline void lru_gen_set_state(bool enable, bool main, bool swap)
93885 +static inline void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw)
93889 +#endif /* CONFIG_LRU_GEN */
93891  struct lruvec {
93892         struct list_head                lists[NR_LRU_LISTS];
93893         /* per lruvec lru_lock for memcg */
93894 @@ -310,6 +420,10 @@ struct lruvec {
93895         unsigned long                   refaults[ANON_AND_FILE];
93896         /* Various lruvec state flags (enum lruvec_flags) */
93897         unsigned long                   flags;
93898 +#ifdef CONFIG_LRU_GEN
93899 +       /* unevictable pages are on LRU_UNEVICTABLE */
93900 +       struct lrugen                   evictable;
93901 +#endif
93902  #ifdef CONFIG_MEMCG
93903         struct pglist_data *pgdat;
93904  #endif
93905 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
93906 index 3327239fa2f9..cc29dee508f7 100644
93907 --- a/include/linux/nfs_xdr.h
93908 +++ b/include/linux/nfs_xdr.h
93909 @@ -15,6 +15,8 @@
93910  #define NFS_DEF_FILE_IO_SIZE   (4096U)
93911  #define NFS_MIN_FILE_IO_SIZE   (1024U)
93913 +#define NFS_BITMASK_SZ         3
93915  struct nfs4_string {
93916         unsigned int len;
93917         char *data;
93918 @@ -525,7 +527,8 @@ struct nfs_closeargs {
93919         struct nfs_seqid *      seqid;
93920         fmode_t                 fmode;
93921         u32                     share_access;
93922 -       u32 *                   bitmask;
93923 +       const u32 *             bitmask;
93924 +       u32                     bitmask_store[NFS_BITMASK_SZ];
93925         struct nfs4_layoutreturn_args *lr_args;
93926  };
93928 @@ -608,7 +611,8 @@ struct nfs4_delegreturnargs {
93929         struct nfs4_sequence_args       seq_args;
93930         const struct nfs_fh *fhandle;
93931         const nfs4_stateid *stateid;
93932 -       u32 * bitmask;
93933 +       const u32 *bitmask;
93934 +       u32 bitmask_store[NFS_BITMASK_SZ];
93935         struct nfs4_layoutreturn_args *lr_args;
93936  };
93938 @@ -648,7 +652,8 @@ struct nfs_pgio_args {
93939         union {
93940                 unsigned int            replen;                 /* used by read */
93941                 struct {
93942 -                       u32 *                   bitmask;        /* used by write */
93943 +                       const u32 *             bitmask;        /* used by write */
93944 +                       u32 bitmask_store[NFS_BITMASK_SZ];      /* used by write */
93945                         enum nfs3_stable_how    stable;         /* used by write */
93946                 };
93947         };
93948 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
93949 index ac398e143c9a..89fe4e3592f9 100644
93950 --- a/include/linux/nodemask.h
93951 +++ b/include/linux/nodemask.h
93952 @@ -486,6 +486,7 @@ static inline int num_node_state(enum node_states state)
93953  #define first_online_node      0
93954  #define first_memory_node      0
93955  #define next_online_node(nid)  (MAX_NUMNODES)
93956 +#define next_memory_node(nid)  (MAX_NUMNODES)
93957  #define nr_node_ids            1U
93958  #define nr_online_nodes                1U
93960 diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
93961 index 7d4ec26d8a3e..df83aaec8498 100644
93962 --- a/include/linux/page-flags-layout.h
93963 +++ b/include/linux/page-flags-layout.h
93964 @@ -24,6 +24,17 @@
93965  #error ZONES_SHIFT -- too many zones configured adjust calculation
93966  #endif
93968 +#ifdef CONFIG_LRU_GEN
93970 + * LRU_GEN_WIDTH is generated from order_base_2(CONFIG_NR_LRU_GENS + 1). And the
93971 + * comment on MAX_NR_TIERS explains why we offset by 2 here.
93972 + */
93973 +#define LRU_USAGE_WIDTH                (CONFIG_TIERS_PER_GEN - 2)
93974 +#else
93975 +#define LRU_GEN_WIDTH          0
93976 +#define LRU_USAGE_WIDTH                0
93977 +#endif
93979  #ifdef CONFIG_SPARSEMEM
93980  #include <asm/sparsemem.h>
93982 @@ -56,7 +67,8 @@
93984  #define ZONES_WIDTH            ZONES_SHIFT
93986 -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
93987 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+NODES_SHIFT \
93988 +       <= BITS_PER_LONG - NR_PAGEFLAGS
93989  #define NODES_WIDTH            NODES_SHIFT
93990  #else
93991  #ifdef CONFIG_SPARSEMEM_VMEMMAP
93992 @@ -83,14 +95,16 @@
93993  #define KASAN_TAG_WIDTH 0
93994  #endif
93996 -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
93997 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+ \
93998 +       NODES_WIDTH+KASAN_TAG_WIDTH+LAST_CPUPID_SHIFT \
93999         <= BITS_PER_LONG - NR_PAGEFLAGS
94000  #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
94001  #else
94002  #define LAST_CPUPID_WIDTH 0
94003  #endif
94005 -#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
94006 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+ \
94007 +       NODES_WIDTH+KASAN_TAG_WIDTH+LAST_CPUPID_WIDTH \
94008         > BITS_PER_LONG - NR_PAGEFLAGS
94009  #error "Not enough bits in page flags"
94010  #endif
94011 diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
94012 index 04a34c08e0a6..e58984fca32a 100644
94013 --- a/include/linux/page-flags.h
94014 +++ b/include/linux/page-flags.h
94015 @@ -817,7 +817,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
94016          1UL << PG_private      | 1UL << PG_private_2   |       \
94017          1UL << PG_writeback    | 1UL << PG_reserved    |       \
94018          1UL << PG_slab         | 1UL << PG_active      |       \
94019 -        1UL << PG_unevictable  | __PG_MLOCKED)
94020 +        1UL << PG_unevictable  | __PG_MLOCKED | LRU_GEN_MASK)
94022  /*
94023   * Flags checked when a page is prepped for return by the page allocator.
94024 @@ -828,7 +828,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
94025   * alloc-free cycle to prevent from reusing the page.
94026   */
94027  #define PAGE_FLAGS_CHECK_AT_PREP       \
94028 -       (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
94029 +       ((((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_USAGE_MASK)
94031  #define PAGE_FLAGS_PRIVATE                             \
94032         (1UL << PG_private | 1UL << PG_private_2)
94033 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
94034 index 3f7f89ea5e51..3d478abf411c 100644
94035 --- a/include/linux/perf_event.h
94036 +++ b/include/linux/perf_event.h
94037 @@ -607,6 +607,7 @@ struct swevent_hlist {
94038  #define PERF_ATTACH_TASK_DATA  0x08
94039  #define PERF_ATTACH_ITRACE     0x10
94040  #define PERF_ATTACH_SCHED_CB   0x20
94041 +#define PERF_ATTACH_CHILD      0x40
94043  struct perf_cgroup;
94044  struct perf_buffer;
94045 diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
94046 index 5e772392a379..08dd9b8c055a 100644
94047 --- a/include/linux/pgtable.h
94048 +++ b/include/linux/pgtable.h
94049 @@ -193,7 +193,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
94050  #endif
94052  #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
94053 -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
94054 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
94055  static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
94056                                             unsigned long address,
94057                                             pmd_t *pmdp)
94058 @@ -214,7 +214,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
94059         BUILD_BUG();
94060         return 0;
94062 -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
94063 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG */
94064  #endif
94066  #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
94067 diff --git a/include/linux/phy.h b/include/linux/phy.h
94068 index 1a12e4436b5b..8644b097dea3 100644
94069 --- a/include/linux/phy.h
94070 +++ b/include/linux/phy.h
94071 @@ -493,6 +493,7 @@ struct macsec_ops;
94072   * @loopback_enabled: Set true if this PHY has been loopbacked successfully.
94073   * @downshifted_rate: Set true if link speed has been downshifted.
94074   * @is_on_sfp_module: Set true if PHY is located on an SFP module.
94075 + * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY
94076   * @state: State of the PHY for management purposes
94077   * @dev_flags: Device-specific flags used by the PHY driver.
94078   * @irq: IRQ number of the PHY's interrupt (-1 if none)
94079 @@ -567,6 +568,7 @@ struct phy_device {
94080         unsigned loopback_enabled:1;
94081         unsigned downshifted_rate:1;
94082         unsigned is_on_sfp_module:1;
94083 +       unsigned mac_managed_pm:1;
94085         unsigned autoneg:1;
94086         /* The most recently read link state */
94087 diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
94088 index 3f23f6e430bf..cd81e060863c 100644
94089 --- a/include/linux/platform_device.h
94090 +++ b/include/linux/platform_device.h
94091 @@ -359,4 +359,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev)
94093  #endif /* CONFIG_SUPERH */
94095 +/* For now only SuperH uses it */
94096 +void early_platform_cleanup(void);
94098  #endif /* _PLATFORM_DEVICE_H_ */
94099 diff --git a/include/linux/pm.h b/include/linux/pm.h
94100 index 482313a8ccfc..628718697679 100644
94101 --- a/include/linux/pm.h
94102 +++ b/include/linux/pm.h
94103 @@ -602,6 +602,7 @@ struct dev_pm_info {
94104         unsigned int            idle_notification:1;
94105         unsigned int            request_pending:1;
94106         unsigned int            deferred_resume:1;
94107 +       unsigned int            needs_force_resume:1;
94108         unsigned int            runtime_auto:1;
94109         bool                    ignore_children:1;
94110         unsigned int            no_callbacks:1;
94111 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
94112 index b492ae00cc90..6c08a085367b 100644
94113 --- a/include/linux/pm_runtime.h
94114 +++ b/include/linux/pm_runtime.h
94115 @@ -265,7 +265,7 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {}
94116  static inline void pm_runtime_irq_safe(struct device *dev) {}
94117  static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
94119 -static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
94120 +static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
94121  static inline void pm_runtime_mark_last_busy(struct device *dev) {}
94122  static inline void __pm_runtime_use_autosuspend(struct device *dev,
94123                                                 bool use) {}
94124 diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
94125 index 111a40d0d3d5..8d5f4f40fb41 100644
94126 --- a/include/linux/power/bq27xxx_battery.h
94127 +++ b/include/linux/power/bq27xxx_battery.h
94128 @@ -53,7 +53,6 @@ struct bq27xxx_reg_cache {
94129         int capacity;
94130         int energy;
94131         int flags;
94132 -       int power_avg;
94133         int health;
94134  };
94136 diff --git a/include/linux/reset.h b/include/linux/reset.h
94137 index b9109efa2a5c..9700124affa3 100644
94138 --- a/include/linux/reset.h
94139 +++ b/include/linux/reset.h
94140 @@ -47,6 +47,11 @@ static inline int reset_control_reset(struct reset_control *rstc)
94141         return 0;
94144 +static inline int reset_control_rearm(struct reset_control *rstc)
94146 +       return 0;
94149  static inline int reset_control_assert(struct reset_control *rstc)
94151         return 0;
94152 diff --git a/include/linux/sched.h b/include/linux/sched.h
94153 index ef00bb22164c..98f2e1dc6f90 100644
94154 --- a/include/linux/sched.h
94155 +++ b/include/linux/sched.h
94156 @@ -216,13 +216,40 @@ struct task_group;
94158  extern void scheduler_tick(void);
94160 -#define        MAX_SCHEDULE_TIMEOUT            LONG_MAX
94162 +#define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
94163  extern long schedule_timeout(long timeout);
94164  extern long schedule_timeout_interruptible(long timeout);
94165  extern long schedule_timeout_killable(long timeout);
94166  extern long schedule_timeout_uninterruptible(long timeout);
94167  extern long schedule_timeout_idle(long timeout);
94169 +#ifdef CONFIG_HIGH_RES_TIMERS
94170 +extern long schedule_msec_hrtimeout(long timeout);
94171 +extern long schedule_min_hrtimeout(void);
94172 +extern long schedule_msec_hrtimeout_interruptible(long timeout);
94173 +extern long schedule_msec_hrtimeout_uninterruptible(long timeout);
94174 +#else
94175 +static inline long schedule_msec_hrtimeout(long timeout)
94177 +       return schedule_timeout(msecs_to_jiffies(timeout));
94180 +static inline long schedule_min_hrtimeout(void)
94182 +       return schedule_timeout(1);
94185 +static inline long schedule_msec_hrtimeout_interruptible(long timeout)
94187 +       return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
94190 +static inline long schedule_msec_hrtimeout_uninterruptible(long timeout)
94192 +       return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
94194 +#endif
94196  asmlinkage void schedule(void);
94197  extern void schedule_preempt_disabled(void);
94198  asmlinkage void preempt_schedule_irq(void);
94199 diff --git a/include/linux/smp.h b/include/linux/smp.h
94200 index 70c6f6284dcf..238a3f97a415 100644
94201 --- a/include/linux/smp.h
94202 +++ b/include/linux/smp.h
94203 @@ -73,7 +73,7 @@ void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
94204  void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
94205                            void *info, bool wait, const struct cpumask *mask);
94207 -int smp_call_function_single_async(int cpu, call_single_data_t *csd);
94208 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
94210  #ifdef CONFIG_SMP
94212 diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
94213 index 592897fa4f03..643139b1eafe 100644
94214 --- a/include/linux/spi/spi.h
94215 +++ b/include/linux/spi/spi.h
94216 @@ -510,6 +510,9 @@ struct spi_controller {
94218  #define SPI_MASTER_GPIO_SS             BIT(5)  /* GPIO CS must select slave */
94220 +       /* flag indicating this is a non-devres managed controller */
94221 +       bool                    devm_allocated;
94223         /* flag indicating this is an SPI slave controller */
94224         bool                    slave;
94226 diff --git a/include/linux/swap.h b/include/linux/swap.h
94227 index 4cc6ec3bf0ab..0e7532c7db22 100644
94228 --- a/include/linux/swap.h
94229 +++ b/include/linux/swap.h
94230 @@ -344,13 +344,14 @@ extern void lru_add_drain_cpu(int cpu);
94231  extern void lru_add_drain_cpu_zone(struct zone *zone);
94232  extern void lru_add_drain_all(void);
94233  extern void rotate_reclaimable_page(struct page *page);
94234 +extern void activate_page(struct page *page);
94235  extern void deactivate_file_page(struct page *page);
94236  extern void deactivate_page(struct page *page);
94237  extern void mark_page_lazyfree(struct page *page);
94238  extern void swap_setup(void);
94240 -extern void lru_cache_add_inactive_or_unevictable(struct page *page,
94241 -                                               struct vm_area_struct *vma);
94242 +extern void lru_cache_add_page_vma(struct page *page, struct vm_area_struct *vma,
94243 +                                  bool faulting);
94245  /* linux/mm/vmscan.c */
94246  extern unsigned long zone_reclaimable_pages(struct zone *zone);
94247 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
94248 index 2839dc9a7c01..b07b7d4334a6 100644
94249 --- a/include/linux/syscalls.h
94250 +++ b/include/linux/syscalls.h
94251 @@ -69,6 +69,8 @@ struct io_uring_params;
94252  struct clone_args;
94253  struct open_how;
94254  struct mount_attr;
94255 +struct futex_waitv;
94256 +struct futex_requeue;
94258  #include <linux/types.h>
94259  #include <linux/aio_abi.h>
94260 @@ -619,6 +621,20 @@ asmlinkage long sys_get_robust_list(int pid,
94261  asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
94262                                     size_t len);
94264 +/* kernel/futex2.c */
94265 +asmlinkage long sys_futex_wait(void __user *uaddr, unsigned int val,
94266 +                              unsigned int flags,
94267 +                              struct __kernel_timespec __user *timo);
94268 +asmlinkage long sys_futex_wake(void __user *uaddr, unsigned int nr_wake,
94269 +                              unsigned int flags);
94270 +asmlinkage long sys_futex_waitv(struct futex_waitv __user *waiters,
94271 +                               unsigned int nr_futexes, unsigned int flags,
94272 +                               struct __kernel_timespec __user *timo);
94273 +asmlinkage long sys_futex_requeue(struct futex_requeue __user *uaddr1,
94274 +                                 struct futex_requeue __user *uaddr2,
94275 +                                 unsigned int nr_wake, unsigned int nr_requeue,
94276 +                                 unsigned int cmpval, unsigned int flags);
94278  /* kernel/hrtimer.c */
94279  asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
94280                               struct __kernel_timespec __user *rmtp);
94281 @@ -1300,6 +1316,8 @@ int ksys_ipc(unsigned int call, int first, unsigned long second,
94282         unsigned long third, void __user * ptr, long fifth);
94283  int compat_ksys_ipc(u32 call, int first, int second,
94284         u32 third, u32 ptr, u32 fifth);
94285 +long ksys_futex_wake(void __user *uaddr, unsigned long nr_wake,
94286 +                    unsigned int flags);
94288  /*
94289   * The following kernel syscall equivalents are just wrappers to fs-internal
94290 diff --git a/include/linux/tcp.h b/include/linux/tcp.h
94291 index 48d8a363319e..1bd559c69e83 100644
94292 --- a/include/linux/tcp.h
94293 +++ b/include/linux/tcp.h
94294 @@ -225,7 +225,8 @@ struct tcp_sock {
94295         u8      compressed_ack;
94296         u8      dup_ack_counter:2,
94297                 tlp_retrans:1,  /* TLP is a retransmission */
94298 -               unused:5;
94299 +               fast_ack_mode:2, /* which fast ack mode ? */
94300 +               unused:3;
94301         u32     chrono_start;   /* Start time in jiffies of a TCP chrono */
94302         u32     chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
94303         u8      chrono_type:2,  /* current chronograph type */
94304 diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
94305 index 61c3372d3f32..2f719b471d52 100644
94306 --- a/include/linux/tty_driver.h
94307 +++ b/include/linux/tty_driver.h
94308 @@ -228,7 +228,7 @@
94309   *
94310   *     Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
94311   *     structure to complete. This method is optional and will only be called
94312 - *     if provided (otherwise EINVAL will be returned).
94313 + *     if provided (otherwise ENOTTY will be returned).
94314   */
94316  #include <linux/export.h>
94317 diff --git a/include/linux/udp.h b/include/linux/udp.h
94318 index aa84597bdc33..ae58ff3b6b5b 100644
94319 --- a/include/linux/udp.h
94320 +++ b/include/linux/udp.h
94321 @@ -51,7 +51,9 @@ struct udp_sock {
94322                                            * different encapsulation layer set
94323                                            * this
94324                                            */
94325 -                        gro_enabled:1; /* Can accept GRO packets */
94326 +                        gro_enabled:1, /* Request GRO aggregation */
94327 +                        accept_udp_l4:1,
94328 +                        accept_udp_fraglist:1;
94329         /*
94330          * Following member retains the information to create a UDP header
94331          * when the socket is uncorked.
94332 @@ -131,8 +133,16 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
94334  static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
94336 -       return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
94337 -              skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
94338 +       if (!skb_is_gso(skb))
94339 +               return false;
94341 +       if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
94342 +               return true;
94344 +       if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
94345 +               return true;
94347 +       return false;
94350  #define udp_portaddr_for_each_entry(__sk, list) \
94351 diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
94352 index 70d681918d01..bf00259493e0 100644
94353 --- a/include/linux/usb/pd.h
94354 +++ b/include/linux/usb/pd.h
94355 @@ -493,4 +493,6 @@ static inline unsigned int rdo_max_power(u32 rdo)
94356  #define PD_N_CAPS_COUNT                (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
94357  #define PD_N_HARD_RESET_COUNT  2
94359 +#define PD_P_SNK_STDBY_MW      2500    /* 2500 mW */
94361  #endif /* __LINUX_USB_PD_H */
94362 diff --git a/include/linux/zstd.h b/include/linux/zstd.h
94363 index e87f78c9b19c..446ecabcdd02 100644
94364 --- a/include/linux/zstd.h
94365 +++ b/include/linux/zstd.h
94366 @@ -1,138 +1,97 @@
94367 +/* SPDX-License-Identifier: GPL-2.0-only */
94368  /*
94369 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
94370 + * Copyright (c) Yann Collet, Facebook, Inc.
94371   * All rights reserved.
94372   *
94373 - * This source code is licensed under the BSD-style license found in the
94374 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
94375 - * An additional grant of patent rights can be found in the PATENTS file in the
94376 - * same directory.
94377 - *
94378 - * This program is free software; you can redistribute it and/or modify it under
94379 - * the terms of the GNU General Public License version 2 as published by the
94380 - * Free Software Foundation. This program is dual-licensed; you may select
94381 - * either version 2 of the GNU General Public License ("GPL") or BSD license
94382 - * ("BSD").
94383 + * This source code is licensed under both the BSD-style license (found in the
94384 + * LICENSE file in the root directory of https://github.com/facebook/zstd) and
94385 + * the GPLv2 (found in the COPYING file in the root directory of
94386 + * https://github.com/facebook/zstd). You may select, at your option, one of the
94387 + * above-listed licenses.
94388   */
94390 -#ifndef ZSTD_H
94391 -#define ZSTD_H
94392 +#ifndef LINUX_ZSTD_H
94393 +#define LINUX_ZSTD_H
94395 -/* ======   Dependency   ======*/
94396 -#include <linux/types.h>   /* size_t */
94397 +/**
94398 + * This is a kernel-style API that wraps the upstream zstd API, which cannot be
94399 + * used directly because the symbols aren't exported. It exposes the minimal
94400 + * functionality which is currently required by users of zstd in the kernel.
94401 + * Expose extra functions from lib/zstd/zstd.h as needed.
94402 + */
94404 +/* ======   Dependency   ====== */
94405 +#include <linux/types.h>
94406 +#include <linux/zstd_errors.h>
94407 +#include <linux/zstd_lib.h>
94409 -/*-*****************************************************************************
94410 - * Introduction
94411 +/* ======   Helper Functions   ====== */
94412 +/**
94413 + * zstd_compress_bound() - maximum compressed size in worst case scenario
94414 + * @src_size: The size of the data to compress.
94415   *
94416 - * zstd, short for Zstandard, is a fast lossless compression algorithm,
94417 - * targeting real-time compression scenarios at zlib-level and better
94418 - * compression ratios. The zstd compression library provides in-memory
94419 - * compression and decompression functions. The library supports compression
94420 - * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled
94421 - * ultra, should be used with caution, as they require more memory.
94422 - * Compression can be done in:
94423 - *  - a single step, reusing a context (described as Explicit memory management)
94424 - *  - unbounded multiple steps (described as Streaming compression)
94425 - * The compression ratio achievable on small data can be highly improved using
94426 - * compression with a dictionary in:
94427 - *  - a single step (described as Simple dictionary API)
94428 - *  - a single step, reusing a dictionary (described as Fast dictionary API)
94429 - ******************************************************************************/
94431 -/*======  Helper functions  ======*/
94432 + * Return:    The maximum compressed size in the worst case scenario.
94433 + */
94434 +size_t zstd_compress_bound(size_t src_size);
94436  /**
94437 - * enum ZSTD_ErrorCode - zstd error codes
94438 + * zstd_is_error() - tells if a size_t function result is an error code
94439 + * @code:  The function result to check for error.
94440   *
94441 - * Functions that return size_t can be checked for errors using ZSTD_isError()
94442 - * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
94443 + * Return: Non-zero iff the code is an error.
94444 + */
94445 +unsigned int zstd_is_error(size_t code);
94447 +/**
94448 + * enum zstd_error_code - zstd error codes
94449   */
94450 -typedef enum {
94451 -       ZSTD_error_no_error,
94452 -       ZSTD_error_GENERIC,
94453 -       ZSTD_error_prefix_unknown,
94454 -       ZSTD_error_version_unsupported,
94455 -       ZSTD_error_parameter_unknown,
94456 -       ZSTD_error_frameParameter_unsupported,
94457 -       ZSTD_error_frameParameter_unsupportedBy32bits,
94458 -       ZSTD_error_frameParameter_windowTooLarge,
94459 -       ZSTD_error_compressionParameter_unsupported,
94460 -       ZSTD_error_init_missing,
94461 -       ZSTD_error_memory_allocation,
94462 -       ZSTD_error_stage_wrong,
94463 -       ZSTD_error_dstSize_tooSmall,
94464 -       ZSTD_error_srcSize_wrong,
94465 -       ZSTD_error_corruption_detected,
94466 -       ZSTD_error_checksum_wrong,
94467 -       ZSTD_error_tableLog_tooLarge,
94468 -       ZSTD_error_maxSymbolValue_tooLarge,
94469 -       ZSTD_error_maxSymbolValue_tooSmall,
94470 -       ZSTD_error_dictionary_corrupted,
94471 -       ZSTD_error_dictionary_wrong,
94472 -       ZSTD_error_dictionaryCreation_failed,
94473 -       ZSTD_error_maxCode
94474 -} ZSTD_ErrorCode;
94475 +typedef ZSTD_ErrorCode zstd_error_code;
94477  /**
94478 - * ZSTD_maxCLevel() - maximum compression level available
94479 + * zstd_get_error_code() - translates an error function result to an error code
94480 + * @code:  The function result for which zstd_is_error(code) is true.
94481   *
94482 - * Return: Maximum compression level available.
94483 + * Return: A unique error code for this error.
94484   */
94485 -int ZSTD_maxCLevel(void);
94486 +zstd_error_code zstd_get_error_code(size_t code);
94488  /**
94489 - * ZSTD_compressBound() - maximum compressed size in worst case scenario
94490 - * @srcSize: The size of the data to compress.
94491 + * zstd_get_error_name() - translates an error function result to a string
94492 + * @code:  The function result for which zstd_is_error(code) is true.
94493   *
94494 - * Return:   The maximum compressed size in the worst case scenario.
94495 + * Return: An error string corresponding to the error code.
94496   */
94497 -size_t ZSTD_compressBound(size_t srcSize);
94498 +const char *zstd_get_error_name(size_t code);
94500  /**
94501 - * ZSTD_isError() - tells if a size_t function result is an error code
94502 - * @code:  The function result to check for error.
94503 + * zstd_min_clevel() - minimum allowed compression level
94504   *
94505 - * Return: Non-zero iff the code is an error.
94506 + * Return: The minimum allowed compression level.
94507   */
94508 -static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
94510 -       return code > (size_t)-ZSTD_error_maxCode;
94512 +int zstd_min_clevel(void);
94514  /**
94515 - * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
94516 - * @functionResult: The result of a function for which ZSTD_isError() is true.
94517 + * zstd_max_clevel() - maximum allowed compression level
94518   *
94519 - * Return:          The ZSTD_ErrorCode corresponding to the functionResult or 0
94520 - *                  if the functionResult isn't an error.
94521 + * Return: The maximum allowed compression level.
94522   */
94523 -static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode(
94524 -       size_t functionResult)
94526 -       if (!ZSTD_isError(functionResult))
94527 -               return (ZSTD_ErrorCode)0;
94528 -       return (ZSTD_ErrorCode)(0 - functionResult);
94530 +int zstd_max_clevel(void);
94532 +/* ======   Parameter Selection   ====== */
94534  /**
94535 - * enum ZSTD_strategy - zstd compression search strategy
94536 + * enum zstd_strategy - zstd compression search strategy
94537   *
94538 - * From faster to stronger.
94539 + * From faster to stronger. See zstd_lib.h.
94540   */
94541 -typedef enum {
94542 -       ZSTD_fast,
94543 -       ZSTD_dfast,
94544 -       ZSTD_greedy,
94545 -       ZSTD_lazy,
94546 -       ZSTD_lazy2,
94547 -       ZSTD_btlazy2,
94548 -       ZSTD_btopt,
94549 -       ZSTD_btopt2
94550 -} ZSTD_strategy;
94551 +typedef ZSTD_strategy zstd_strategy;
94553  /**
94554 - * struct ZSTD_compressionParameters - zstd compression parameters
94555 + * struct zstd_compression_parameters - zstd compression parameters
94556   * @windowLog:    Log of the largest match distance. Larger means more
94557   *                compression, and more memory needed during decompression.
94558 - * @chainLog:     Fully searched segment. Larger means more compression, slower,
94559 - *                and more memory (useless for fast).
94560 + * @chainLog:     Fully searched segment. Larger means more compression,
94561 + *                slower, and more memory (useless for fast).
94562   * @hashLog:      Dispatch table. Larger means more compression,
94563   *                slower, and more memory.
94564   * @searchLog:    Number of searches. Larger means more compression and slower.
94565 @@ -141,1017 +100,348 @@ typedef enum {
94566   * @targetLength: Acceptable match size for optimal parser (only). Larger means
94567   *                more compression, and slower.
94568   * @strategy:     The zstd compression strategy.
94569 + *
94570 + * See zstd_lib.h.
94571   */
94572 -typedef struct {
94573 -       unsigned int windowLog;
94574 -       unsigned int chainLog;
94575 -       unsigned int hashLog;
94576 -       unsigned int searchLog;
94577 -       unsigned int searchLength;
94578 -       unsigned int targetLength;
94579 -       ZSTD_strategy strategy;
94580 -} ZSTD_compressionParameters;
94581 +typedef ZSTD_compressionParameters zstd_compression_parameters;
94583  /**
94584 - * struct ZSTD_frameParameters - zstd frame parameters
94585 - * @contentSizeFlag: Controls whether content size will be present in the frame
94586 - *                   header (when known).
94587 - * @checksumFlag:    Controls whether a 32-bit checksum is generated at the end
94588 - *                   of the frame for error detection.
94589 - * @noDictIDFlag:    Controls whether dictID will be saved into the frame header
94590 - *                   when using dictionary compression.
94591 + * struct zstd_frame_parameters - zstd frame parameters
94592 + * @contentSizeFlag: Controls whether content size will be present in the
94593 + *                   frame header (when known).
94594 + * @checksumFlag:    Controls whether a 32-bit checksum is generated at the
94595 + *                   end of the frame for error detection.
94596 + * @noDictIDFlag:    Controls whether dictID will be saved into the frame
94597 + *                   header when using dictionary compression.
94598   *
94599 - * The default value is all fields set to 0.
94600 + * The default value is all fields set to 0. See zstd_lib.h.
94601   */
94602 -typedef struct {
94603 -       unsigned int contentSizeFlag;
94604 -       unsigned int checksumFlag;
94605 -       unsigned int noDictIDFlag;
94606 -} ZSTD_frameParameters;
94607 +typedef ZSTD_frameParameters zstd_frame_parameters;
94609  /**
94610 - * struct ZSTD_parameters - zstd parameters
94611 + * struct zstd_parameters - zstd parameters
94612   * @cParams: The compression parameters.
94613   * @fParams: The frame parameters.
94614   */
94615 -typedef struct {
94616 -       ZSTD_compressionParameters cParams;
94617 -       ZSTD_frameParameters fParams;
94618 -} ZSTD_parameters;
94619 +typedef ZSTD_parameters zstd_parameters;
94621  /**
94622 - * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level
94623 - * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
94624 - * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
94625 - * @dictSize:         The dictionary size or 0 if a dictionary isn't being used.
94626 + * zstd_get_params() - returns zstd_parameters for selected level
94627 + * @level:              The compression level
94628 + * @estimated_src_size: The estimated source size to compress or 0
94629 + *                      if unknown.
94630   *
94631 - * Return:            The selected ZSTD_compressionParameters.
94632 + * Return:              The selected zstd_parameters.
94633   */
94634 -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel,
94635 -       unsigned long long estimatedSrcSize, size_t dictSize);
94636 +zstd_parameters zstd_get_params(int level,
94637 +       unsigned long long estimated_src_size);
94639 -/**
94640 - * ZSTD_getParams() - returns ZSTD_parameters for selected level
94641 - * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
94642 - * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
94643 - * @dictSize:         The dictionary size or 0 if a dictionary isn't being used.
94644 - *
94645 - * The same as ZSTD_getCParams() except also selects the default frame
94646 - * parameters (all zero).
94647 - *
94648 - * Return:            The selected ZSTD_parameters.
94649 - */
94650 -ZSTD_parameters ZSTD_getParams(int compressionLevel,
94651 -       unsigned long long estimatedSrcSize, size_t dictSize);
94652 +/* ======   Single-pass Compression   ====== */
94654 -/*-*************************************
94655 - * Explicit memory management
94656 - **************************************/
94657 +typedef ZSTD_CCtx zstd_cctx;
94659  /**
94660 - * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx
94661 - * @cParams: The compression parameters to be used for compression.
94662 + * zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx
94663 + * @parameters: The compression parameters to be used.
94664   *
94665   * If multiple compression parameters might be used, the caller must call
94666 - * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum
94667 + * zstd_cctx_workspace_bound() for each set of parameters and use the maximum
94668   * size.
94669   *
94670 - * Return:   A lower bound on the size of the workspace that is passed to
94671 - *           ZSTD_initCCtx().
94672 + * Return:      A lower bound on the size of the workspace that is passed to
94673 + *              zstd_init_cctx().
94674   */
94675 -size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams);
94676 +size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters);
94678  /**
94679 - * struct ZSTD_CCtx - the zstd compression context
94680 - *
94681 - * When compressing many times it is recommended to allocate a context just once
94682 - * and reuse it for each successive compression operation.
94683 - */
94684 -typedef struct ZSTD_CCtx_s ZSTD_CCtx;
94685 -/**
94686 - * ZSTD_initCCtx() - initialize a zstd compression context
94687 - * @workspace:     The workspace to emplace the context into. It must outlive
94688 - *                 the returned context.
94689 - * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to
94690 - *                 determine how large the workspace must be.
94691 - *
94692 - * Return:         A compression context emplaced into workspace.
94693 - */
94694 -ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize);
94696 -/**
94697 - * ZSTD_compressCCtx() - compress src into dst
94698 - * @ctx:         The context. Must have been initialized with a workspace at
94699 - *               least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
94700 - * @dst:         The buffer to compress src into.
94701 - * @dstCapacity: The size of the destination buffer. May be any size, but
94702 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
94703 - * @src:         The data to compress.
94704 - * @srcSize:     The size of the data to compress.
94705 - * @params:      The parameters to use for compression. See ZSTD_getParams().
94706 - *
94707 - * Return:       The compressed size or an error, which can be checked using
94708 - *               ZSTD_isError().
94709 - */
94710 -size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
94711 -       const void *src, size_t srcSize, ZSTD_parameters params);
94713 -/**
94714 - * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx
94715 - *
94716 - * Return: A lower bound on the size of the workspace that is passed to
94717 - *         ZSTD_initDCtx().
94718 - */
94719 -size_t ZSTD_DCtxWorkspaceBound(void);
94721 -/**
94722 - * struct ZSTD_DCtx - the zstd decompression context
94723 - *
94724 - * When decompressing many times it is recommended to allocate a context just
94725 - * once and reuse it for each successive decompression operation.
94726 - */
94727 -typedef struct ZSTD_DCtx_s ZSTD_DCtx;
94728 -/**
94729 - * ZSTD_initDCtx() - initialize a zstd decompression context
94730 - * @workspace:     The workspace to emplace the context into. It must outlive
94731 - *                 the returned context.
94732 - * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to
94733 - *                 determine how large the workspace must be.
94734 - *
94735 - * Return:         A decompression context emplaced into workspace.
94736 - */
94737 -ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize);
94739 -/**
94740 - * ZSTD_decompressDCtx() - decompress zstd compressed src into dst
94741 - * @ctx:         The decompression context.
94742 - * @dst:         The buffer to decompress src into.
94743 - * @dstCapacity: The size of the destination buffer. Must be at least as large
94744 - *               as the decompressed size. If the caller cannot upper bound the
94745 - *               decompressed size, then it's better to use the streaming API.
94746 - * @src:         The zstd compressed data to decompress. Multiple concatenated
94747 - *               frames and skippable frames are allowed.
94748 - * @srcSize:     The exact size of the data to decompress.
94749 - *
94750 - * Return:       The decompressed size or an error, which can be checked using
94751 - *               ZSTD_isError().
94752 - */
94753 -size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
94754 -       const void *src, size_t srcSize);
94756 -/*-************************
94757 - * Simple dictionary API
94758 - **************************/
94760 -/**
94761 - * ZSTD_compress_usingDict() - compress src into dst using a dictionary
94762 - * @ctx:         The context. Must have been initialized with a workspace at
94763 - *               least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
94764 - * @dst:         The buffer to compress src into.
94765 - * @dstCapacity: The size of the destination buffer. May be any size, but
94766 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
94767 - * @src:         The data to compress.
94768 - * @srcSize:     The size of the data to compress.
94769 - * @dict:        The dictionary to use for compression.
94770 - * @dictSize:    The size of the dictionary.
94771 - * @params:      The parameters to use for compression. See ZSTD_getParams().
94772 - *
94773 - * Compression using a predefined dictionary. The same dictionary must be used
94774 - * during decompression.
94775 - *
94776 - * Return:       The compressed size or an error, which can be checked using
94777 - *               ZSTD_isError().
94778 - */
94779 -size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
94780 -       const void *src, size_t srcSize, const void *dict, size_t dictSize,
94781 -       ZSTD_parameters params);
94783 -/**
94784 - * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary
94785 - * @ctx:         The decompression context.
94786 - * @dst:         The buffer to decompress src into.
94787 - * @dstCapacity: The size of the destination buffer. Must be at least as large
94788 - *               as the decompressed size. If the caller cannot upper bound the
94789 - *               decompressed size, then it's better to use the streaming API.
94790 - * @src:         The zstd compressed data to decompress. Multiple concatenated
94791 - *               frames and skippable frames are allowed.
94792 - * @srcSize:     The exact size of the data to decompress.
94793 - * @dict:        The dictionary to use for decompression. The same dictionary
94794 - *               must've been used to compress the data.
94795 - * @dictSize:    The size of the dictionary.
94796 - *
94797 - * Return:       The decompressed size or an error, which can be checked using
94798 - *               ZSTD_isError().
94799 - */
94800 -size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
94801 -       const void *src, size_t srcSize, const void *dict, size_t dictSize);
94803 -/*-**************************
94804 - * Fast dictionary API
94805 - ***************************/
94807 -/**
94808 - * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict
94809 - * @cParams: The compression parameters to be used for compression.
94810 + * zstd_init_cctx() - initialize a zstd compression context
94811 + * @workspace:      The workspace to emplace the context into. It must outlive
94812 + *                  the returned context.
94813 + * @workspace_size: The size of workspace. Use zstd_cctx_workspace_bound() to
94814 + *                  determine how large the workspace must be.
94815   *
94816 - * Return:   A lower bound on the size of the workspace that is passed to
94817 - *           ZSTD_initCDict().
94818 - */
94819 -size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams);
94821 -/**
94822 - * struct ZSTD_CDict - a digested dictionary to be used for compression
94823 + * Return:          A zstd compression context or NULL on error.
94824   */
94825 -typedef struct ZSTD_CDict_s ZSTD_CDict;
94826 +zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
94828  /**
94829 - * ZSTD_initCDict() - initialize a digested dictionary for compression
94830 - * @dictBuffer:    The dictionary to digest. The buffer is referenced by the
94831 - *                 ZSTD_CDict so it must outlive the returned ZSTD_CDict.
94832 - * @dictSize:      The size of the dictionary.
94833 - * @params:        The parameters to use for compression. See ZSTD_getParams().
94834 - * @workspace:     The workspace. It must outlive the returned ZSTD_CDict.
94835 - * @workspaceSize: The workspace size. Must be at least
94836 - *                 ZSTD_CDictWorkspaceBound(params.cParams).
94837 + * zstd_compress_cctx() - compress src into dst with the initialized parameters
94838 + * @cctx:         The context. Must have been initialized with zstd_init_cctx().
94839 + * @dst:          The buffer to compress src into.
94840 + * @dst_capacity: The size of the destination buffer. May be any size, but
94841 + *                ZSTD_compressBound(srcSize) is guaranteed to be large enough.
94842 + * @src:          The data to compress.
94843 + * @src_size:     The size of the data to compress.
94844 + * @parameters:   The compression parameters to be used.
94845   *
94846 - * When compressing multiple messages / blocks with the same dictionary it is
94847 - * recommended to load it just once. The ZSTD_CDict merely references the
94848 - * dictBuffer, so it must outlive the returned ZSTD_CDict.
94849 - *
94850 - * Return:         The digested dictionary emplaced into workspace.
94851 + * Return:        The compressed size or an error, which can be checked using
94852 + *                zstd_is_error().
94853   */
94854 -ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize,
94855 -       ZSTD_parameters params, void *workspace, size_t workspaceSize);
94856 +size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
94857 +       const void *src, size_t src_size, const zstd_parameters *parameters);
94859 -/**
94860 - * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict
94861 - * @ctx:         The context. Must have been initialized with a workspace at
94862 - *               least as large as ZSTD_CCtxWorkspaceBound(cParams) where
94863 - *               cParams are the compression parameters used to initialize the
94864 - *               cdict.
94865 - * @dst:         The buffer to compress src into.
94866 - * @dstCapacity: The size of the destination buffer. May be any size, but
94867 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
94868 - * @src:         The data to compress.
94869 - * @srcSize:     The size of the data to compress.
94870 - * @cdict:       The digested dictionary to use for compression.
94871 - * @params:      The parameters to use for compression. See ZSTD_getParams().
94872 - *
94873 - * Compression using a digested dictionary. The same dictionary must be used
94874 - * during decompression.
94875 - *
94876 - * Return:       The compressed size or an error, which can be checked using
94877 - *               ZSTD_isError().
94878 - */
94879 -size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
94880 -       const void *src, size_t srcSize, const ZSTD_CDict *cdict);
94881 +/* ======   Single-pass Decompression   ====== */
94883 +typedef ZSTD_DCtx zstd_dctx;
94885  /**
94886 - * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict
94887 + * zstd_dctx_workspace_bound() - max memory needed to initialize a zstd_dctx
94888   *
94889 - * Return:  A lower bound on the size of the workspace that is passed to
94890 - *          ZSTD_initDDict().
94891 - */
94892 -size_t ZSTD_DDictWorkspaceBound(void);
94894 -/**
94895 - * struct ZSTD_DDict - a digested dictionary to be used for decompression
94896 + * Return: A lower bound on the size of the workspace that is passed to
94897 + *         zstd_init_dctx().
94898   */
94899 -typedef struct ZSTD_DDict_s ZSTD_DDict;
94900 +size_t zstd_dctx_workspace_bound(void);
94902  /**
94903 - * ZSTD_initDDict() - initialize a digested dictionary for decompression
94904 - * @dictBuffer:    The dictionary to digest. The buffer is referenced by the
94905 - *                 ZSTD_DDict so it must outlive the returned ZSTD_DDict.
94906 - * @dictSize:      The size of the dictionary.
94907 - * @workspace:     The workspace. It must outlive the returned ZSTD_DDict.
94908 - * @workspaceSize: The workspace size. Must be at least
94909 - *                 ZSTD_DDictWorkspaceBound().
94910 - *
94911 - * When decompressing multiple messages / blocks with the same dictionary it is
94912 - * recommended to load it just once. The ZSTD_DDict merely references the
94913 - * dictBuffer, so it must outlive the returned ZSTD_DDict.
94914 + * zstd_init_dctx() - initialize a zstd decompression context
94915 + * @workspace:      The workspace to emplace the context into. It must outlive
94916 + *                  the returned context.
94917 + * @workspace_size: The size of workspace. Use zstd_dctx_workspace_bound() to
94918 + *                  determine how large the workspace must be.
94919   *
94920 - * Return:         The digested dictionary emplaced into workspace.
94921 + * Return:          A zstd decompression context or NULL on error.
94922   */
94923 -ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize,
94924 -       void *workspace, size_t workspaceSize);
94925 +zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
94927  /**
94928 - * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict
94929 - * @ctx:         The decompression context.
94930 - * @dst:         The buffer to decompress src into.
94931 - * @dstCapacity: The size of the destination buffer. Must be at least as large
94932 - *               as the decompressed size. If the caller cannot upper bound the
94933 - *               decompressed size, then it's better to use the streaming API.
94934 - * @src:         The zstd compressed data to decompress. Multiple concatenated
94935 - *               frames and skippable frames are allowed.
94936 - * @srcSize:     The exact size of the data to decompress.
94937 - * @ddict:       The digested dictionary to use for decompression. The same
94938 - *               dictionary must've been used to compress the data.
94939 + * zstd_decompress_dctx() - decompress zstd compressed src into dst
94940 + * @dctx:         The decompression context.
94941 + * @dst:          The buffer to decompress src into.
94942 + * @dst_capacity: The size of the destination buffer. Must be at least as large
94943 + *                as the decompressed size. If the caller cannot upper bound the
94944 + *                decompressed size, then it's better to use the streaming API.
94945 + * @src:          The zstd compressed data to decompress. Multiple concatenated
94946 + *                frames and skippable frames are allowed.
94947 + * @src_size:     The exact size of the data to decompress.
94948   *
94949 - * Return:       The decompressed size or an error, which can be checked using
94950 - *               ZSTD_isError().
94951 + * Return:        The decompressed size or an error, which can be checked using
94952 + *                zstd_is_error().
94953   */
94954 -size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst,
94955 -       size_t dstCapacity, const void *src, size_t srcSize,
94956 -       const ZSTD_DDict *ddict);
94957 +size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
94958 +       const void *src, size_t src_size);
94961 -/*-**************************
94962 - * Streaming
94963 - ***************************/
94964 +/* ======   Streaming Buffers   ====== */
94966  /**
94967 - * struct ZSTD_inBuffer - input buffer for streaming
94968 + * struct zstd_in_buffer - input buffer for streaming
94969   * @src:  Start of the input buffer.
94970   * @size: Size of the input buffer.
94971   * @pos:  Position where reading stopped. Will be updated.
94972   *        Necessarily 0 <= pos <= size.
94973 + *
94974 + * See zstd_lib.h.
94975   */
94976 -typedef struct ZSTD_inBuffer_s {
94977 -       const void *src;
94978 -       size_t size;
94979 -       size_t pos;
94980 -} ZSTD_inBuffer;
94981 +typedef ZSTD_inBuffer zstd_in_buffer;
94983  /**
94984 - * struct ZSTD_outBuffer - output buffer for streaming
94985 + * struct zstd_out_buffer - output buffer for streaming
94986   * @dst:  Start of the output buffer.
94987   * @size: Size of the output buffer.
94988   * @pos:  Position where writing stopped. Will be updated.
94989   *        Necessarily 0 <= pos <= size.
94990 + *
94991 + * See zstd_lib.h.
94992   */
94993 -typedef struct ZSTD_outBuffer_s {
94994 -       void *dst;
94995 -       size_t size;
94996 -       size_t pos;
94997 -} ZSTD_outBuffer;
94998 +typedef ZSTD_outBuffer zstd_out_buffer;
95000 +/* ======   Streaming Compression   ====== */
95003 -/*-*****************************************************************************
95004 - * Streaming compression - HowTo
95005 - *
95006 - * A ZSTD_CStream object is required to track streaming operation.
95007 - * Use ZSTD_initCStream() to initialize a ZSTD_CStream object.
95008 - * ZSTD_CStream objects can be reused multiple times on consecutive compression
95009 - * operations. It is recommended to re-use ZSTD_CStream in situations where many
95010 - * streaming operations will be achieved consecutively. Use one separate
95011 - * ZSTD_CStream per thread for parallel execution.
95012 - *
95013 - * Use ZSTD_compressStream() repetitively to consume input stream.
95014 - * The function will automatically update both `pos` fields.
95015 - * Note that it may not consume the entire input, in which case `pos < size`,
95016 - * and it's up to the caller to present again remaining data.
95017 - * It returns a hint for the preferred number of bytes to use as an input for
95018 - * the next function call.
95019 - *
95020 - * At any moment, it's possible to flush whatever data remains within internal
95021 - * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might
95022 - * still be some content left within the internal buffer if `output->size` is
95023 - * too small. It returns the number of bytes left in the internal buffer and
95024 - * must be called until it returns 0.
95025 - *
95026 - * ZSTD_endStream() instructs to finish a frame. It will perform a flush and
95027 - * write frame epilogue. The epilogue is required for decoders to consider a
95028 - * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush
95029 - * the full content if `output->size` is too small. In which case, call again
95030 - * ZSTD_endStream() to complete the flush. It returns the number of bytes left
95031 - * in the internal buffer and must be called until it returns 0.
95032 - ******************************************************************************/
95033 +typedef ZSTD_CStream zstd_cstream;
95035  /**
95036 - * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream
95037 - * @cParams: The compression parameters to be used for compression.
95038 + * zstd_cstream_workspace_bound() - memory needed to initialize a zstd_cstream
95039 + * @cparams: The compression parameters to be used for compression.
95040   *
95041   * Return:   A lower bound on the size of the workspace that is passed to
95042 - *           ZSTD_initCStream() and ZSTD_initCStream_usingCDict().
95043 - */
95044 -size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams);
95046 -/**
95047 - * struct ZSTD_CStream - the zstd streaming compression context
95048 - */
95049 -typedef struct ZSTD_CStream_s ZSTD_CStream;
95051 -/*===== ZSTD_CStream management functions =====*/
95052 -/**
95053 - * ZSTD_initCStream() - initialize a zstd streaming compression context
95054 - * @params:         The zstd compression parameters.
95055 - * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must
95056 - *                  pass the source size (zero means empty source). Otherwise,
95057 - *                  the caller may optionally pass the source size, or zero if
95058 - *                  unknown.
95059 - * @workspace:      The workspace to emplace the context into. It must outlive
95060 - *                  the returned context.
95061 - * @workspaceSize:  The size of workspace.
95062 - *                  Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine
95063 - *                  how large the workspace must be.
95064 - *
95065 - * Return:          The zstd streaming compression context.
95066 + *           zstd_init_cstream().
95067   */
95068 -ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params,
95069 -       unsigned long long pledgedSrcSize, void *workspace,
95070 -       size_t workspaceSize);
95071 +size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams);
95073  /**
95074 - * ZSTD_initCStream_usingCDict() - initialize a streaming compression context
95075 - * @cdict:          The digested dictionary to use for compression.
95076 - * @pledgedSrcSize: Optionally the source size, or zero if unknown.
95077 - * @workspace:      The workspace to emplace the context into. It must outlive
95078 - *                  the returned context.
95079 - * @workspaceSize:  The size of workspace. Call ZSTD_CStreamWorkspaceBound()
95080 - *                  with the cParams used to initialize the cdict to determine
95081 - *                  how large the workspace must be.
95082 + * zstd_init_cstream() - initialize a zstd streaming compression context
95083 + * @parameters        The zstd parameters to use for compression.
95084 + * @pledged_src_size: If params.fParams.contentSizeFlag == 1 then the caller
95085 + *                    must pass the source size (zero means empty source).
95086 + *                    Otherwise, the caller may optionally pass the source
95087 + *                    size, or zero if unknown.
95088 + * @workspace:        The workspace to emplace the context into. It must outlive
95089 + *                    the returned context.
95090 + * @workspace_size:   The size of workspace.
95091 + *                    Use zstd_cstream_workspace_bound(params->cparams) to
95092 + *                    determine how large the workspace must be.
95093   *
95094 - * Return:          The zstd streaming compression context.
95095 + * Return:            The zstd streaming compression context or NULL on error.
95096   */
95097 -ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict,
95098 -       unsigned long long pledgedSrcSize, void *workspace,
95099 -       size_t workspaceSize);
95100 +zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
95101 +       unsigned long long pledged_src_size, void *workspace, size_t workspace_size);
95103 -/*===== Streaming compression functions =====*/
95104  /**
95105 - * ZSTD_resetCStream() - reset the context using parameters from creation
95106 - * @zcs:            The zstd streaming compression context to reset.
95107 - * @pledgedSrcSize: Optionally the source size, or zero if unknown.
95108 + * zstd_reset_cstream() - reset the context using parameters from creation
95109 + * @cstream:          The zstd streaming compression context to reset.
95110 + * @pledged_src_size: Optionally the source size, or zero if unknown.
95111   *
95112   * Resets the context using the parameters from creation. Skips dictionary
95113 - * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame
95114 + * loading, since it can be reused. If `pledged_src_size` is non-zero the frame
95115   * content size is always written into the frame header.
95116   *
95117 - * Return:          Zero or an error, which can be checked using ZSTD_isError().
95118 + * Return:            Zero or an error, which can be checked using
95119 + *                    zstd_is_error().
95120   */
95121 -size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize);
95122 +size_t zstd_reset_cstream(zstd_cstream *cstream,
95123 +       unsigned long long pledged_src_size);
95125  /**
95126 - * ZSTD_compressStream() - streaming compress some of input into output
95127 - * @zcs:    The zstd streaming compression context.
95128 - * @output: Destination buffer. `output->pos` is updated to indicate how much
95129 - *          compressed data was written.
95130 - * @input:  Source buffer. `input->pos` is updated to indicate how much data was
95131 - *          read. Note that it may not consume the entire input, in which case
95132 - *          `input->pos < input->size`, and it's up to the caller to present
95133 - *          remaining data again.
95134 + * zstd_compress_stream() - streaming compress some of input into output
95135 + * @cstream: The zstd streaming compression context.
95136 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
95137 + *           compressed data was written.
95138 + * @input:   Source buffer. `input->pos` is updated to indicate how much data
95139 + *           was read. Note that it may not consume the entire input, in which
95140 + *           case `input->pos < input->size`, and it's up to the caller to
95141 + *           present remaining data again.
95142   *
95143   * The `input` and `output` buffers may be any size. Guaranteed to make some
95144   * forward progress if `input` and `output` are not empty.
95145   *
95146 - * Return:  A hint for the number of bytes to use as the input for the next
95147 - *          function call or an error, which can be checked using
95148 - *          ZSTD_isError().
95149 + * Return:   A hint for the number of bytes to use as the input for the next
95150 + *           function call or an error, which can be checked using
95151 + *           zstd_is_error().
95152   */
95153 -size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output,
95154 -       ZSTD_inBuffer *input);
95155 +size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
95156 +       zstd_in_buffer *input);
95158  /**
95159 - * ZSTD_flushStream() - flush internal buffers into output
95160 - * @zcs:    The zstd streaming compression context.
95161 - * @output: Destination buffer. `output->pos` is updated to indicate how much
95162 - *          compressed data was written.
95163 + * zstd_flush_stream() - flush internal buffers into output
95164 + * @cstream: The zstd streaming compression context.
95165 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
95166 + *           compressed data was written.
95167   *
95168 - * ZSTD_flushStream() must be called until it returns 0, meaning all the data
95169 - * has been flushed. Since ZSTD_flushStream() causes a block to be ended,
95170 + * zstd_flush_stream() must be called until it returns 0, meaning all the data
95171 + * has been flushed. Since zstd_flush_stream() causes a block to be ended,
95172   * calling it too often will degrade the compression ratio.
95173   *
95174 - * Return:  The number of bytes still present within internal buffers or an
95175 - *          error, which can be checked using ZSTD_isError().
95176 + * Return:   The number of bytes still present within internal buffers or an
95177 + *           error, which can be checked using zstd_is_error().
95178   */
95179 -size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
95180 -/**
95181 - * ZSTD_endStream() - flush internal buffers into output and end the frame
95182 - * @zcs:    The zstd streaming compression context.
95183 - * @output: Destination buffer. `output->pos` is updated to indicate how much
95184 - *          compressed data was written.
95185 - *
95186 - * ZSTD_endStream() must be called until it returns 0, meaning all the data has
95187 - * been flushed and the frame epilogue has been written.
95188 - *
95189 - * Return:  The number of bytes still present within internal buffers or an
95190 - *          error, which can be checked using ZSTD_isError().
95191 - */
95192 -size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
95193 +size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output);
95195  /**
95196 - * ZSTD_CStreamInSize() - recommended size for the input buffer
95197 - *
95198 - * Return: The recommended size for the input buffer.
95199 - */
95200 -size_t ZSTD_CStreamInSize(void);
95201 -/**
95202 - * ZSTD_CStreamOutSize() - recommended size for the output buffer
95203 + * zstd_end_stream() - flush internal buffers into output and end the frame
95204 + * @cstream: The zstd streaming compression context.
95205 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
95206 + *           compressed data was written.
95207   *
95208 - * When the output buffer is at least this large, it is guaranteed to be large
95209 - * enough to flush at least one complete compressed block.
95210 + * zstd_end_stream() must be called until it returns 0, meaning all the data has
95211 + * been flushed and the frame epilogue has been written.
95212   *
95213 - * Return: The recommended size for the output buffer.
95214 + * Return:   The number of bytes still present within internal buffers or an
95215 + *           error, which can be checked using zstd_is_error().
95216   */
95217 -size_t ZSTD_CStreamOutSize(void);
95218 +size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output);
95220 +/* ======   Streaming Decompression   ====== */
95223 -/*-*****************************************************************************
95224 - * Streaming decompression - HowTo
95225 - *
95226 - * A ZSTD_DStream object is required to track streaming operations.
95227 - * Use ZSTD_initDStream() to initialize a ZSTD_DStream object.
95228 - * ZSTD_DStream objects can be re-used multiple times.
95229 - *
95230 - * Use ZSTD_decompressStream() repetitively to consume your input.
95231 - * The function will update both `pos` fields.
95232 - * If `input->pos < input->size`, some input has not been consumed.
95233 - * It's up to the caller to present again remaining data.
95234 - * If `output->pos < output->size`, decoder has flushed everything it could.
95235 - * Returns 0 iff a frame is completely decoded and fully flushed.
95236 - * Otherwise it returns a suggested next input size that will never load more
95237 - * than the current frame.
95238 - ******************************************************************************/
95239 +typedef ZSTD_DStream zstd_dstream;
95241  /**
95242 - * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream
95243 - * @maxWindowSize: The maximum window size allowed for compressed frames.
95244 + * zstd_dstream_workspace_bound() - memory needed to initialize a zstd_dstream
95245 + * @max_window_size: The maximum window size allowed for compressed frames.
95246   *
95247 - * Return:         A lower bound on the size of the workspace that is passed to
95248 - *                 ZSTD_initDStream() and ZSTD_initDStream_usingDDict().
95249 + * Return:           A lower bound on the size of the workspace that is passed
95250 + *                   to zstd_init_dstream().
95251   */
95252 -size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize);
95253 +size_t zstd_dstream_workspace_bound(size_t max_window_size);
95255  /**
95256 - * struct ZSTD_DStream - the zstd streaming decompression context
95257 - */
95258 -typedef struct ZSTD_DStream_s ZSTD_DStream;
95259 -/*===== ZSTD_DStream management functions =====*/
95260 -/**
95261 - * ZSTD_initDStream() - initialize a zstd streaming decompression context
95262 - * @maxWindowSize: The maximum window size allowed for compressed frames.
95263 - * @workspace:     The workspace to emplace the context into. It must outlive
95264 - *                 the returned context.
95265 - * @workspaceSize: The size of workspace.
95266 - *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
95267 - *                 how large the workspace must be.
95268 - *
95269 - * Return:         The zstd streaming decompression context.
95270 - */
95271 -ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace,
95272 -       size_t workspaceSize);
95273 -/**
95274 - * ZSTD_initDStream_usingDDict() - initialize streaming decompression context
95275 - * @maxWindowSize: The maximum window size allowed for compressed frames.
95276 - * @ddict:         The digested dictionary to use for decompression.
95277 - * @workspace:     The workspace to emplace the context into. It must outlive
95278 - *                 the returned context.
95279 - * @workspaceSize: The size of workspace.
95280 - *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
95281 - *                 how large the workspace must be.
95282 + * zstd_init_dstream() - initialize a zstd streaming decompression context
95283 + * @max_window_size: The maximum window size allowed for compressed frames.
95284 + * @workspace:       The workspace to emplace the context into. It must outlive
95285 + *                   the returned context.
95286 + * @workspaceSize:   The size of workspace.
95287 + *                   Use zstd_dstream_workspace_bound(max_window_size) to
95288 + *                   determine how large the workspace must be.
95289   *
95290 - * Return:         The zstd streaming decompression context.
95291 + * Return:           The zstd streaming decompression context.
95292   */
95293 -ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize,
95294 -       const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize);
95295 +zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
95296 +       size_t workspace_size);
95298 -/*===== Streaming decompression functions =====*/
95299  /**
95300 - * ZSTD_resetDStream() - reset the context using parameters from creation
95301 - * @zds:   The zstd streaming decompression context to reset.
95302 + * zstd_reset_dstream() - reset the context using parameters from creation
95303 + * @dstream: The zstd streaming decompression context to reset.
95304   *
95305   * Resets the context using the parameters from creation. Skips dictionary
95306   * loading, since it can be reused.
95307   *
95308 - * Return: Zero or an error, which can be checked using ZSTD_isError().
95309 + * Return:   Zero or an error, which can be checked using zstd_is_error().
95310   */
95311 -size_t ZSTD_resetDStream(ZSTD_DStream *zds);
95312 +size_t zstd_reset_dstream(zstd_dstream *dstream);
95314  /**
95315 - * ZSTD_decompressStream() - streaming decompress some of input into output
95316 - * @zds:    The zstd streaming decompression context.
95317 - * @output: Destination buffer. `output.pos` is updated to indicate how much
95318 - *          decompressed data was written.
95319 - * @input:  Source buffer. `input.pos` is updated to indicate how much data was
95320 - *          read. Note that it may not consume the entire input, in which case
95321 - *          `input.pos < input.size`, and it's up to the caller to present
95322 - *          remaining data again.
95323 + * zstd_decompress_stream() - streaming decompress some of input into output
95324 + * @dstream: The zstd streaming decompression context.
95325 + * @output:  Destination buffer. `output.pos` is updated to indicate how much
95326 + *           decompressed data was written.
95327 + * @input:   Source buffer. `input.pos` is updated to indicate how much data was
95328 + *           read. Note that it may not consume the entire input, in which case
95329 + *           `input.pos < input.size`, and it's up to the caller to present
95330 + *           remaining data again.
95331   *
95332   * The `input` and `output` buffers may be any size. Guaranteed to make some
95333   * forward progress if `input` and `output` are not empty.
95334 - * ZSTD_decompressStream() will not consume the last byte of the frame until
95335 + * zstd_decompress_stream() will not consume the last byte of the frame until
95336   * the entire frame is flushed.
95337   *
95338 - * Return:  Returns 0 iff a frame is completely decoded and fully flushed.
95339 - *          Otherwise returns a hint for the number of bytes to use as the input
95340 - *          for the next function call or an error, which can be checked using
95341 - *          ZSTD_isError(). The size hint will never load more than the frame.
95342 + * Return:   Returns 0 iff a frame is completely decoded and fully flushed.
95343 + *           Otherwise returns a hint for the number of bytes to use as the
95344 + *           input for the next function call or an error, which can be checked
95345 + *           using zstd_is_error(). The size hint will never load more than the
95346 + *           frame.
95347   */
95348 -size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output,
95349 -       ZSTD_inBuffer *input);
95350 +size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
95351 +       zstd_in_buffer *input);
95353 -/**
95354 - * ZSTD_DStreamInSize() - recommended size for the input buffer
95355 - *
95356 - * Return: The recommended size for the input buffer.
95357 - */
95358 -size_t ZSTD_DStreamInSize(void);
95359 -/**
95360 - * ZSTD_DStreamOutSize() - recommended size for the output buffer
95361 - *
95362 - * When the output buffer is at least this large, it is guaranteed to be large
95363 - * enough to flush at least one complete decompressed block.
95364 - *
95365 - * Return: The recommended size for the output buffer.
95366 - */
95367 -size_t ZSTD_DStreamOutSize(void);
95370 -/* --- Constants ---*/
95371 -#define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
95372 -#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
95374 -#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
95375 -#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
95377 -#define ZSTD_WINDOWLOG_MAX_32  27
95378 -#define ZSTD_WINDOWLOG_MAX_64  27
95379 -#define ZSTD_WINDOWLOG_MAX \
95380 -       ((unsigned int)(sizeof(size_t) == 4 \
95381 -               ? ZSTD_WINDOWLOG_MAX_32 \
95382 -               : ZSTD_WINDOWLOG_MAX_64))
95383 -#define ZSTD_WINDOWLOG_MIN 10
95384 -#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
95385 -#define ZSTD_HASHLOG_MIN        6
95386 -#define ZSTD_CHAINLOG_MAX     (ZSTD_WINDOWLOG_MAX+1)
95387 -#define ZSTD_CHAINLOG_MIN      ZSTD_HASHLOG_MIN
95388 -#define ZSTD_HASHLOG3_MAX      17
95389 -#define ZSTD_SEARCHLOG_MAX    (ZSTD_WINDOWLOG_MAX-1)
95390 -#define ZSTD_SEARCHLOG_MIN      1
95391 -/* only for ZSTD_fast, other strategies are limited to 6 */
95392 -#define ZSTD_SEARCHLENGTH_MAX   7
95393 -/* only for ZSTD_btopt, other strategies are limited to 4 */
95394 -#define ZSTD_SEARCHLENGTH_MIN   3
95395 -#define ZSTD_TARGETLENGTH_MIN   4
95396 -#define ZSTD_TARGETLENGTH_MAX 999
95398 -/* for static allocation */
95399 -#define ZSTD_FRAMEHEADERSIZE_MAX 18
95400 -#define ZSTD_FRAMEHEADERSIZE_MIN  6
95401 -#define ZSTD_frameHeaderSize_prefix 5
95402 -#define ZSTD_frameHeaderSize_min ZSTD_FRAMEHEADERSIZE_MIN
95403 -#define ZSTD_frameHeaderSize_max ZSTD_FRAMEHEADERSIZE_MAX
95404 -/* magic number + skippable frame length */
95405 -#define ZSTD_skippableHeaderSize 8
95408 -/*-*************************************
95409 - * Compressed size functions
95410 - **************************************/
95412 -/**
95413 - * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
95414 - * @src:     Source buffer. It should point to the start of a zstd encoded frame
95415 - *           or a skippable frame.
95416 - * @srcSize: The size of the source buffer. It must be at least as large as the
95417 - *           size of the frame.
95418 - *
95419 - * Return:   The compressed size of the frame pointed to by `src` or an error,
95420 - *           which can be check with ZSTD_isError().
95421 - *           Suitable to pass to ZSTD_decompress() or similar functions.
95422 - */
95423 -size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
95425 -/*-*************************************
95426 - * Decompressed size functions
95427 - **************************************/
95428 -/**
95429 - * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
95430 - * @src:     It should point to the start of a zstd encoded frame.
95431 - * @srcSize: The size of the source buffer. It must be at least as large as the
95432 - *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
95433 - *
95434 - * Return:   The frame content size stored in the frame header if known.
95435 - *           `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
95436 - *           frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
95437 - */
95438 -unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
95439 +/* ======   Frame Inspection Functions ====== */
95441  /**
95442 - * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
95443 - * @src:     It should point to the start of a series of zstd encoded and/or
95444 - *           skippable frames.
95445 - * @srcSize: The exact size of the series of frames.
95446 + * zstd_find_frame_compressed_size() - returns the size of a compressed frame
95447 + * @src:      Source buffer. It should point to the start of a zstd encoded
95448 + *            frame or a skippable frame.
95449 + * @src_size: The size of the source buffer. It must be at least as large as the
95450 + *            size of the frame.
95451   *
95452 - * If any zstd encoded frame in the series doesn't have the frame content size
95453 - * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always
95454 - * set when using ZSTD_compress(). The decompressed size can be very large.
95455 - * If the source is untrusted, the decompressed size could be wrong or
95456 - * intentionally modified. Always ensure the result fits within the
95457 - * application's authorized limits. ZSTD_findDecompressedSize() handles multiple
95458 - * frames, and so it must traverse the input to read each frame header. This is
95459 - * efficient as most of the data is skipped, however it does mean that all frame
95460 - * data must be present and valid.
95461 - *
95462 - * Return:   Decompressed size of all the data contained in the frames if known.
95463 - *           `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown.
95464 - *           `ZSTD_CONTENTSIZE_ERROR` if an error occurred.
95465 - */
95466 -unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize);
95468 -/*-*************************************
95469 - * Advanced compression functions
95470 - **************************************/
95471 -/**
95472 - * ZSTD_checkCParams() - ensure parameter values remain within authorized range
95473 - * @cParams: The zstd compression parameters.
95474 - *
95475 - * Return:   Zero or an error, which can be checked using ZSTD_isError().
95476 + * Return:    The compressed size of the frame pointed to by `src` or an error,
95477 + *            which can be check with zstd_is_error().
95478 + *            Suitable to pass to ZSTD_decompress() or similar functions.
95479   */
95480 -size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams);
95481 +size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
95483  /**
95484 - * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize
95485 - * @srcSize:  Optionally the estimated source size, or zero if unknown.
95486 - * @dictSize: Optionally the estimated dictionary size, or zero if unknown.
95487 - *
95488 - * Return:    The optimized parameters.
95489 - */
95490 -ZSTD_compressionParameters ZSTD_adjustCParams(
95491 -       ZSTD_compressionParameters cParams, unsigned long long srcSize,
95492 -       size_t dictSize);
95494 -/*--- Advanced decompression functions ---*/
95496 -/**
95497 - * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame
95498 - * @buffer: The source buffer to check.
95499 - * @size:   The size of the source buffer, must be at least 4 bytes.
95500 - *
95501 - * Return: True iff the buffer starts with a zstd or skippable frame identifier.
95502 - */
95503 -unsigned int ZSTD_isFrame(const void *buffer, size_t size);
95505 -/**
95506 - * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary
95507 - * @dict:     The dictionary buffer.
95508 - * @dictSize: The size of the dictionary buffer.
95509 - *
95510 - * Return:    The dictionary id stored within the dictionary or 0 if the
95511 - *            dictionary is not a zstd dictionary. If it returns 0 the
95512 - *            dictionary can still be loaded as a content-only dictionary.
95513 - */
95514 -unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize);
95516 -/**
95517 - * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict
95518 - * @ddict: The ddict to find the id of.
95519 - *
95520 - * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not
95521 - *         a zstd dictionary. If it returns 0 `ddict` will be loaded as a
95522 - *         content-only dictionary.
95523 - */
95524 -unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
95526 -/**
95527 - * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
95528 - * @src:     Source buffer. It must be a zstd encoded frame.
95529 - * @srcSize: The size of the source buffer. It must be at least as large as the
95530 - *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
95531 - *
95532 - * Return:   The dictionary id required to decompress the frame stored within
95533 - *           `src` or 0 if the dictionary id could not be decoded. It can return
95534 - *           0 if the frame does not require a dictionary, the dictionary id
95535 - *           wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
95536 - *           is too small.
95537 - */
95538 -unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
95540 -/**
95541 - * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
95542 - * @frameContentSize: The frame content size, or 0 if not present.
95543 + * struct zstd_frame_params - zstd frame parameters stored in the frame header
95544 + * @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not
95545 + *                    present.
95546   * @windowSize:       The window size, or 0 if the frame is a skippable frame.
95547 + * @blockSizeMax:     The maximum block size.
95548 + * @frameType:        The frame type (zstd or skippable)
95549 + * @headerSize:       The size of the frame header.
95550   * @dictID:           The dictionary id, or 0 if not present.
95551   * @checksumFlag:     Whether a checksum was used.
95552 + *
95553 + * See zstd_lib.h.
95554   */
95555 -typedef struct {
95556 -       unsigned long long frameContentSize;
95557 -       unsigned int windowSize;
95558 -       unsigned int dictID;
95559 -       unsigned int checksumFlag;
95560 -} ZSTD_frameParams;
95561 +typedef ZSTD_frameHeader zstd_frame_header;
95563  /**
95564 - * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
95565 - * @fparamsPtr: On success the frame parameters are written here.
95566 - * @src:        The source buffer. It must point to a zstd or skippable frame.
95567 - * @srcSize:    The size of the source buffer. `ZSTD_frameHeaderSize_max` is
95568 - *              always large enough to succeed.
95569 + * zstd_get_frame_header() - extracts parameters from a zstd or skippable frame
95570 + * @params:   On success the frame parameters are written here.
95571 + * @src:      The source buffer. It must point to a zstd or skippable frame.
95572 + * @src_size: The size of the source buffer.
95573   *
95574 - * Return:      0 on success. If more data is required it returns how many bytes
95575 - *              must be provided to make forward progress. Otherwise it returns
95576 - *              an error, which can be checked using ZSTD_isError().
95577 + * Return:    0 on success. If more data is required it returns how many bytes
95578 + *            must be provided to make forward progress. Otherwise it returns
95579 + *            an error, which can be checked using zstd_is_error().
95580   */
95581 -size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
95582 -       size_t srcSize);
95584 -/*-*****************************************************************************
95585 - * Buffer-less and synchronous inner streaming functions
95586 - *
95587 - * This is an advanced API, giving full control over buffer management, for
95588 - * users which need direct control over memory.
95589 - * But it's also a complex one, with many restrictions (documented below).
95590 - * Prefer using normal streaming API for an easier experience
95591 - ******************************************************************************/
95593 -/*-*****************************************************************************
95594 - * Buffer-less streaming compression (synchronous mode)
95595 - *
95596 - * A ZSTD_CCtx object is required to track streaming operations.
95597 - * Use ZSTD_initCCtx() to initialize a context.
95598 - * ZSTD_CCtx object can be re-used multiple times within successive compression
95599 - * operations.
95600 - *
95601 - * Start by initializing a context.
95602 - * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
95603 - * compression,
95604 - * or ZSTD_compressBegin_advanced(), for finer parameter control.
95605 - * It's also possible to duplicate a reference context which has already been
95606 - * initialized, using ZSTD_copyCCtx()
95607 - *
95608 - * Then, consume your input using ZSTD_compressContinue().
95609 - * There are some important considerations to keep in mind when using this
95610 - * advanced function :
95611 - * - ZSTD_compressContinue() has no internal buffer. It uses externally provided
95612 - *   buffer only.
95613 - * - Interface is synchronous : input is consumed entirely and produce 1+
95614 - *   (or more) compressed blocks.
95615 - * - Caller must ensure there is enough space in `dst` to store compressed data
95616 - *   under worst case scenario. Worst case evaluation is provided by
95617 - *   ZSTD_compressBound().
95618 - *   ZSTD_compressContinue() doesn't guarantee recover after a failed
95619 - *   compression.
95620 - * - ZSTD_compressContinue() presumes prior input ***is still accessible and
95621 - *   unmodified*** (up to maximum distance size, see WindowLog).
95622 - *   It remembers all previous contiguous blocks, plus one separated memory
95623 - *   segment (which can itself consists of multiple contiguous blocks)
95624 - * - ZSTD_compressContinue() detects that prior input has been overwritten when
95625 - *   `src` buffer overlaps. In which case, it will "discard" the relevant memory
95626 - *   section from its history.
95627 - *
95628 - * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
95629 - * and optional checksum. It's possible to use srcSize==0, in which case, it
95630 - * will write a final empty block to end the frame. Without last block mark,
95631 - * frames will be considered unfinished (corrupted) by decoders.
95632 - *
95633 - * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new
95634 - * frame.
95635 - ******************************************************************************/
95637 -/*=====   Buffer-less streaming compression functions  =====*/
95638 -size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
95639 -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
95640 -       size_t dictSize, int compressionLevel);
95641 -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
95642 -       size_t dictSize, ZSTD_parameters params,
95643 -       unsigned long long pledgedSrcSize);
95644 -size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
95645 -       unsigned long long pledgedSrcSize);
95646 -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
95647 -       unsigned long long pledgedSrcSize);
95648 -size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
95649 -       const void *src, size_t srcSize);
95650 -size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
95651 -       const void *src, size_t srcSize);
95655 -/*-*****************************************************************************
95656 - * Buffer-less streaming decompression (synchronous mode)
95657 - *
95658 - * A ZSTD_DCtx object is required to track streaming operations.
95659 - * Use ZSTD_initDCtx() to initialize a context.
95660 - * A ZSTD_DCtx object can be re-used multiple times.
95661 - *
95662 - * First typical operation is to retrieve frame parameters, using
95663 - * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide
95664 - * important information to correctly decode the frame, such as the minimum
95665 - * rolling buffer size to allocate to decompress data (`windowSize`), and the
95666 - * dictionary ID used.
95667 - * Note: content size is optional, it may not be present. 0 means unknown.
95668 - * Note that these values could be wrong, either because of data malformation,
95669 - * or because an attacker is spoofing deliberate false information. As a
95670 - * consequence, check that values remain within valid application range,
95671 - * especially `windowSize`, before allocation. Each application can set its own
95672 - * limit, depending on local restrictions. For extended interoperability, it is
95673 - * recommended to support at least 8 MB.
95674 - * Frame parameters are extracted from the beginning of the compressed frame.
95675 - * Data fragment must be large enough to ensure successful decoding, typically
95676 - * `ZSTD_frameHeaderSize_max` bytes.
95677 - * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled.
95678 - *        >0: `srcSize` is too small, provide at least this many bytes.
95679 - *        errorCode, which can be tested using ZSTD_isError().
95680 - *
95681 - * Start decompression, with ZSTD_decompressBegin() or
95682 - * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
95683 - * context, using ZSTD_copyDCtx().
95684 - *
95685 - * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
95686 - * alternatively.
95687 - * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
95688 - * to ZSTD_decompressContinue().
95689 - * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will
95690 - * fail.
95691 - *
95692 - * The result of ZSTD_decompressContinue() is the number of bytes regenerated
95693 - * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
95694 - * error; it just means ZSTD_decompressContinue() has decoded some metadata
95695 - * item. It can also be an error code, which can be tested with ZSTD_isError().
95696 - *
95697 - * ZSTD_decompressContinue() needs previous data blocks during decompression, up
95698 - * to `windowSize`. They should preferably be located contiguously, prior to
95699 - * current block. Alternatively, a round buffer of sufficient size is also
95700 - * possible. Sufficient size is determined by frame parameters.
95701 - * ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't
95702 - * follow each other, make sure that either the compressor breaks contiguity at
95703 - * the same place, or that previous contiguous segment is large enough to
95704 - * properly handle maximum back-reference.
95705 - *
95706 - * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
95707 - * Context can then be reset to start a new decompression.
95708 - *
95709 - * Note: it's possible to know if next input to present is a header or a block,
95710 - * using ZSTD_nextInputType(). This information is not required to properly
95711 - * decode a frame.
95712 - *
95713 - * == Special case: skippable frames ==
95714 - *
95715 - * Skippable frames allow integration of user-defined data into a flow of
95716 - * concatenated frames. Skippable frames will be ignored (skipped) by a
95717 - * decompressor. The format of skippable frames is as follows:
95718 - * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
95719 - *    0x184D2A50 to 0x184D2A5F
95720 - * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
95721 - * c) Frame Content - any content (User Data) of length equal to Frame Size
95722 - * For skippable frames ZSTD_decompressContinue() always returns 0.
95723 - * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0
95724 - * what means that a frame is skippable.
95725 - * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
95726 - *       actually be a zstd encoded frame with no content. For purposes of
95727 - *       decompression, it is valid in both cases to skip the frame using
95728 - *       ZSTD_findFrameCompressedSize() to find its size in bytes.
95729 - * It also returns frame size as fparamsPtr->frameContentSize.
95730 - ******************************************************************************/
95732 -/*=====   Buffer-less streaming decompression functions  =====*/
95733 -size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
95734 -size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
95735 -       size_t dictSize);
95736 -void   ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
95737 -size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
95738 -size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
95739 -       const void *src, size_t srcSize);
95740 -typedef enum {
95741 -       ZSTDnit_frameHeader,
95742 -       ZSTDnit_blockHeader,
95743 -       ZSTDnit_block,
95744 -       ZSTDnit_lastBlock,
95745 -       ZSTDnit_checksum,
95746 -       ZSTDnit_skippableFrame
95747 -} ZSTD_nextInputType_e;
95748 -ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
95750 -/*-*****************************************************************************
95751 - * Block functions
95752 - *
95753 - * Block functions produce and decode raw zstd blocks, without frame metadata.
95754 - * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
95755 - * very small blocks (< 100 bytes). User will have to take in charge required
95756 - * information to regenerate data, such as compressed and content sizes.
95757 - *
95758 - * A few rules to respect:
95759 - * - Compressing and decompressing require a context structure
95760 - *   + Use ZSTD_initCCtx() and ZSTD_initDCtx()
95761 - * - It is necessary to init context before starting
95762 - *   + compression : ZSTD_compressBegin()
95763 - *   + decompression : ZSTD_decompressBegin()
95764 - *   + variants _usingDict() are also allowed
95765 - *   + copyCCtx() and copyDCtx() work too
95766 - * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
95767 - *   + If you need to compress more, cut data into multiple blocks
95768 - *   + Consider using the regular ZSTD_compress() instead, as frame metadata
95769 - *     costs become negligible when source size is large.
95770 - * - When a block is considered not compressible enough, ZSTD_compressBlock()
95771 - *   result will be zero. In which case, nothing is produced into `dst`.
95772 - *   + User must test for such outcome and deal directly with uncompressed data
95773 - *   + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!!
95774 - *   + In case of multiple successive blocks, decoder must be informed of
95775 - *     uncompressed block existence to follow proper history. Use
95776 - *     ZSTD_insertBlock() in such a case.
95777 - ******************************************************************************/
95779 -/* Define for static allocation */
95780 -#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)
95781 -/*=====   Raw zstd block functions  =====*/
95782 -size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx);
95783 -size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
95784 -       const void *src, size_t srcSize);
95785 -size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
95786 -       const void *src, size_t srcSize);
95787 -size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart,
95788 -       size_t blockSize);
95789 +size_t zstd_get_frame_header(zstd_frame_header *params, const void *src,
95790 +       size_t src_size);
95792 -#endif  /* ZSTD_H */
95793 +#endif  /* LINUX_ZSTD_H */
95794 diff --git a/include/linux/zstd_errors.h b/include/linux/zstd_errors.h
95795 new file mode 100644
95796 index 000000000000..ccb92064ef03
95797 --- /dev/null
95798 +++ b/include/linux/zstd_errors.h
95799 @@ -0,0 +1,77 @@
95801 + * Copyright (c) Yann Collet, Facebook, Inc.
95802 + * All rights reserved.
95803 + *
95804 + * This source code is licensed under both the BSD-style license (found in the
95805 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
95806 + * in the COPYING file in the root directory of this source tree).
95807 + * You may select, at your option, one of the above-listed licenses.
95808 + */
95810 +#ifndef ZSTD_ERRORS_H_398273423
95811 +#define ZSTD_ERRORS_H_398273423
95814 +/*===== dependency =====*/
95815 +#include <linux/types.h>   /* size_t */
95818 +/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
95819 +#define ZSTDERRORLIB_VISIBILITY
95820 +#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
95822 +/*-*********************************************
95823 + *  Error codes list
95824 + *-*********************************************
95825 + *  Error codes _values_ are pinned down since v1.3.1 only.
95826 + *  Therefore, don't rely on values if you may link to any version < v1.3.1.
95827 + *
95828 + *  Only values < 100 are considered stable.
95829 + *
95830 + *  note 1 : this API shall be used with static linking only.
95831 + *           dynamic linking is not yet officially supported.
95832 + *  note 2 : Prefer relying on the enum than on its value whenever possible
95833 + *           This is the only supported way to use the error list < v1.3.1
95834 + *  note 3 : ZSTD_isError() is always correct, whatever the library version.
95835 + **********************************************/
95836 +typedef enum {
95837 +  ZSTD_error_no_error = 0,
95838 +  ZSTD_error_GENERIC  = 1,
95839 +  ZSTD_error_prefix_unknown                = 10,
95840 +  ZSTD_error_version_unsupported           = 12,
95841 +  ZSTD_error_frameParameter_unsupported    = 14,
95842 +  ZSTD_error_frameParameter_windowTooLarge = 16,
95843 +  ZSTD_error_corruption_detected = 20,
95844 +  ZSTD_error_checksum_wrong      = 22,
95845 +  ZSTD_error_dictionary_corrupted      = 30,
95846 +  ZSTD_error_dictionary_wrong          = 32,
95847 +  ZSTD_error_dictionaryCreation_failed = 34,
95848 +  ZSTD_error_parameter_unsupported   = 40,
95849 +  ZSTD_error_parameter_outOfBound    = 42,
95850 +  ZSTD_error_tableLog_tooLarge       = 44,
95851 +  ZSTD_error_maxSymbolValue_tooLarge = 46,
95852 +  ZSTD_error_maxSymbolValue_tooSmall = 48,
95853 +  ZSTD_error_stage_wrong       = 60,
95854 +  ZSTD_error_init_missing      = 62,
95855 +  ZSTD_error_memory_allocation = 64,
95856 +  ZSTD_error_workSpace_tooSmall= 66,
95857 +  ZSTD_error_dstSize_tooSmall = 70,
95858 +  ZSTD_error_srcSize_wrong    = 72,
95859 +  ZSTD_error_dstBuffer_null   = 74,
95860 +  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
95861 +  ZSTD_error_frameIndex_tooLarge = 100,
95862 +  ZSTD_error_seekableIO          = 102,
95863 +  ZSTD_error_dstBuffer_wrong     = 104,
95864 +  ZSTD_error_srcBuffer_wrong     = 105,
95865 +  ZSTD_error_maxCode = 120  /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
95866 +} ZSTD_ErrorCode;
95868 +/*! ZSTD_getErrorCode() :
95869 +    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
95870 +    which can be used to compare with enum list published above */
95871 +ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
95872 +ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);   /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
95876 +#endif /* ZSTD_ERRORS_H_398273423 */
95877 diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h
95878 new file mode 100644
95879 index 000000000000..d81779076217
95880 --- /dev/null
95881 +++ b/include/linux/zstd_lib.h
95882 @@ -0,0 +1,2431 @@
95884 + * Copyright (c) Yann Collet, Facebook, Inc.
95885 + * All rights reserved.
95886 + *
95887 + * This source code is licensed under both the BSD-style license (found in the
95888 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
95889 + * in the COPYING file in the root directory of this source tree).
95890 + * You may select, at your option, one of the above-listed licenses.
95891 + */
95893 +#ifndef ZSTD_H_235446
95894 +#define ZSTD_H_235446
95896 +/* ======   Dependency   ======*/
95897 +#include <linux/limits.h>   /* INT_MAX */
95898 +#include <linux/types.h>   /* size_t */
95901 +/* =====   ZSTDLIB_API : control library symbols visibility   ===== */
95902 +#define ZSTDLIB_VISIBILITY
95903 +#define ZSTDLIB_API ZSTDLIB_VISIBILITY
95906 +/*******************************************************************************
95907 +  Introduction
95909 +  zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
95910 +  real-time compression scenarios at zlib-level and better compression ratios.
95911 +  The zstd compression library provides in-memory compression and decompression
95912 +  functions.
95914 +  The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
95915 +  which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
95916 +  caution, as they require more memory. The library also offers negative
95917 +  compression levels, which extend the range of speed vs. ratio preferences.
95918 +  The lower the level, the faster the speed (at the cost of compression).
95920 +  Compression can be done in:
95921 +    - a single step (described as Simple API)
95922 +    - a single step, reusing a context (described as Explicit context)
95923 +    - unbounded multiple steps (described as Streaming compression)
95925 +  The compression ratio achievable on small data can be highly improved using
95926 +  a dictionary. Dictionary compression can be performed in:
95927 +    - a single step (described as Simple dictionary API)
95928 +    - a single step, reusing a dictionary (described as Bulk-processing
95929 +      dictionary API)
95931 +  Advanced experimental functions can be accessed using
95932 +  `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
95934 +  Advanced experimental APIs should never be used with a dynamically-linked
95935 +  library. They are not "stable"; their definitions or signatures may change in
95936 +  the future. Only static linking is allowed.
95937 +*******************************************************************************/
95939 +/*------   Version   ------*/
95940 +#define ZSTD_VERSION_MAJOR    1
95941 +#define ZSTD_VERSION_MINOR    4
95942 +#define ZSTD_VERSION_RELEASE  10
95943 +#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
95945 +/*! ZSTD_versionNumber() :
95946 + *  Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */
95947 +ZSTDLIB_API unsigned ZSTD_versionNumber(void);
95949 +#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
95950 +#define ZSTD_QUOTE(str) #str
95951 +#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
95952 +#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
95954 +/*! ZSTD_versionString() :
95955 + *  Return runtime library version, like "1.4.5". Requires v1.3.0+. */
95956 +ZSTDLIB_API const char* ZSTD_versionString(void);
95958 +/* *************************************
95959 + *  Default constant
95960 + ***************************************/
95961 +#ifndef ZSTD_CLEVEL_DEFAULT
95962 +#  define ZSTD_CLEVEL_DEFAULT 3
95963 +#endif
95965 +/* *************************************
95966 + *  Constants
95967 + ***************************************/
95969 +/* All magic numbers are supposed read/written to/from files/memory using little-endian convention */
95970 +#define ZSTD_MAGICNUMBER            0xFD2FB528    /* valid since v0.8.0 */
95971 +#define ZSTD_MAGIC_DICTIONARY       0xEC30A437    /* valid since v0.7.0 */
95972 +#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50    /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
95973 +#define ZSTD_MAGIC_SKIPPABLE_MASK   0xFFFFFFF0
95975 +#define ZSTD_BLOCKSIZELOG_MAX  17
95976 +#define ZSTD_BLOCKSIZE_MAX     (1<<ZSTD_BLOCKSIZELOG_MAX)
95980 +/***************************************
95981 +*  Simple API
95982 +***************************************/
95983 +/*! ZSTD_compress() :
95984 + *  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
95985 + *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
95986 + *  @return : compressed size written into `dst` (<= `dstCapacity),
95987 + *            or an error code if it fails (which can be tested using ZSTD_isError()). */
95988 +ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
95989 +                            const void* src, size_t srcSize,
95990 +                                  int compressionLevel);
95992 +/*! ZSTD_decompress() :
95993 + *  `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
95994 + *  `dstCapacity` is an upper bound of originalSize to regenerate.
95995 + *  If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
95996 + *  @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
95997 + *            or an errorCode if it fails (which can be tested using ZSTD_isError()). */
95998 +ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
95999 +                              const void* src, size_t compressedSize);
96001 +/*! ZSTD_getFrameContentSize() : requires v1.3.0+
96002 + *  `src` should point to the start of a ZSTD encoded frame.
96003 + *  `srcSize` must be at least as large as the frame header.
96004 + *            hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
96005 + *  @return : - decompressed size of `src` frame content, if known
96006 + *            - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
96007 + *            - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
96008 + *   note 1 : a 0 return value means the frame is valid but "empty".
96009 + *   note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
96010 + *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
96011 + *            In which case, it's necessary to use streaming mode to decompress data.
96012 + *            Optionally, application can rely on some implicit limit,
96013 + *            as ZSTD_decompress() only needs an upper bound of decompressed size.
96014 + *            (For example, data could be necessarily cut into blocks <= 16 KB).
96015 + *   note 3 : decompressed size is always present when compression is completed using single-pass functions,
96016 + *            such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
96017 + *   note 4 : decompressed size can be very large (64-bits value),
96018 + *            potentially larger than what local system can handle as a single memory segment.
96019 + *            In which case, it's necessary to use streaming mode to decompress data.
96020 + *   note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
96021 + *            Always ensure return value fits within application's authorized limits.
96022 + *            Each application can set its own limits.
96023 + *   note 6 : This function replaces ZSTD_getDecompressedSize() */
96024 +#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
96025 +#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
96026 +ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
96028 +/*! ZSTD_getDecompressedSize() :
96029 + *  NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
96030 + *  Both functions work the same way, but ZSTD_getDecompressedSize() blends
96031 + *  "empty", "unknown" and "error" results to the same return value (0),
96032 + *  while ZSTD_getFrameContentSize() gives them separate return values.
96033 + * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
96034 +ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
96036 +/*! ZSTD_findFrameCompressedSize() :
96037 + * `src` should point to the start of a ZSTD frame or skippable frame.
96038 + * `srcSize` must be >= first frame size
96039 + * @return : the compressed size of the first frame starting at `src`,
96040 + *           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
96041 + *        or an error code if input is invalid */
96042 +ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
96045 +/*======  Helper functions  ======*/
96046 +#define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
96047 +ZSTDLIB_API size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
96048 +ZSTDLIB_API unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
96049 +ZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
96050 +ZSTDLIB_API int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed */
96051 +ZSTDLIB_API int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
96054 +/***************************************
96055 +*  Explicit context
96056 +***************************************/
96057 +/*= Compression context
96058 + *  When compressing many times,
96059 + *  it is recommended to allocate a context just once,
96060 + *  and re-use it for each successive compression operation.
96061 + *  This will make workload friendlier for system's memory.
96062 + *  Note : re-using context is just a speed / resource optimization.
96063 + *         It doesn't change the compression ratio, which remains identical.
96064 + *  Note 2 : In multi-threaded environments,
96065 + *         use one different context per thread for parallel execution.
96066 + */
96067 +typedef struct ZSTD_CCtx_s ZSTD_CCtx;
96068 +ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
96069 +ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);  /* accept NULL pointer */
96071 +/*! ZSTD_compressCCtx() :
96072 + *  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
96073 + *  Important : in order to behave similarly to `ZSTD_compress()`,
96074 + *  this function compresses at requested compression level,
96075 + *  __ignoring any other parameter__ .
96076 + *  If any advanced parameter was set using the advanced API,
96077 + *  they will all be reset. Only `compressionLevel` remains.
96078 + */
96079 +ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
96080 +                                     void* dst, size_t dstCapacity,
96081 +                               const void* src, size_t srcSize,
96082 +                                     int compressionLevel);
96084 +/*= Decompression context
96085 + *  When decompressing many times,
96086 + *  it is recommended to allocate a context only once,
96087 + *  and re-use it for each successive compression operation.
96088 + *  This will make workload friendlier for system's memory.
96089 + *  Use one context per thread for parallel execution. */
96090 +typedef struct ZSTD_DCtx_s ZSTD_DCtx;
96091 +ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
96092 +ZSTDLIB_API size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);  /* accept NULL pointer */
96094 +/*! ZSTD_decompressDCtx() :
96095 + *  Same as ZSTD_decompress(),
96096 + *  requires an allocated ZSTD_DCtx.
96097 + *  Compatible with sticky parameters.
96098 + */
96099 +ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
96100 +                                       void* dst, size_t dstCapacity,
96101 +                                 const void* src, size_t srcSize);
96104 +/***************************************
96105 +*  Advanced compression API
96106 +***************************************/
96108 +/* API design :
96109 + *   Parameters are pushed one by one into an existing context,
96110 + *   using ZSTD_CCtx_set*() functions.
96111 + *   Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
96112 + *   "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
96113 + *   __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
96114 + *
96115 + *   It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
96116 + *
96117 + *   This API supercedes all other "advanced" API entry points in the experimental section.
96118 + *   In the future, we expect to remove from experimental API entry points which are redundant with this API.
96119 + */
96122 +/* Compression strategies, listed from fastest to strongest */
96123 +typedef enum { ZSTD_fast=1,
96124 +               ZSTD_dfast=2,
96125 +               ZSTD_greedy=3,
96126 +               ZSTD_lazy=4,
96127 +               ZSTD_lazy2=5,
96128 +               ZSTD_btlazy2=6,
96129 +               ZSTD_btopt=7,
96130 +               ZSTD_btultra=8,
96131 +               ZSTD_btultra2=9
96132 +               /* note : new strategies _might_ be added in the future.
96133 +                         Only the order (from fast to strong) is guaranteed */
96134 +} ZSTD_strategy;
96137 +typedef enum {
96139 +    /* compression parameters
96140 +     * Note: When compressing with a ZSTD_CDict these parameters are superseded
96141 +     * by the parameters used to construct the ZSTD_CDict.
96142 +     * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
96143 +    ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
96144 +                              * Note that exact compression parameters are dynamically determined,
96145 +                              * depending on both compression level and srcSize (when known).
96146 +                              * Default level is ZSTD_CLEVEL_DEFAULT==3.
96147 +                              * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
96148 +                              * Note 1 : it's possible to pass a negative compression level.
96149 +                              * Note 2 : setting a level does not automatically set all other compression parameters
96150 +                              *   to default. Setting this will however eventually dynamically impact the compression
96151 +                              *   parameters which have not been manually set. The manually set
96152 +                              *   ones will 'stick'. */
96153 +    /* Advanced compression parameters :
96154 +     * It's possible to pin down compression parameters to some specific values.
96155 +     * In which case, these values are no longer dynamically selected by the compressor */
96156 +    ZSTD_c_windowLog=101,    /* Maximum allowed back-reference distance, expressed as power of 2.
96157 +                              * This will set a memory budget for streaming decompression,
96158 +                              * with larger values requiring more memory
96159 +                              * and typically compressing more.
96160 +                              * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
96161 +                              * Special: value 0 means "use default windowLog".
96162 +                              * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
96163 +                              *       requires explicitly allowing such size at streaming decompression stage. */
96164 +    ZSTD_c_hashLog=102,      /* Size of the initial probe table, as a power of 2.
96165 +                              * Resulting memory usage is (1 << (hashLog+2)).
96166 +                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
96167 +                              * Larger tables improve compression ratio of strategies <= dFast,
96168 +                              * and improve speed of strategies > dFast.
96169 +                              * Special: value 0 means "use default hashLog". */
96170 +    ZSTD_c_chainLog=103,     /* Size of the multi-probe search table, as a power of 2.
96171 +                              * Resulting memory usage is (1 << (chainLog+2)).
96172 +                              * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
96173 +                              * Larger tables result in better and slower compression.
96174 +                              * This parameter is useless for "fast" strategy.
96175 +                              * It's still useful when using "dfast" strategy,
96176 +                              * in which case it defines a secondary probe table.
96177 +                              * Special: value 0 means "use default chainLog". */
96178 +    ZSTD_c_searchLog=104,    /* Number of search attempts, as a power of 2.
96179 +                              * More attempts result in better and slower compression.
96180 +                              * This parameter is useless for "fast" and "dFast" strategies.
96181 +                              * Special: value 0 means "use default searchLog". */
96182 +    ZSTD_c_minMatch=105,     /* Minimum size of searched matches.
96183 +                              * Note that Zstandard can still find matches of smaller size,
96184 +                              * it just tweaks its search algorithm to look for this size and larger.
96185 +                              * Larger values increase compression and decompression speed, but decrease ratio.
96186 +                              * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
96187 +                              * Note that currently, for all strategies < btopt, effective minimum is 4.
96188 +                              *                    , for all strategies > fast, effective maximum is 6.
96189 +                              * Special: value 0 means "use default minMatchLength". */
96190 +    ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
96191 +                              * For strategies btopt, btultra & btultra2:
96192 +                              *     Length of Match considered "good enough" to stop search.
96193 +                              *     Larger values make compression stronger, and slower.
96194 +                              * For strategy fast:
96195 +                              *     Distance between match sampling.
96196 +                              *     Larger values make compression faster, and weaker.
96197 +                              * Special: value 0 means "use default targetLength". */
96198 +    ZSTD_c_strategy=107,     /* See ZSTD_strategy enum definition.
96199 +                              * The higher the value of selected strategy, the more complex it is,
96200 +                              * resulting in stronger and slower compression.
96201 +                              * Special: value 0 means "use default strategy". */
96203 +    /* LDM mode parameters */
96204 +    ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
96205 +                                     * This parameter is designed to improve compression ratio
96206 +                                     * for large inputs, by finding large matches at long distance.
96207 +                                     * It increases memory usage and window size.
96208 +                                     * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
96209 +                                     * except when expressly set to a different value.
96210 +                                     * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and
96211 +                                     * compression strategy >= ZSTD_btopt (== compression level 16+) */
96212 +    ZSTD_c_ldmHashLog=161,   /* Size of the table for long distance matching, as a power of 2.
96213 +                              * Larger values increase memory usage and compression ratio,
96214 +                              * but decrease compression speed.
96215 +                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
96216 +                              * default: windowlog - 7.
96217 +                              * Special: value 0 means "automatically determine hashlog". */
96218 +    ZSTD_c_ldmMinMatch=162,  /* Minimum match size for long distance matcher.
96219 +                              * Larger/too small values usually decrease compression ratio.
96220 +                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
96221 +                              * Special: value 0 means "use default value" (default: 64). */
96222 +    ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
96223 +                              * Larger values improve collision resolution but decrease compression speed.
96224 +                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
96225 +                              * Special: value 0 means "use default value" (default: 3). */
96226 +    ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
96227 +                              * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
96228 +                              * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
96229 +                              * Larger values improve compression speed.
96230 +                              * Deviating far from default value will likely result in a compression ratio decrease.
96231 +                              * Special: value 0 means "automatically determine hashRateLog". */
96233 +    /* frame parameters */
96234 +    ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
96235 +                              * Content size must be known at the beginning of compression.
96236 +                              * This is automatically the case when using ZSTD_compress2(),
96237 +                              * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
96238 +    ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
96239 +    ZSTD_c_dictIDFlag=202,   /* When applicable, dictionary's ID is written into frame header (default:1) */
96241 +    /* multi-threading parameters */
96242 +    /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
96243 +     * Otherwise, trying to set any other value than default (0) will be a no-op and return an error.
96244 +     * In a situation where it's unknown if the linked library supports multi-threading or not,
96245 +     * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property.
96246 +     */
96247 +    ZSTD_c_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.
96248 +                              * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() :
96249 +                              * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller,
96250 +                              * while compression is performed in parallel, within worker thread(s).
96251 +                              * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
96252 +                              *  in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
96253 +                              * More workers improve speed, but also increase memory usage.
96254 +                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned,
96255 +                              * compression is performed inside Caller's thread, and all invocations are blocking */
96256 +    ZSTD_c_jobSize=401,      /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
96257 +                              * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
96258 +                              * 0 means default, which is dynamically determined based on compression parameters.
96259 +                              * Job size must be a minimum of overlap size, or 1 MB, whichever is largest.
96260 +                              * The minimum size is automatically and transparently enforced. */
96261 +    ZSTD_c_overlapLog=402,   /* Control the overlap size, as a fraction of window size.
96262 +                              * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
96263 +                              * It helps preserve compression ratio, while each job is compressed in parallel.
96264 +                              * This value is enforced only when nbWorkers >= 1.
96265 +                              * Larger values increase compression ratio, but decrease speed.
96266 +                              * Possible values range from 0 to 9 :
96267 +                              * - 0 means "default" : value will be determined by the library, depending on strategy
96268 +                              * - 1 means "no overlap"
96269 +                              * - 9 means "full overlap", using a full window size.
96270 +                              * Each intermediate rank increases/decreases load size by a factor 2 :
96271 +                              * 9: full window;  8: w/2;  7: w/4;  6: w/8;  5:w/16;  4: w/32;  3:w/64;  2:w/128;  1:no overlap;  0:default
96272 +                              * default value varies between 6 and 9, depending on strategy */
96274 +    /* note : additional experimental parameters are also available
96275 +     * within the experimental section of the API.
96276 +     * At the time of this writing, they include :
96277 +     * ZSTD_c_rsyncable
96278 +     * ZSTD_c_format
96279 +     * ZSTD_c_forceMaxWindow
96280 +     * ZSTD_c_forceAttachDict
96281 +     * ZSTD_c_literalCompressionMode
96282 +     * ZSTD_c_targetCBlockSize
96283 +     * ZSTD_c_srcSizeHint
96284 +     * ZSTD_c_enableDedicatedDictSearch
96285 +     * ZSTD_c_stableInBuffer
96286 +     * ZSTD_c_stableOutBuffer
96287 +     * ZSTD_c_blockDelimiters
96288 +     * ZSTD_c_validateSequences
96289 +     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
96290 +     * note : never ever use experimentalParam? names directly;
96291 +     *        also, the enums values themselves are unstable and can still change.
96292 +     */
96293 +     ZSTD_c_experimentalParam1=500,
96294 +     ZSTD_c_experimentalParam2=10,
96295 +     ZSTD_c_experimentalParam3=1000,
96296 +     ZSTD_c_experimentalParam4=1001,
96297 +     ZSTD_c_experimentalParam5=1002,
96298 +     ZSTD_c_experimentalParam6=1003,
96299 +     ZSTD_c_experimentalParam7=1004,
96300 +     ZSTD_c_experimentalParam8=1005,
96301 +     ZSTD_c_experimentalParam9=1006,
96302 +     ZSTD_c_experimentalParam10=1007,
96303 +     ZSTD_c_experimentalParam11=1008,
96304 +     ZSTD_c_experimentalParam12=1009
96305 +} ZSTD_cParameter;
96307 +typedef struct {
96308 +    size_t error;
96309 +    int lowerBound;
96310 +    int upperBound;
96311 +} ZSTD_bounds;
96313 +/*! ZSTD_cParam_getBounds() :
96314 + *  All parameters must belong to an interval with lower and upper bounds,
96315 + *  otherwise they will either trigger an error or be automatically clamped.
96316 + * @return : a structure, ZSTD_bounds, which contains
96317 + *         - an error status field, which must be tested using ZSTD_isError()
96318 + *         - lower and upper bounds, both inclusive
96319 + */
96320 +ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);
96322 +/*! ZSTD_CCtx_setParameter() :
96323 + *  Set one compression parameter, selected by enum ZSTD_cParameter.
96324 + *  All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
96325 + *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
96326 + *  Setting a parameter is generally only possible during frame initialization (before starting compression).
96327 + *  Exception : when using multi-threading mode (nbWorkers >= 1),
96328 + *              the following parameters can be updated _during_ compression (within same frame):
96329 + *              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
96330 + *              new parameters will be active for next job only (after a flush()).
96331 + * @return : an error code (which can be tested using ZSTD_isError()).
96332 + */
96333 +ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);
96335 +/*! ZSTD_CCtx_setPledgedSrcSize() :
96336 + *  Total input data size to be compressed as a single frame.
96337 + *  Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag.
96338 + *  This value will also be controlled at end of frame, and trigger an error if not respected.
96339 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
96340 + *  Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
96341 + *           In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
96342 + *           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
96343 + *  Note 2 : pledgedSrcSize is only valid once, for the next frame.
96344 + *           It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
96345 + *  Note 3 : Whenever all input data is provided and consumed in a single round,
96346 + *           for example with ZSTD_compress2(),
96347 + *           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
96348 + *           this value is automatically overridden by srcSize instead.
96349 + */
96350 +ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
96352 +typedef enum {
96353 +    ZSTD_reset_session_only = 1,
96354 +    ZSTD_reset_parameters = 2,
96355 +    ZSTD_reset_session_and_parameters = 3
96356 +} ZSTD_ResetDirective;
96358 +/*! ZSTD_CCtx_reset() :
96359 + *  There are 2 different things that can be reset, independently or jointly :
96360 + *  - The session : will stop compressing current frame, and make CCtx ready to start a new one.
96361 + *                  Useful after an error, or to interrupt any ongoing compression.
96362 + *                  Any internal data not yet flushed is cancelled.
96363 + *                  Compression parameters and dictionary remain unchanged.
96364 + *                  They will be used to compress next frame.
96365 + *                  Resetting session never fails.
96366 + *  - The parameters : changes all parameters back to "default".
96367 + *                  This removes any reference to any dictionary too.
96368 + *                  Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
96369 + *                  otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
96370 + *  - Both : similar to resetting the session, followed by resetting parameters.
96371 + */
96372 +ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
96374 +/*! ZSTD_compress2() :
96375 + *  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
96376 + *  ZSTD_compress2() always starts a new frame.
96377 + *  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
96378 + *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
96379 + *  - The function is always blocking, returns when compression is completed.
96380 + *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
96381 + * @return : compressed size written into `dst` (<= `dstCapacity`),
96382 + *           or an error code if it fails (which can be tested using ZSTD_isError()).
96383 + */
96384 +ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
96385 +                                   void* dst, size_t dstCapacity,
96386 +                             const void* src, size_t srcSize);
96389 +/***************************************
96390 +*  Advanced decompression API
96391 +***************************************/
96393 +/* The advanced API pushes parameters one by one into an existing DCtx context.
96394 + * Parameters are sticky, and remain valid for all following frames
96395 + * using the same DCtx context.
96396 + * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
96397 + * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
96398 + *        Therefore, no new decompression function is necessary.
96399 + */
96401 +typedef enum {
96403 +    ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
96404 +                              * the streaming API will refuse to allocate memory buffer
96405 +                              * in order to protect the host from unreasonable memory requirements.
96406 +                              * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
96407 +                              * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
96408 +                              * Special: value 0 means "use default maximum windowLog". */
96410 +    /* note : additional experimental parameters are also available
96411 +     * within the experimental section of the API.
96412 +     * At the time of this writing, they include :
96413 +     * ZSTD_d_format
96414 +     * ZSTD_d_stableOutBuffer
96415 +     * ZSTD_d_forceIgnoreChecksum
96416 +     * ZSTD_d_refMultipleDDicts
96417 +     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
96418 +     * note : never ever use experimentalParam? names directly
96419 +     */
96420 +     ZSTD_d_experimentalParam1=1000,
96421 +     ZSTD_d_experimentalParam2=1001,
96422 +     ZSTD_d_experimentalParam3=1002,
96423 +     ZSTD_d_experimentalParam4=1003
96425 +} ZSTD_dParameter;
96427 +/*! ZSTD_dParam_getBounds() :
96428 + *  All parameters must belong to an interval with lower and upper bounds,
96429 + *  otherwise they will either trigger an error or be automatically clamped.
96430 + * @return : a structure, ZSTD_bounds, which contains
96431 + *         - an error status field, which must be tested using ZSTD_isError()
96432 + *         - both lower and upper bounds, inclusive
96433 + */
96434 +ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
96436 +/*! ZSTD_DCtx_setParameter() :
96437 + *  Set one compression parameter, selected by enum ZSTD_dParameter.
96438 + *  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
96439 + *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
96440 + *  Setting a parameter is only possible during frame initialization (before starting decompression).
96441 + * @return : 0, or an error code (which can be tested using ZSTD_isError()).
96442 + */
96443 +ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
96445 +/*! ZSTD_DCtx_reset() :
96446 + *  Return a DCtx to clean state.
96447 + *  Session and parameters can be reset jointly or separately.
96448 + *  Parameters can only be reset when no active frame is being decompressed.
96449 + * @return : 0, or an error code, which can be tested with ZSTD_isError()
96450 + */
96451 +ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
96454 +/****************************
96455 +*  Streaming
96456 +****************************/
96458 +typedef struct ZSTD_inBuffer_s {
96459 +  const void* src;    /**< start of input buffer */
96460 +  size_t size;        /**< size of input buffer */
96461 +  size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
96462 +} ZSTD_inBuffer;
96464 +typedef struct ZSTD_outBuffer_s {
96465 +  void*  dst;         /**< start of output buffer */
96466 +  size_t size;        /**< size of output buffer */
96467 +  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
96468 +} ZSTD_outBuffer;
96472 +/*-***********************************************************************
96473 +*  Streaming compression - HowTo
96475 +*  A ZSTD_CStream object is required to track streaming operation.
96476 +*  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
96477 +*  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
96478 +*  It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
96480 +*  For parallel execution, use one separate ZSTD_CStream per thread.
96482 +*  note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
96484 +*  Parameters are sticky : when starting a new compression on the same context,
96485 +*  it will re-use the same sticky parameters as previous compression session.
96486 +*  When in doubt, it's recommended to fully initialize the context before usage.
96487 +*  Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
96488 +*  ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
96489 +*  set more specific parameters, the pledged source size, or load a dictionary.
96491 +*  Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
96492 +*  consume input stream. The function will automatically update both `pos`
96493 +*  fields within `input` and `output`.
96494 +*  Note that the function may not consume the entire input, for example, because
96495 +*  the output buffer is already full, in which case `input.pos < input.size`.
96496 +*  The caller must check if input has been entirely consumed.
96497 +*  If not, the caller must make some room to receive more compressed data,
96498 +*  and then present again remaining input data.
96499 +*  note: ZSTD_e_continue is guaranteed to make some forward progress when called,
96500 +*        but doesn't guarantee maximal forward progress. This is especially relevant
96501 +*        when compressing with multiple threads. The call won't block if it can
96502 +*        consume some input, but if it can't it will wait for some, but not all,
96503 +*        output to be flushed.
96504 +* @return : provides a minimum amount of data remaining to be flushed from internal buffers
96505 +*           or an error code, which can be tested using ZSTD_isError().
96507 +*  At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
96508 +*  using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
96509 +*  Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
96510 +*  In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
96511 +*  You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
96512 +*  operation.
96513 +*  note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
96514 +*        block until the flush is complete or the output buffer is full.
96515 +*  @return : 0 if internal buffers are entirely flushed,
96516 +*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
96517 +*            or an error code, which can be tested using ZSTD_isError().
96519 +*  Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.
96520 +*  It will perform a flush and write frame epilogue.
96521 +*  The epilogue is required for decoders to consider a frame completed.
96522 +*  flush operation is the same, and follows same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
96523 +*  You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
96524 +*  start a new frame.
96525 +*  note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
96526 +*        block until the flush is complete or the output buffer is full.
96527 +*  @return : 0 if frame fully completed and fully flushed,
96528 +*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
96529 +*            or an error code, which can be tested using ZSTD_isError().
96531 +* *******************************************************************/
96533 +typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
96534 +                                 /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
96535 +/*===== ZSTD_CStream management functions =====*/
96536 +ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
96537 +ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);  /* accept NULL pointer */
96539 +/*===== Streaming compression functions =====*/
96540 +typedef enum {
96541 +    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
96542 +    ZSTD_e_flush=1,    /* flush any data provided so far,
96543 +                        * it creates (at least) one new block, that can be decoded immediately on reception;
96544 +                        * frame will continue: any future data can still reference previously compressed data, improving compression.
96545 +                        * note : multithreaded compression will block to flush as much output as possible. */
96546 +    ZSTD_e_end=2       /* flush any remaining data _and_ close current frame.
96547 +                        * note that frame is only closed after compressed data is fully flushed (return value == 0).
96548 +                        * After that point, any additional data starts a new frame.
96549 +                        * note : each frame is independent (does not reference any content from previous frame).
96550 +                        * note : multithreaded compression will block to flush as much output as possible. */
96551 +} ZSTD_EndDirective;
96553 +/*! ZSTD_compressStream2() :
96554 + *  Behaves about the same as ZSTD_compressStream, with additional control on end directive.
96555 + *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
96556 + *  - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
96557 + *  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
96558 + *  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
96559 + *  - endOp must be a valid directive
96560 + *  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
96561 + *  - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,
96562 + *                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.
96563 + *                                                  The function nonetheless guarantees forward progress : it will return only after it reads or writes at least 1+ byte.
96564 + *  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
96565 + *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
96566 + *            or an error code, which can be tested using ZSTD_isError().
96567 + *            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
96568 + *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
96569 + *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
96570 + *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
96571 + *            only ZSTD_e_end or ZSTD_e_flush operations are allowed.
96572 + *            Before starting a new compression job, or changing compression parameters,
96573 + *            it is required to fully flush internal buffers.
96574 + */
96575 +ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
96576 +                                         ZSTD_outBuffer* output,
96577 +                                         ZSTD_inBuffer* input,
96578 +                                         ZSTD_EndDirective endOp);
96581 +/* These buffer sizes are softly recommended.
96582 + * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
96583 + * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
96584 + * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
96585 + *
96586 + * However, note that these recommendations are from the perspective of a C caller program.
96587 + * If the streaming interface is invoked from some other language,
96588 + * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
96589 + * a major performance rule is to reduce crossing such interface to an absolute minimum.
96590 + * It's not rare that performance ends being spent more into the interface, rather than compression itself.
96591 + * In which cases, prefer using large buffers, as large as practical,
96592 + * for both input and output, to reduce the nb of roundtrips.
96593 + */
96594 +ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
96595 +ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */
96598 +/* *****************************************************************************
96599 + * This following is a legacy streaming API.
96600 + * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
96601 + * It is redundant, but remains fully supported.
96602 + * Advanced parameters and dictionary compression can only be used through the
96603 + * new API.
96604 + ******************************************************************************/
96606 +/*!
96607 + * Equivalent to:
96608 + *
96609 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
96610 + *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
96611 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
96612 + */
96613 +ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
96614 +/*!
96615 + * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
96616 + * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
96617 + * the next read size (if non-zero and not an error). ZSTD_compressStream2()
96618 + * returns the minimum nb of bytes left to flush (if non-zero and not an error).
96619 + */
96620 +ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
96621 +/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
96622 +ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
96623 +/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
96624 +ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
96627 +/*-***************************************************************************
96628 +*  Streaming decompression - HowTo
96630 +*  A ZSTD_DStream object is required to track streaming operations.
96631 +*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
96632 +*  ZSTD_DStream objects can be re-used multiple times.
96634 +*  Use ZSTD_initDStream() to start a new decompression operation.
96635 +* @return : recommended first input size
96636 +*  Alternatively, use advanced API to set specific properties.
96638 +*  Use ZSTD_decompressStream() repetitively to consume your input.
96639 +*  The function will update both `pos` fields.
96640 +*  If `input.pos < input.size`, some input has not been consumed.
96641 +*  It's up to the caller to present again remaining data.
96642 +*  The function tries to flush all data decoded immediately, respecting output buffer size.
96643 +*  If `output.pos < output.size`, decoder has flushed everything it could.
96644 +*  But if `output.pos == output.size`, there might be some data left within internal buffers.
96645 +*  In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
96646 +*  Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
96647 +* @return : 0 when a frame is completely decoded and fully flushed,
96648 +*        or an error code, which can be tested using ZSTD_isError(),
96649 +*        or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
96650 +*                                the return value is a suggested next input size (just a hint for better latency)
96651 +*                                that will never request more than the remaining frame size.
96652 +* *******************************************************************************/
96654 +typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
96655 +                                 /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
96656 +/*===== ZSTD_DStream management functions =====*/
96657 +ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
96658 +ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);  /* accept NULL pointer */
96660 +/*===== Streaming decompression functions =====*/
96662 +/* This function is redundant with the advanced API and equivalent to:
96663 + *
96664 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
96665 + *     ZSTD_DCtx_refDDict(zds, NULL);
96666 + */
96667 +ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
96669 +ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
96671 +ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
96672 +ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
96675 +/**************************
96676 +*  Simple dictionary API
96677 +***************************/
96678 +/*! ZSTD_compress_usingDict() :
96679 + *  Compression at an explicit compression level using a Dictionary.
96680 + *  A dictionary can be any arbitrary data segment (also called a prefix),
96681 + *  or a buffer with specified information (see dictBuilder/zdict.h).
96682 + *  Note : This function loads the dictionary, resulting in significant startup delay.
96683 + *         It's intended for a dictionary used only once.
96684 + *  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
96685 +ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
96686 +                                           void* dst, size_t dstCapacity,
96687 +                                     const void* src, size_t srcSize,
96688 +                                     const void* dict,size_t dictSize,
96689 +                                           int compressionLevel);
96691 +/*! ZSTD_decompress_usingDict() :
96692 + *  Decompression using a known Dictionary.
96693 + *  Dictionary must be identical to the one used during compression.
96694 + *  Note : This function loads the dictionary, resulting in significant startup delay.
96695 + *         It's intended for a dictionary used only once.
96696 + *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
96697 +ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
96698 +                                             void* dst, size_t dstCapacity,
96699 +                                       const void* src, size_t srcSize,
96700 +                                       const void* dict,size_t dictSize);
96703 +/***********************************
96704 + *  Bulk processing dictionary API
96705 + **********************************/
96706 +typedef struct ZSTD_CDict_s ZSTD_CDict;
96708 +/*! ZSTD_createCDict() :
96709 + *  When compressing multiple messages or blocks using the same dictionary,
96710 + *  it's recommended to digest the dictionary only once, since it's a costly operation.
96711 + *  ZSTD_createCDict() will create a state from digesting a dictionary.
96712 + *  The resulting state can be used for future compression operations with very limited startup cost.
96713 + *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
96714 + * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
96715 + *  Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
96716 + *  Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
96717 + *      in which case the only thing that it transports is the @compressionLevel.
96718 + *      This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
96719 + *      expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
96720 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
96721 +                                         int compressionLevel);
96723 +/*! ZSTD_freeCDict() :
96724 + *  Function frees memory allocated by ZSTD_createCDict().
96725 + *  If a NULL pointer is passed, no operation is performed. */
96726 +ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
96728 +/*! ZSTD_compress_usingCDict() :
96729 + *  Compression using a digested Dictionary.
96730 + *  Recommended when same dictionary is used multiple times.
96731 + *  Note : compression level is _decided at dictionary creation time_,
96732 + *     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
96733 +ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
96734 +                                            void* dst, size_t dstCapacity,
96735 +                                      const void* src, size_t srcSize,
96736 +                                      const ZSTD_CDict* cdict);
96739 +typedef struct ZSTD_DDict_s ZSTD_DDict;
96741 +/*! ZSTD_createDDict() :
96742 + *  Create a digested dictionary, ready to start decompression operation without startup delay.
96743 + *  dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
96744 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
96746 +/*! ZSTD_freeDDict() :
96747 + *  Function frees memory allocated with ZSTD_createDDict()
96748 + *  If a NULL pointer is passed, no operation is performed. */
96749 +ZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
96751 +/*! ZSTD_decompress_usingDDict() :
96752 + *  Decompression using a digested Dictionary.
96753 + *  Recommended when same dictionary is used multiple times. */
96754 +ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
96755 +                                              void* dst, size_t dstCapacity,
96756 +                                        const void* src, size_t srcSize,
96757 +                                        const ZSTD_DDict* ddict);
96760 +/********************************
96761 + *  Dictionary helper functions
96762 + *******************************/
96764 +/*! ZSTD_getDictID_fromDict() :
96765 + *  Provides the dictID stored within dictionary.
96766 + *  if @return == 0, the dictionary is not conformant with Zstandard specification.
96767 + *  It can still be loaded, but as a content-only dictionary. */
96768 +ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
96770 +/*! ZSTD_getDictID_fromDDict() :
96771 + *  Provides the dictID of the dictionary loaded into `ddict`.
96772 + *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
96773 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
96774 +ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
96776 +/*! ZSTD_getDictID_fromFrame() :
96777 + *  Provides the dictID required to decompressed the frame stored within `src`.
96778 + *  If @return == 0, the dictID could not be decoded.
96779 + *  This could for one of the following reasons :
96780 + *  - The frame does not require a dictionary to be decoded (most common case).
96781 + *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
96782 + *    Note : this use case also happens when using a non-conformant dictionary.
96783 + *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
96784 + *  - This is not a Zstandard frame.
96785 + *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
96786 +ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
96789 +/*******************************************************************************
96790 + * Advanced dictionary and prefix API
96791 + *
96792 + * This API allows dictionaries to be used with ZSTD_compress2(),
96793 + * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
96794 + * only reset with the context is reset with ZSTD_reset_parameters or
96795 + * ZSTD_reset_session_and_parameters. Prefixes are single-use.
96796 + ******************************************************************************/
96799 +/*! ZSTD_CCtx_loadDictionary() :
96800 + *  Create an internal CDict from `dict` buffer.
96801 + *  Decompression will have to use same dictionary.
96802 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
96803 + *  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
96804 + *           meaning "return to no-dictionary mode".
96805 + *  Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
96806 + *           To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
96807 + *  Note 2 : Loading a dictionary involves building tables.
96808 + *           It's also a CPU consuming operation, with non-negligible impact on latency.
96809 + *           Tables are dependent on compression parameters, and for this reason,
96810 + *           compression parameters can no longer be changed after loading a dictionary.
96811 + *  Note 3 :`dict` content will be copied internally.
96812 + *           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
96813 + *           In such a case, dictionary buffer must outlive its users.
96814 + *  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
96815 + *           to precisely select how dictionary content must be interpreted. */
96816 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
96818 +/*! ZSTD_CCtx_refCDict() :
96819 + *  Reference a prepared dictionary, to be used for all next compressed frames.
96820 + *  Note that compression parameters are enforced from within CDict,
96821 + *  and supersede any compression parameter previously set within CCtx.
96822 + *  The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
96823 + *  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
96824 + *  The dictionary will remain valid for future compressed frames using same CCtx.
96825 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
96826 + *  Special : Referencing a NULL CDict means "return to no-dictionary mode".
96827 + *  Note 1 : Currently, only one dictionary can be managed.
96828 + *           Referencing a new dictionary effectively "discards" any previous one.
96829 + *  Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
96830 +ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
96832 +/*! ZSTD_CCtx_refPrefix() :
96833 + *  Reference a prefix (single-usage dictionary) for next compressed frame.
96834 + *  A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
96835 + *  Decompression will need same prefix to properly regenerate data.
96836 + *  Compressing with a prefix is similar in outcome as performing a diff and compressing it,
96837 + *  but performs much faster, especially during decompression (compression speed is tunable with compression level).
96838 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
96839 + *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
96840 + *  Note 1 : Prefix buffer is referenced. It **must** outlive compression.
96841 + *           Its content must remain unmodified during compression.
96842 + *  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
96843 + *           ensure that the window size is large enough to contain the entire source.
96844 + *           See ZSTD_c_windowLog.
96845 + *  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
96846 + *           It's a CPU consuming operation, with non-negligible impact on latency.
96847 + *           If there is a need to use the same prefix multiple times, consider loadDictionary instead.
96848 + *  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
96849 + *           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
96850 +ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
96851 +                                 const void* prefix, size_t prefixSize);
96853 +/*! ZSTD_DCtx_loadDictionary() :
96854 + *  Create an internal DDict from dict buffer,
96855 + *  to be used to decompress next frames.
96856 + *  The dictionary remains valid for all future frames, until explicitly invalidated.
96857 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
96858 + *  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
96859 + *            meaning "return to no-dictionary mode".
96860 + *  Note 1 : Loading a dictionary involves building tables,
96861 + *           which has a non-negligible impact on CPU usage and latency.
96862 + *           It's recommended to "load once, use many times", to amortize the cost
96863 + *  Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.
96864 + *           Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
96865 + *  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
96866 + *           how dictionary content is loaded and interpreted.
96867 + */
96868 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
96870 +/*! ZSTD_DCtx_refDDict() :
96871 + *  Reference a prepared dictionary, to be used to decompress next frames.
96872 + *  The dictionary remains active for decompression of future frames using same DCtx.
96873 + *
96874 + *  If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
96875 + *  will store the DDict references in a table, and the DDict used for decompression
96876 + *  will be determined at decompression time, as per the dict ID in the frame.
96877 + *  The memory for the table is allocated on the first call to refDDict, and can be
96878 + *  freed with ZSTD_freeDCtx().
96879 + *
96880 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
96881 + *  Note 1 : Currently, only one dictionary can be managed.
96882 + *           Referencing a new dictionary effectively "discards" any previous one.
96883 + *  Special: referencing a NULL DDict means "return to no-dictionary mode".
96884 + *  Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
96885 + */
96886 +ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
96888 +/*! ZSTD_DCtx_refPrefix() :
96889 + *  Reference a prefix (single-usage dictionary) to decompress next frame.
96890 + *  This is the reverse operation of ZSTD_CCtx_refPrefix(),
96891 + *  and must use the same prefix as the one used during compression.
96892 + *  Prefix is **only used once**. Reference is discarded at end of frame.
96893 + *  End of frame is reached when ZSTD_decompressStream() returns 0.
96894 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
96895 + *  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
96896 + *  Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
96897 + *           Prefix buffer must remain unmodified up to the end of frame,
96898 + *           reached when ZSTD_decompressStream() returns 0.
96899 + *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
96900 + *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
96901 + *  Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
96902 + *           A full dictionary is more costly, as it requires building tables.
96903 + */
96904 +ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
96905 +                                 const void* prefix, size_t prefixSize);
96907 +/* ===   Memory management   === */
96909 +/*! ZSTD_sizeof_*() :
96910 + *  These functions give the _current_ memory usage of selected object.
96911 + *  Note that object memory usage can evolve (increase or decrease) over time. */
96912 +ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
96913 +ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
96914 +ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
96915 +ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
96916 +ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
96917 +ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
96919 +#endif  /* ZSTD_H_235446 */
96922 +/* **************************************************************************************
96923 + *   ADVANCED AND EXPERIMENTAL FUNCTIONS
96924 + ****************************************************************************************
96925 + * The definitions in the following section are considered experimental.
96926 + * They are provided for advanced scenarios.
96927 + * They should never be used with a dynamic library, as prototypes may change in the future.
96928 + * Use them only in association with static linking.
96929 + * ***************************************************************************************/
96931 +#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
96932 +#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
96934 +/****************************************************************************************
96935 + *   experimental API (static linking only)
96936 + ****************************************************************************************
96937 + * The following symbols and constants
96938 + * are not planned to join "stable API" status in the near future.
96939 + * They can still change in future versions.
96940 + * Some of them are planned to remain in the static_only section indefinitely.
96941 + * Some of them might be removed in the future (especially when redundant with existing stable functions)
96942 + * ***************************************************************************************/
96944 +#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1)   /* minimum input size required to query frame header size */
96945 +#define ZSTD_FRAMEHEADERSIZE_MIN(format)    ((format) == ZSTD_f_zstd1 ? 6 : 2)
96946 +#define ZSTD_FRAMEHEADERSIZE_MAX   18   /* can be useful for static allocation */
96947 +#define ZSTD_SKIPPABLEHEADERSIZE    8
96949 +/* compression parameter bounds */
96950 +#define ZSTD_WINDOWLOG_MAX_32    30
96951 +#define ZSTD_WINDOWLOG_MAX_64    31
96952 +#define ZSTD_WINDOWLOG_MAX     ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
96953 +#define ZSTD_WINDOWLOG_MIN       10
96954 +#define ZSTD_HASHLOG_MAX       ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
96955 +#define ZSTD_HASHLOG_MIN          6
96956 +#define ZSTD_CHAINLOG_MAX_32     29
96957 +#define ZSTD_CHAINLOG_MAX_64     30
96958 +#define ZSTD_CHAINLOG_MAX      ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
96959 +#define ZSTD_CHAINLOG_MIN        ZSTD_HASHLOG_MIN
96960 +#define ZSTD_SEARCHLOG_MAX      (ZSTD_WINDOWLOG_MAX-1)
96961 +#define ZSTD_SEARCHLOG_MIN        1
96962 +#define ZSTD_MINMATCH_MAX         7   /* only for ZSTD_fast, other strategies are limited to 6 */
96963 +#define ZSTD_MINMATCH_MIN         3   /* only for ZSTD_btopt+, faster strategies are limited to 4 */
96964 +#define ZSTD_TARGETLENGTH_MAX    ZSTD_BLOCKSIZE_MAX
96965 +#define ZSTD_TARGETLENGTH_MIN     0   /* note : comparing this constant to an unsigned results in a tautological test */
96966 +#define ZSTD_STRATEGY_MIN        ZSTD_fast
96967 +#define ZSTD_STRATEGY_MAX        ZSTD_btultra2
96970 +#define ZSTD_OVERLAPLOG_MIN       0
96971 +#define ZSTD_OVERLAPLOG_MAX       9
96973 +#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27   /* by default, the streaming decoder will refuse any frame
96974 +                                           * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
96975 +                                           * to preserve host's memory from unreasonable requirements.
96976 +                                           * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
96977 +                                           * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
96980 +/* LDM parameter bounds */
96981 +#define ZSTD_LDM_HASHLOG_MIN      ZSTD_HASHLOG_MIN
96982 +#define ZSTD_LDM_HASHLOG_MAX      ZSTD_HASHLOG_MAX
96983 +#define ZSTD_LDM_MINMATCH_MIN        4
96984 +#define ZSTD_LDM_MINMATCH_MAX     4096
96985 +#define ZSTD_LDM_BUCKETSIZELOG_MIN   1
96986 +#define ZSTD_LDM_BUCKETSIZELOG_MAX   8
96987 +#define ZSTD_LDM_HASHRATELOG_MIN     0
96988 +#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
96990 +/* Advanced parameter bounds */
96991 +#define ZSTD_TARGETCBLOCKSIZE_MIN   64
96992 +#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
96993 +#define ZSTD_SRCSIZEHINT_MIN        0
96994 +#define ZSTD_SRCSIZEHINT_MAX        INT_MAX
96996 +/* internal */
96997 +#define ZSTD_HASHLOG3_MAX           17
97000 +/* ---  Advanced types  --- */
97002 +typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
97004 +typedef struct {
97005 +    unsigned int offset;      /* The offset of the match. (NOT the same as the offset code)
97006 +                               * If offset == 0 and matchLength == 0, this sequence represents the last
97007 +                               * literals in the block of litLength size.
97008 +                               */
97010 +    unsigned int litLength;   /* Literal length of the sequence. */
97011 +    unsigned int matchLength; /* Match length of the sequence. */
97013 +                              /* Note: Users of this API may provide a sequence with matchLength == litLength == offset == 0.
97014 +                               * In this case, we will treat the sequence as a marker for a block boundary.
97015 +                               */
97017 +    unsigned int rep;         /* Represents which repeat offset is represented by the field 'offset'.
97018 +                               * Ranges from [0, 3].
97019 +                               *
97020 +                               * Repeat offsets are essentially previous offsets from previous sequences sorted in
97021 +                               * recency order. For more detail, see doc/zstd_compression_format.md
97022 +                               *
97023 +                               * If rep == 0, then 'offset' does not contain a repeat offset.
97024 +                               * If rep > 0:
97025 +                               *  If litLength != 0:
97026 +                               *      rep == 1 --> offset == repeat_offset_1
97027 +                               *      rep == 2 --> offset == repeat_offset_2
97028 +                               *      rep == 3 --> offset == repeat_offset_3
97029 +                               *  If litLength == 0:
97030 +                               *      rep == 1 --> offset == repeat_offset_2
97031 +                               *      rep == 2 --> offset == repeat_offset_3
97032 +                               *      rep == 3 --> offset == repeat_offset_1 - 1
97033 +                               *
97034 +                               * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
97035 +                               * 'rep', but repeat offsets do not necessarily need to be calculated from an external
97036 +                               * sequence provider's perspective. For example, ZSTD_compressSequences() does not
97037 +                               * use this 'rep' field at all (as of now).
97038 +                               */
97039 +} ZSTD_Sequence;
97041 +typedef struct {
97042 +    unsigned windowLog;       /**< largest match distance : larger == more compression, more memory needed during decompression */
97043 +    unsigned chainLog;        /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
97044 +    unsigned hashLog;         /**< dispatch table : larger == faster, more memory */
97045 +    unsigned searchLog;       /**< nb of searches : larger == more compression, slower */
97046 +    unsigned minMatch;        /**< match length searched : larger == faster decompression, sometimes less compression */
97047 +    unsigned targetLength;    /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
97048 +    ZSTD_strategy strategy;   /**< see ZSTD_strategy definition above */
97049 +} ZSTD_compressionParameters;
97051 +typedef struct {
97052 +    int contentSizeFlag; /**< 1: content size will be in frame header (when known) */
97053 +    int checksumFlag;    /**< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */
97054 +    int noDictIDFlag;    /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
97055 +} ZSTD_frameParameters;
97057 +typedef struct {
97058 +    ZSTD_compressionParameters cParams;
97059 +    ZSTD_frameParameters fParams;
97060 +} ZSTD_parameters;
97062 +typedef enum {
97063 +    ZSTD_dct_auto = 0,       /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
97064 +    ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
97065 +    ZSTD_dct_fullDict = 2    /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
97066 +} ZSTD_dictContentType_e;
97068 +typedef enum {
97069 +    ZSTD_dlm_byCopy = 0,  /**< Copy dictionary content internally */
97070 +    ZSTD_dlm_byRef = 1    /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
97071 +} ZSTD_dictLoadMethod_e;
97073 +typedef enum {
97074 +    ZSTD_f_zstd1 = 0,           /* zstd frame format, specified in zstd_compression_format.md (default) */
97075 +    ZSTD_f_zstd1_magicless = 1  /* Variant of zstd frame format, without initial 4-bytes magic number.
97076 +                                 * Useful to save 4 bytes per generated frame.
97077 +                                 * Decoder cannot recognise automatically this format, requiring this instruction. */
97078 +} ZSTD_format_e;
97080 +typedef enum {
97081 +    /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
97082 +    ZSTD_d_validateChecksum = 0,
97083 +    ZSTD_d_ignoreChecksum = 1
97084 +} ZSTD_forceIgnoreChecksum_e;
97086 +typedef enum {
97087 +    /* Note: this enum controls ZSTD_d_refMultipleDDicts */
97088 +    ZSTD_rmd_refSingleDDict = 0,
97089 +    ZSTD_rmd_refMultipleDDicts = 1
97090 +} ZSTD_refMultipleDDicts_e;
97092 +typedef enum {
97093 +    /* Note: this enum and the behavior it controls are effectively internal
97094 +     * implementation details of the compressor. They are expected to continue
97095 +     * to evolve and should be considered only in the context of extremely
97096 +     * advanced performance tuning.
97097 +     *
97098 +     * Zstd currently supports the use of a CDict in three ways:
97099 +     *
97100 +     * - The contents of the CDict can be copied into the working context. This
97101 +     *   means that the compression can search both the dictionary and input
97102 +     *   while operating on a single set of internal tables. This makes
97103 +     *   the compression faster per-byte of input. However, the initial copy of
97104 +     *   the CDict's tables incurs a fixed cost at the beginning of the
97105 +     *   compression. For small compressions (< 8 KB), that copy can dominate
97106 +     *   the cost of the compression.
97107 +     *
97108 +     * - The CDict's tables can be used in-place. In this model, compression is
97109 +     *   slower per input byte, because the compressor has to search two sets of
97110 +     *   tables. However, this model incurs no start-up cost (as long as the
97111 +     *   working context's tables can be reused). For small inputs, this can be
97112 +     *   faster than copying the CDict's tables.
97113 +     *
97114 +     * - The CDict's tables are not used at all, and instead we use the working
97115 +     *   context alone to reload the dictionary and use params based on the source
97116 +     *   size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
97117 +     *   This method is effective when the dictionary sizes are very small relative
97118 +     *   to the input size, and the input size is fairly large to begin with.
97119 +     *
97120 +     * Zstd has a simple internal heuristic that selects which strategy to use
97121 +     * at the beginning of a compression. However, if experimentation shows that
97122 +     * Zstd is making poor choices, it is possible to override that choice with
97123 +     * this enum.
97124 +     */
97125 +    ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
97126 +    ZSTD_dictForceAttach   = 1, /* Never copy the dictionary. */
97127 +    ZSTD_dictForceCopy     = 2, /* Always copy the dictionary. */
97128 +    ZSTD_dictForceLoad     = 3  /* Always reload the dictionary */
97129 +} ZSTD_dictAttachPref_e;
97131 +typedef enum {
97132 +  ZSTD_lcm_auto = 0,          /**< Automatically determine the compression mode based on the compression level.
97133 +                               *   Negative compression levels will be uncompressed, and positive compression
97134 +                               *   levels will be compressed. */
97135 +  ZSTD_lcm_huffman = 1,       /**< Always attempt Huffman compression. Uncompressed literals will still be
97136 +                               *   emitted if Huffman compression is not profitable. */
97137 +  ZSTD_lcm_uncompressed = 2   /**< Always emit uncompressed literals. */
97138 +} ZSTD_literalCompressionMode_e;
97141 +/***************************************
97142 +*  Frame size functions
97143 +***************************************/
97145 +/*! ZSTD_findDecompressedSize() :
97146 + *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
97147 + *  `srcSize` must be the _exact_ size of this series
97148 + *       (i.e. there should be a frame boundary at `src + srcSize`)
97149 + *  @return : - decompressed size of all data in all successive frames
97150 + *            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
97151 + *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
97152 + *
97153 + *   note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
97154 + *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
97155 + *            In which case, it's necessary to use streaming mode to decompress data.
97156 + *   note 2 : decompressed size is always present when compression is done with ZSTD_compress()
97157 + *   note 3 : decompressed size can be very large (64-bits value),
97158 + *            potentially larger than what local system can handle as a single memory segment.
97159 + *            In which case, it's necessary to use streaming mode to decompress data.
97160 + *   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
97161 + *            Always ensure result fits within application's authorized limits.
97162 + *            Each application can set its own limits.
97163 + *   note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
97164 + *            read each contained frame header.  This is fast as most of the data is skipped,
97165 + *            however it does mean that all frame data must be present and valid. */
97166 +ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
97168 +/*! ZSTD_decompressBound() :
97169 + *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
97170 + *  `srcSize` must be the _exact_ size of this series
97171 + *       (i.e. there should be a frame boundary at `src + srcSize`)
97172 + *  @return : - upper-bound for the decompressed size of all data in all successive frames
97173 + *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
97174 + *
97175 + *  note 1  : an error can occur if `src` contains an invalid or incorrectly formatted frame.
97176 + *  note 2  : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
97177 + *            in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
97178 + *  note 3  : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
97179 + *              upper-bound = # blocks * min(128 KB, Window_Size)
97180 + */
97181 +ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
97183 +/*! ZSTD_frameHeaderSize() :
97184 + *  srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
97185 + * @return : size of the Frame Header,
97186 + *           or an error code (if srcSize is too small) */
97187 +ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
97189 +typedef enum {
97190 +  ZSTD_sf_noBlockDelimiters = 0,         /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
97191 +  ZSTD_sf_explicitBlockDelimiters = 1    /* Representation of ZSTD_Sequence contains explicit block delimiters */
97192 +} ZSTD_sequenceFormat_e;
97194 +/*! ZSTD_generateSequences() :
97195 + * Generate sequences using ZSTD_compress2, given a source buffer.
97196 + *
97197 + * Each block will end with a dummy sequence
97198 + * with offset == 0, matchLength == 0, and litLength == length of last literals.
97199 + * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
97200 + * simply acts as a block delimiter.
97201 + *
97202 + * zc can be used to insert custom compression params.
97203 + * This function invokes ZSTD_compress2
97204 + *
97205 + * The output of this function can be fed into ZSTD_compressSequences() with CCtx
97206 + * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
97207 + * @return : number of sequences generated
97208 + */
97210 +ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
97211 +                                          size_t outSeqsSize, const void* src, size_t srcSize);
97213 +/*! ZSTD_mergeBlockDelimiters() :
97214 + * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
97215 + * by merging them into the literals of the next sequence.
97216 + *
97217 + * As such, the final generated result has no explicit representation of block boundaries,
97218 + * and the final last literals segment is not represented in the sequences.
97219 + *
97220 + * The output of this function can be fed into ZSTD_compressSequences() with CCtx
97221 + * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
97222 + * @return : number of sequences left after merging
97223 + */
97224 +ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
97226 +/*! ZSTD_compressSequences() :
97227 + * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
97228 + * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
97229 + * The entire source is compressed into a single frame.
97230 + *
97231 + * The compression behavior changes based on cctx params. In particular:
97232 + *    If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
97233 + *    no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
97234 + *    the block size derived from the cctx, and sequences may be split. This is the default setting.
97235 + *
97236 + *    If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
97237 + *    block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
97238 + *
97239 + *    If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
97240 + *    behavior. If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for
97241 + *    specifics regarding offset/matchlength requirements) then the function will bail out and return an error.
97242 + *
97243 + *    In addition to the two adjustable experimental params, there are other important cctx params.
97244 + *    - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
97245 + *    - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
97246 + *    - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
97247 + *      is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
97248 + *
97249 + * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
97250 + * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
97251 + *         and cannot emit an RLE block that disagrees with the repcode history
97252 + * @return : final compressed size or a ZSTD error.
97253 + */
97254 +ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
97255 +                                  const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
97256 +                                  const void* src, size_t srcSize);
97259 +/*! ZSTD_writeSkippableFrame() :
97260 + * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
97261 + *
97262 + * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
97263 + * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
97264 + * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
97265 + * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
97266 + *
97267 + * Returns an error if destination buffer is not large enough, if the source size is not representable
97268 + * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
97269 + *
97270 + * @return : number of bytes written or a ZSTD error.
97271 + */
97272 +ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
97273 +                                            const void* src, size_t srcSize, unsigned magicVariant);
97276 +/***************************************
97277 +*  Memory management
97278 +***************************************/
97280 +/*! ZSTD_estimate*() :
97281 + *  These functions make it possible to estimate memory usage
97282 + *  of a future {D,C}Ctx, before its creation.
97283 + *
97284 + *  ZSTD_estimateCCtxSize() will provide a memory budget large enough
97285 + *  for any compression level up to selected one.
97286 + *  Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
97287 + *         does not include space for a window buffer.
97288 + *         Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
97289 + *  The estimate will assume the input may be arbitrarily large,
97290 + *  which is the worst case.
97291 + *
97292 + *  When srcSize can be bound by a known and rather "small" value,
97293 + *  this fact can be used to provide a tighter estimation
97294 + *  because the CCtx compression context will need less memory.
97295 + *  This tighter estimation can be provided by more advanced functions
97296 + *  ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
97297 + *  and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
97298 + *  Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
97299 + *
97300 + *  Note 2 : only single-threaded compression is supported.
97301 + *  ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
97302 + */
97303 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
97304 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
97305 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
97306 +ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
97308 +/*! ZSTD_estimateCStreamSize() :
97309 + *  ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
97310 + *  It will also consider src size to be arbitrarily "large", which is worst case.
97311 + *  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
97312 + *  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
97313 + *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
97314 + *  Note : CStream size estimation is only correct for single-threaded compression.
97315 + *  ZSTD_DStream memory budget depends on window Size.
97316 + *  This information can be passed manually, using ZSTD_estimateDStreamSize,
97317 + *  or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
97318 + *  Note : if streaming is init with function ZSTD_init?Stream_usingDict(),
97319 + *         an internal ?Dict will be created, which additional size is not estimated here.
97320 + *         In this case, get total size by adding ZSTD_estimate?DictSize */
97321 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
97322 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
97323 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
97324 +ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
97325 +ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
97327 +/*! ZSTD_estimate?DictSize() :
97328 + *  ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
97329 + *  ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
97330 + *  Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
97331 + */
97332 +ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
97333 +ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
97334 +ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
97336 +/*! ZSTD_initStatic*() :
97337 + *  Initialize an object using a pre-allocated fixed-size buffer.
97338 + *  workspace: The memory area to emplace the object into.
97339 + *             Provided pointer *must be 8-bytes aligned*.
97340 + *             Buffer must outlive object.
97341 + *  workspaceSize: Use ZSTD_estimate*Size() to determine
97342 + *                 how large workspace must be to support target scenario.
97343 + * @return : pointer to object (same address as workspace, just different type),
97344 + *           or NULL if error (size too small, incorrect alignment, etc.)
97345 + *  Note : zstd will never resize nor malloc() when using a static buffer.
97346 + *         If the object requires more memory than available,
97347 + *         zstd will just error out (typically ZSTD_error_memory_allocation).
97348 + *  Note 2 : there is no corresponding "free" function.
97349 + *           Since workspace is allocated externally, it must be freed externally too.
97350 + *  Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
97351 + *           into its associated cParams.
97352 + *  Limitation 1 : currently not compatible with internal dictionary creation, triggered by
97353 + *                 ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
97354 + *  Limitation 2 : static cctx currently not compatible with multi-threading.
97355 + *  Limitation 3 : static dctx is incompatible with legacy support.
97356 + */
97357 +ZSTDLIB_API ZSTD_CCtx*    ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
97358 +ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */
97360 +ZSTDLIB_API ZSTD_DCtx*    ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
97361 +ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */
97363 +ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
97364 +                                        void* workspace, size_t workspaceSize,
97365 +                                        const void* dict, size_t dictSize,
97366 +                                        ZSTD_dictLoadMethod_e dictLoadMethod,
97367 +                                        ZSTD_dictContentType_e dictContentType,
97368 +                                        ZSTD_compressionParameters cParams);
97370 +ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
97371 +                                        void* workspace, size_t workspaceSize,
97372 +                                        const void* dict, size_t dictSize,
97373 +                                        ZSTD_dictLoadMethod_e dictLoadMethod,
97374 +                                        ZSTD_dictContentType_e dictContentType);
97377 +/*! Custom memory allocation :
97378 + *  These prototypes make it possible to pass your own allocation/free functions.
97379 + *  ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
97380 + *  All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
97381 + */
97382 +typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
97383 +typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
97384 +typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
97385 +static
97386 +__attribute__((__unused__))
97387 +ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */
97389 +ZSTDLIB_API ZSTD_CCtx*    ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
97390 +ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
97391 +ZSTDLIB_API ZSTD_DCtx*    ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
97392 +ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
97394 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
97395 +                                                  ZSTD_dictLoadMethod_e dictLoadMethod,
97396 +                                                  ZSTD_dictContentType_e dictContentType,
97397 +                                                  ZSTD_compressionParameters cParams,
97398 +                                                  ZSTD_customMem customMem);
97400 +/* ! Thread pool :
97401 + * These prototypes make it possible to share a thread pool among multiple compression contexts.
97402 + * This can limit resources for applications with multiple threads where each one uses
97403 + * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
97404 + * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
97405 + * Note that the lifetime of such pool must exist while being used.
97406 + * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
97407 + * to use an internal thread pool).
97408 + * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
97409 + */
97410 +typedef struct POOL_ctx_s ZSTD_threadPool;
97411 +ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
97412 +ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool);  /* accept NULL pointer */
97413 +ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
97417 + * This API is temporary and is expected to change or disappear in the future!
97418 + */
97419 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
97420 +    const void* dict, size_t dictSize,
97421 +    ZSTD_dictLoadMethod_e dictLoadMethod,
97422 +    ZSTD_dictContentType_e dictContentType,
97423 +    const ZSTD_CCtx_params* cctxParams,
97424 +    ZSTD_customMem customMem);
97426 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
97427 +    const void* dict, size_t dictSize,
97428 +    ZSTD_dictLoadMethod_e dictLoadMethod,
97429 +    ZSTD_dictContentType_e dictContentType,
97430 +    ZSTD_customMem customMem);
97433 +/***************************************
97434 +*  Advanced compression functions
97435 +***************************************/
97437 +/*! ZSTD_createCDict_byReference() :
97438 + *  Create a digested dictionary for compression
97439 + *  Dictionary content is just referenced, not duplicated.
97440 + *  As a consequence, `dictBuffer` **must** outlive CDict,
97441 + *  and its content must remain unmodified throughout the lifetime of CDict.
97442 + *  note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
97443 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
97445 +/*! ZSTD_getDictID_fromCDict() :
97446 + *  Provides the dictID of the dictionary loaded into `cdict`.
97447 + *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
97448 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
97449 +ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
97451 +/*! ZSTD_getCParams() :
97452 + * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
97453 + * `estimatedSrcSize` value is optional, select 0 if not known */
97454 +ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
97456 +/*! ZSTD_getParams() :
97457 + *  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
97458 + *  All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
97459 +ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
97461 +/*! ZSTD_checkCParams() :
97462 + *  Ensure param values remain within authorized range.
97463 + * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
97464 +ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
97466 +/*! ZSTD_adjustCParams() :
97467 + *  optimize params for a given `srcSize` and `dictSize`.
97468 + * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
97469 + * `dictSize` must be `0` when there is no dictionary.
97470 + *  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
97471 + *  This function never fails (wide contract) */
97472 +ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
97474 +/*! ZSTD_compress_advanced() :
97475 + *  Note : this function is now DEPRECATED.
97476 + *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
97477 + *  This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */
97478 +ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
97479 +                                          void* dst, size_t dstCapacity,
97480 +                                    const void* src, size_t srcSize,
97481 +                                    const void* dict,size_t dictSize,
97482 +                                          ZSTD_parameters params);
97484 +/*! ZSTD_compress_usingCDict_advanced() :
97485 + *  Note : this function is now REDUNDANT.
97486 + *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
97487 + *  This prototype will be marked as deprecated and generate compilation warning in some future version */
97488 +ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
97489 +                                              void* dst, size_t dstCapacity,
97490 +                                        const void* src, size_t srcSize,
97491 +                                        const ZSTD_CDict* cdict,
97492 +                                              ZSTD_frameParameters fParams);
97495 +/*! ZSTD_CCtx_loadDictionary_byReference() :
97496 + *  Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
97497 + *  It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
97498 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
97500 +/*! ZSTD_CCtx_loadDictionary_advanced() :
97501 + *  Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
97502 + *  how to load the dictionary (by copy ? by reference ?)
97503 + *  and how to interpret it (automatic ? force raw mode ? full mode only ?) */
97504 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
97506 +/*! ZSTD_CCtx_refPrefix_advanced() :
97507 + *  Same as ZSTD_CCtx_refPrefix(), but gives finer control over
97508 + *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
97509 +ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
97511 +/* ===   experimental parameters   === */
97512 +/* these parameters can be used with ZSTD_setParameter()
97513 + * they are not guaranteed to remain supported in the future */
97515 + /* Enables rsyncable mode,
97516 +  * which makes compressed files more rsync friendly
97517 +  * by adding periodic synchronization points to the compressed data.
97518 +  * The target average block size is ZSTD_c_jobSize / 2.
97519 +  * It's possible to modify the job size to increase or decrease
97520 +  * the granularity of the synchronization point.
97521 +  * Once the jobSize is smaller than the window size,
97522 +  * it will result in compression ratio degradation.
97523 +  * NOTE 1: rsyncable mode only works when multithreading is enabled.
97524 +  * NOTE 2: rsyncable performs poorly in combination with long range mode,
97525 +  * since it will decrease the effectiveness of synchronization points,
97526 +  * though mileage may vary.
97527 +  * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
97528 +  * If the selected compression level is already running significantly slower,
97529 +  * the overall speed won't be significantly impacted.
97530 +  */
97531 + #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1
97533 +/* Select a compression format.
97534 + * The value must be of type ZSTD_format_e.
97535 + * See ZSTD_format_e enum definition for details */
97536 +#define ZSTD_c_format ZSTD_c_experimentalParam2
97538 +/* Force back-reference distances to remain < windowSize,
97539 + * even when referencing into Dictionary content (default:0) */
97540 +#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3
97542 +/* Controls whether the contents of a CDict
97543 + * are used in place, or copied into the working context.
97544 + * Accepts values from the ZSTD_dictAttachPref_e enum.
97545 + * See the comments on that enum for an explanation of the feature. */
97546 +#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
97548 +/* Controls how the literals are compressed (default is auto).
97549 + * The value must be of type ZSTD_literalCompressionMode_e.
97550 + * See ZSTD_literalCompressionMode_t enum definition for details.
97551 + */
97552 +#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
97554 +/* Tries to fit compressed block size to be around targetCBlockSize.
97555 + * No target when targetCBlockSize == 0.
97556 + * There is no guarantee on compressed block size (default:0) */
97557 +#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
97559 +/* User's best guess of source size.
97560 + * Hint is not valid when srcSizeHint == 0.
97561 + * There is no guarantee that hint is close to actual source size,
97562 + * but compression ratio may regress significantly if guess considerably underestimates */
97563 +#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
97565 +/* Controls whether the new and experimental "dedicated dictionary search
97566 + * structure" can be used. This feature is still rough around the edges, be
97567 + * prepared for surprising behavior!
97568 + *
97569 + * How to use it:
97570 + *
97571 + * When using a CDict, whether to use this feature or not is controlled at
97572 + * CDict creation, and it must be set in a CCtxParams set passed into that
97573 + * construction (via ZSTD_createCDict_advanced2()). A compression will then
97574 + * use the feature or not based on how the CDict was constructed; the value of
97575 + * this param, set in the CCtx, will have no effect.
97576 + *
97577 + * However, when a dictionary buffer is passed into a CCtx, such as via
97578 + * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control
97579 + * whether the CDict that is created internally can use the feature or not.
97580 + *
97581 + * What it does:
97582 + *
97583 + * Normally, the internal data structures of the CDict are analogous to what
97584 + * would be stored in a CCtx after compressing the contents of a dictionary.
97585 + * To an approximation, a compression using a dictionary can then use those
97586 + * data structures to simply continue what is effectively a streaming
97587 + * compression where the simulated compression of the dictionary left off.
97588 + * Which is to say, the search structures in the CDict are normally the same
97589 + * format as in the CCtx.
97590 + *
97591 + * It is possible to do better, since the CDict is not like a CCtx: the search
97592 + * structures are written once during CDict creation, and then are only read
97593 + * after that, while the search structures in the CCtx are both read and
97594 + * written as the compression goes along. This means we can choose a search
97595 + * structure for the dictionary that is read-optimized.
97596 + *
97597 + * This feature enables the use of that different structure.
97598 + *
97599 + * Note that some of the members of the ZSTD_compressionParameters struct have
97600 + * different semantics and constraints in the dedicated search structure. It is
97601 + * highly recommended that you simply set a compression level in the CCtxParams
97602 + * you pass into the CDict creation call, and avoid messing with the cParams
97603 + * directly.
97604 + *
97605 + * Effects:
97606 + *
97607 + * This will only have any effect when the selected ZSTD_strategy
97608 + * implementation supports this feature. Currently, that's limited to
97609 + * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2.
97610 + *
97611 + * Note that this means that the CDict tables can no longer be copied into the
97612 + * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
97613 + * useable. The dictionary can only be attached or reloaded.
97614 + *
97615 + * In general, you should expect compression to be faster--sometimes very much
97616 + * so--and CDict creation to be slightly slower. Eventually, we will probably
97617 + * make this mode the default.
97618 + */
97619 +#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8
97621 +/* ZSTD_c_stableInBuffer
97622 + * Experimental parameter.
97623 + * Default is 0 == disabled. Set to 1 to enable.
97624 + *
97625 + * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
97626 + * between calls, except for the modifications that zstd makes to pos (the
97627 + * caller must not modify pos). This is checked by the compressor, and
97628 + * compression will fail if it ever changes. This means the only flush
97629 + * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
97630 + * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
97631 + * MUST not be modified during compression or you will get data corruption.
97632 + *
97633 + * When this flag is enabled zstd won't allocate an input window buffer,
97634 + * because the user guarantees it can reference the ZSTD_inBuffer until
97635 + * the frame is complete. But, it will still allocate an output buffer
97636 + * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
97637 + * avoid the memcpy() from the input buffer to the input window buffer.
97638 + *
97639 + * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
97640 + * That means this flag cannot be used with ZSTD_compressStream().
97641 + *
97642 + * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
97643 + * this flag is ALWAYS memory safe, and will never access out-of-bounds
97644 + * memory. However, compression WILL fail if you violate the preconditions.
97645 + *
97646 + * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
97647 + * not be modified during compression or you will get data corruption. This
97648 + * is because zstd needs to reference data in the ZSTD_inBuffer to find
97649 + * matches. Normally zstd maintains its own window buffer for this purpose,
97650 + * but passing this flag tells zstd to use the user provided buffer.
97651 + */
97652 +#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
97654 +/* ZSTD_c_stableOutBuffer
97655 + * Experimental parameter.
97656 + * Default is 0 == disabled. Set to 1 to enable.
97657 + *
97658 + * Tells the compressor that the ZSTD_outBuffer will not be resized between
97659 + * calls. Specifically: (out.size - out.pos) will never grow. This gives the
97660 + * compressor the freedom to say: If the compressed data doesn't fit in the
97661 + * output buffer then return ZSTD_error_dstSizeTooSmall. This allows us to
97662 + * always compress directly into the output buffer, instead of compressing
97663 + * into an internal buffer and copying to the output buffer.
97664 + *
97665 + * When this flag is enabled zstd won't allocate an output buffer, because
97666 + * it can write directly to the ZSTD_outBuffer. It will still allocate the
97667 + * input window buffer (see ZSTD_c_stableInBuffer).
97668 + *
97669 + * Zstd will check that (out.size - out.pos) never grows and return an error
97670 + * if it does. While not strictly necessary, this should prevent surprises.
97671 + */
97672 +#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10
97674 +/* ZSTD_c_blockDelimiters
97675 + * Default is 0 == ZSTD_sf_noBlockDelimiters.
97676 + *
97677 + * For use with sequence compression API: ZSTD_compressSequences().
97678 + *
97679 + * Designates whether or not the given array of ZSTD_Sequence contains block delimiters
97680 + * and last literals, which are defined as sequences with offset == 0 and matchLength == 0.
97681 + * See the definition of ZSTD_Sequence for more specifics.
97682 + */
97683 +#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11
97685 +/* ZSTD_c_validateSequences
97686 + * Default is 0 == disabled. Set to 1 to enable sequence validation.
97687 + *
97688 + * For use with sequence compression API: ZSTD_compressSequences().
97689 + * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
97690 + * during function execution.
97691 + *
97692 + * Without validation, providing a sequence that does not conform to the zstd spec will cause
97693 + * undefined behavior, and may produce a corrupted block.
97694 + *
97695 + * With validation enabled, if a sequence is invalid (see doc/zstd_compression_format.md for
97696 + * specifics regarding offset/matchlength requirements) then the function will bail out and
97697 + * return an error.
97698 + *
97699 + */
97700 +#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
97702 +/*! ZSTD_CCtx_getParameter() :
97703 + *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,
97704 + *  and store it into int* value.
97705 + * @return : 0, or an error code (which can be tested with ZSTD_isError()).
97706 + */
97707 +ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
97710 +/*! ZSTD_CCtx_params :
97711 + *  Quick howto :
97712 + *  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
97713 + *  - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
97714 + *                                     an existing ZSTD_CCtx_params structure.
97715 + *                                     This is similar to
97716 + *                                     ZSTD_CCtx_setParameter().
97717 + *  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
97718 + *                                    an existing CCtx.
97719 + *                                    These parameters will be applied to
97720 + *                                    all subsequent frames.
97721 + *  - ZSTD_compressStream2() : Do compression using the CCtx.
97722 + *  - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.
97723 + *
97724 + *  This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
97725 + *  for static allocation of CCtx for single-threaded compression.
97726 + */
97727 +ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
97728 +ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);  /* accept NULL pointer */
97730 +/*! ZSTD_CCtxParams_reset() :
97731 + *  Reset params to default values.
97732 + */
97733 +ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
97735 +/*! ZSTD_CCtxParams_init() :
97736 + *  Initializes the compression parameters of cctxParams according to
97737 + *  compression level. All other parameters are reset to their default values.
97738 + */
97739 +ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
97741 +/*! ZSTD_CCtxParams_init_advanced() :
97742 + *  Initializes the compression and frame parameters of cctxParams according to
97743 + *  params. All other parameters are reset to their default values.
97744 + */
97745 +ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
97747 +/*! ZSTD_CCtxParams_setParameter() :
97748 + *  Similar to ZSTD_CCtx_setParameter.
97749 + *  Set one compression parameter, selected by enum ZSTD_cParameter.
97750 + *  Parameters must be applied to a ZSTD_CCtx using
97751 + *  ZSTD_CCtx_setParametersUsingCCtxParams().
97752 + * @result : a code representing success or failure (which can be tested with
97753 + *           ZSTD_isError()).
97754 + */
97755 +ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
97757 +/*! ZSTD_CCtxParams_getParameter() :
97758 + * Similar to ZSTD_CCtx_getParameter.
97759 + * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
97760 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
97761 + */
97762 +ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
97764 +/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
97765 + *  Apply a set of ZSTD_CCtx_params to the compression context.
97766 + *  This can be done even after compression is started,
97767 + *    if nbWorkers==0, this will have no impact until a new compression is started.
97768 + *    if nbWorkers>=1, new parameters will be picked up at next job,
97769 + *       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
97770 + */
97771 +ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
97772 +        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
97774 +/*! ZSTD_compressStream2_simpleArgs() :
97775 + *  Same as ZSTD_compressStream2(),
97776 + *  but using only integral types as arguments.
97777 + *  This variant might be helpful for binders from dynamic languages
97778 + *  which have troubles handling structures containing memory pointers.
97779 + */
97780 +ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
97781 +                            ZSTD_CCtx* cctx,
97782 +                            void* dst, size_t dstCapacity, size_t* dstPos,
97783 +                      const void* src, size_t srcSize, size_t* srcPos,
97784 +                            ZSTD_EndDirective endOp);
97787 +/***************************************
97788 +*  Advanced decompression functions
97789 +***************************************/
97791 +/*! ZSTD_isFrame() :
97792 + *  Tells if the content of `buffer` starts with a valid Frame Identifier.
97793 + *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
97794 + *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
97795 + *  Note 3 : Skippable Frame Identifiers are considered valid. */
97796 +ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
97798 +/*! ZSTD_createDDict_byReference() :
97799 + *  Create a digested dictionary, ready to start decompression operation without startup delay.
97800 + *  Dictionary content is referenced, and therefore stays in dictBuffer.
97801 + *  It is important that dictBuffer outlives DDict,
97802 + *  it must remain read accessible throughout the lifetime of DDict */
97803 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
97805 +/*! ZSTD_DCtx_loadDictionary_byReference() :
97806 + *  Same as ZSTD_DCtx_loadDictionary(),
97807 + *  but references `dict` content instead of copying it into `dctx`.
97808 + *  This saves memory if `dict` remains around.
97809 + *  However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
97810 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
97812 +/*! ZSTD_DCtx_loadDictionary_advanced() :
97813 + *  Same as ZSTD_DCtx_loadDictionary(),
97814 + *  but gives direct control over
97815 + *  how to load the dictionary (by copy ? by reference ?)
97816 + *  and how to interpret it (automatic ? force raw mode ? full mode only ?). */
97817 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
97819 +/*! ZSTD_DCtx_refPrefix_advanced() :
97820 + *  Same as ZSTD_DCtx_refPrefix(), but gives finer control over
97821 + *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
97822 +ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
97824 +/*! ZSTD_DCtx_setMaxWindowSize() :
97825 + *  Refuses allocating internal buffers for frames requiring a window size larger than provided limit.
97826 + *  This protects a decoder context from reserving too much memory for itself (potential attack scenario).
97827 + *  This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
97828 + *  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
97829 + * @return : 0, or an error code (which can be tested using ZSTD_isError()).
97830 + */
97831 +ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
97833 +/*! ZSTD_DCtx_getParameter() :
97834 + *  Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
97835 + *  and store it into int* value.
97836 + * @return : 0, or an error code (which can be tested with ZSTD_isError()).
97837 + */
97838 +ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
97840 +/* ZSTD_d_format
97841 + * experimental parameter,
97842 + * allowing selection between ZSTD_format_e input compression formats
97843 + */
97844 +#define ZSTD_d_format ZSTD_d_experimentalParam1
97845 +/* ZSTD_d_stableOutBuffer
97846 + * Experimental parameter.
97847 + * Default is 0 == disabled. Set to 1 to enable.
97848 + *
97849 + * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same
97850 + * between calls, except for the modifications that zstd makes to pos (the
97851 + * caller must not modify pos). This is checked by the decompressor, and
97852 + * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer
97853 + * MUST be large enough to fit the entire decompressed frame. This will be
97854 + * checked when the frame content size is known. The data in the ZSTD_outBuffer
97855 + * in the range [dst, dst + pos) MUST not be modified during decompression
97856 + * or you will get data corruption.
97857 + *
97858 + * When this flag is enabled zstd won't allocate an output buffer, because
97859 + * it can write directly to the ZSTD_outBuffer, but it will still allocate
97860 + * an input buffer large enough to fit any compressed block. This will also
97861 + * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
97862 + * If you need to avoid the input buffer allocation use the buffer-less
97863 + * streaming API.
97864 + *
97865 + * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using
97866 + * this flag is ALWAYS memory safe, and will never access out-of-bounds
97867 + * memory. However, decompression WILL fail if you violate the preconditions.
97868 + *
97869 + * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST
97870 + * not be modified during decompression or you will get data corruption. This
97871 + * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate
97872 + * matches. Normally zstd maintains its own buffer for this purpose, but passing
97873 + * this flag tells zstd to use the user provided buffer.
97874 + */
97875 +#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
97877 +/* ZSTD_d_forceIgnoreChecksum
97878 + * Experimental parameter.
97879 + * Default is 0 == disabled. Set to 1 to enable
97880 + *
97881 + * Tells the decompressor to skip checksum validation during decompression, regardless
97882 + * of whether checksumming was specified during compression. This offers some
97883 + * slight performance benefits, and may be useful for debugging.
97884 + * Param has values of type ZSTD_forceIgnoreChecksum_e
97885 + */
97886 +#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3
97888 +/* ZSTD_d_refMultipleDDicts
97889 + * Experimental parameter.
97890 + * Default is 0 == disabled. Set to 1 to enable
97891 + *
97892 + * If enabled and dctx is allocated on the heap, then additional memory will be allocated
97893 + * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_refDDict()
97894 + * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead
97895 + * store all references. At decompression time, the appropriate dictID is selected
97896 + * from the set of DDicts based on the dictID in the frame.
97897 + *
97898 + * Usage is simply calling ZSTD_refDDict() on multiple dict buffers.
97899 + *
97900 + * Param has values of type ZSTD_refMultipleDDicts_e
97901 + *
97902 + * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict(), will trigger memory
97903 + * allocation for the hash table. ZSTD_freeDCtx() also frees this memory.
97904 + * Memory is allocated as per ZSTD_DCtx::customMem.
97905 + *
97906 + * Although this function allocates memory for the table, the user is still responsible for
97907 + * memory management of the underlying ZSTD_DDict* themselves.
97908 + */
97909 +#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
97912 +/*! ZSTD_DCtx_setFormat() :
97913 + *  Instruct the decoder context about what kind of data to decode next.
97914 + *  This instruction is mandatory to decode data without a fully-formed header,
97915 + *  such as ZSTD_f_zstd1_magicless for example.
97916 + * @return : 0, or an error code (which can be tested using ZSTD_isError()). */
97917 +ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
97919 +/*! ZSTD_decompressStream_simpleArgs() :
97920 + *  Same as ZSTD_decompressStream(),
97921 + *  but using only integral types as arguments.
97922 + *  This can be helpful for binders from dynamic languages
97923 + *  which have troubles handling structures containing memory pointers.
97924 + */
97925 +ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
97926 +                            ZSTD_DCtx* dctx,
97927 +                            void* dst, size_t dstCapacity, size_t* dstPos,
97928 +                      const void* src, size_t srcSize, size_t* srcPos);
97931 +/********************************************************************
97932 +*  Advanced streaming functions
97933 +*  Warning : most of these functions are now redundant with the Advanced API.
97934 +*  Once Advanced API reaches "stable" status,
97935 +*  redundant functions will be deprecated, and then at some point removed.
97936 +********************************************************************/
97938 +/*=====   Advanced Streaming compression functions  =====*/
97940 +/*! ZSTD_initCStream_srcSize() :
97941 + * This function is deprecated, and equivalent to:
97942 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
97943 + *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
97944 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
97945 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
97946 + *
97947 + * pledgedSrcSize must be correct. If it is not known at init time, use
97948 + * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
97949 + * "0" also disables frame content size field. It may be enabled in the future.
97950 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
97951 + */
97952 +ZSTDLIB_API size_t
97953 +ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
97954 +                         int compressionLevel,
97955 +                         unsigned long long pledgedSrcSize);
97957 +/*! ZSTD_initCStream_usingDict() :
97958 + * This function is deprecated, and is equivalent to:
97959 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
97960 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
97961 + *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
97962 + *
97963 + * Creates an internal CDict (incompatible with static CCtx), except if
97964 + * dict == NULL or dictSize < 8, in which case no dict is used.
97965 + * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
97966 + * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
97967 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
97968 + */
97969 +ZSTDLIB_API size_t
97970 +ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
97971 +                     const void* dict, size_t dictSize,
97972 +                           int compressionLevel);
97974 +/*! ZSTD_initCStream_advanced() :
97975 + * This function is deprecated, and is approximately equivalent to:
97976 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
97977 + *     // Pseudocode: Set each zstd parameter and leave the rest as-is.
97978 + *     for ((param, value) : params) {
97979 + *         ZSTD_CCtx_setParameter(zcs, param, value);
97980 + *     }
97981 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
97982 + *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
97983 + *
97984 + * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
97985 + * pledgedSrcSize must be correct.
97986 + * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
97987 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
97988 + */
97989 +ZSTDLIB_API size_t
97990 +ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
97991 +                    const void* dict, size_t dictSize,
97992 +                          ZSTD_parameters params,
97993 +                          unsigned long long pledgedSrcSize);
97995 +/*! ZSTD_initCStream_usingCDict() :
97996 + * This function is deprecated, and equivalent to:
97997 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
97998 + *     ZSTD_CCtx_refCDict(zcs, cdict);
97999 + *
98000 + * note : cdict will just be referenced, and must outlive compression session
98001 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
98002 + */
98003 +ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
98005 +/*! ZSTD_initCStream_usingCDict_advanced() :
98006 + *   This function is DEPRECATED, and is approximately equivalent to:
98007 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
98008 + *     // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
98009 + *     for ((fParam, value) : fParams) {
98010 + *         ZSTD_CCtx_setParameter(zcs, fParam, value);
98011 + *     }
98012 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
98013 + *     ZSTD_CCtx_refCDict(zcs, cdict);
98014 + *
98015 + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
98016 + * pledgedSrcSize must be correct. If srcSize is not known at init time, use
98017 + * value ZSTD_CONTENTSIZE_UNKNOWN.
98018 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
98019 + */
98020 +ZSTDLIB_API size_t
98021 +ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
98022 +                               const ZSTD_CDict* cdict,
98023 +                                     ZSTD_frameParameters fParams,
98024 +                                     unsigned long long pledgedSrcSize);
98026 +/*! ZSTD_resetCStream() :
98027 + * This function is deprecated, and is equivalent to:
98028 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
98029 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
98030 + *
98031 + *  start a new frame, using same parameters from previous frame.
98032 + *  This is typically useful to skip dictionary loading stage, since it will re-use it in-place.
98033 + *  Note that zcs must be init at least once before using ZSTD_resetCStream().
98034 + *  If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
98035 + *  If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
98036 + *  For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
98037 + *  but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
98038 + * @return : 0, or an error code (which can be tested using ZSTD_isError())
98039 + *  Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
98040 + */
98041 +ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
98044 +typedef struct {
98045 +    unsigned long long ingested;   /* nb input bytes read and buffered */
98046 +    unsigned long long consumed;   /* nb input bytes actually compressed */
98047 +    unsigned long long produced;   /* nb of compressed bytes generated and buffered */
98048 +    unsigned long long flushed;    /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
98049 +    unsigned currentJobID;         /* MT only : latest started job nb */
98050 +    unsigned nbActiveWorkers;      /* MT only : nb of workers actively compressing at probe time */
98051 +} ZSTD_frameProgression;
98053 +/* ZSTD_getFrameProgression() :
98054 + * tells how much data has been ingested (read from input)
98055 + * consumed (input actually compressed) and produced (output) for current frame.
98056 + * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed.
98057 + * Aggregates progression inside active worker threads.
98058 + */
98059 +ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
98061 +/*! ZSTD_toFlushNow() :
98062 + *  Tell how many bytes are ready to be flushed immediately.
98063 + *  Useful for multithreading scenarios (nbWorkers >= 1).
98064 + *  Probe the oldest active job, defined as oldest job not yet entirely flushed,
98065 + *  and check its output buffer.
98066 + * @return : amount of data stored in oldest job and ready to be flushed immediately.
98067 + *  if @return == 0, it means either :
98068 + *  + there is no active job (could be checked with ZSTD_frameProgression()), or
98069 + *  + oldest job is still actively compressing data,
98070 + *    but everything it has produced has also been flushed so far,
98071 + *    therefore flush speed is limited by production speed of oldest job
98072 + *    irrespective of the speed of concurrent (and newer) jobs.
98073 + */
98074 +ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
98077 +/*=====   Advanced Streaming decompression functions  =====*/
98079 +/*!
98080 + * This function is deprecated, and is equivalent to:
98081 + *
98082 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
98083 + *     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
98084 + *
98085 + * note: no dictionary will be used if dict == NULL or dictSize < 8
98086 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
98087 + */
98088 +ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
98090 +/*!
98091 + * This function is deprecated, and is equivalent to:
98092 + *
98093 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
98094 + *     ZSTD_DCtx_refDDict(zds, ddict);
98095 + *
98096 + * note : ddict is referenced, it must outlive decompression session
98097 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
98098 + */
98099 +ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
98101 +/*!
98102 + * This function is deprecated, and is equivalent to:
98103 + *
98104 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
98105 + *
98106 + * re-use decompression parameters from previous init; saves dictionary loading
98107 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
98108 + */
98109 +ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
98112 +/*********************************************************************
98113 +*  Buffer-less and synchronous inner streaming functions
98115 +*  This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
98116 +*  But it's also a complex one, with several restrictions, documented below.
98117 +*  Prefer normal streaming API for an easier experience.
98118 +********************************************************************* */
98120 +/**
98121 +  Buffer-less streaming compression (synchronous mode)
98123 +  A ZSTD_CCtx object is required to track streaming operations.
98124 +  Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
98125 +  ZSTD_CCtx object can be re-used multiple times within successive compression operations.
98127 +  Start by initializing a context.
98128 +  Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
98129 +  or ZSTD_compressBegin_advanced(), for finer parameter control.
98130 +  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
98132 +  Then, consume your input using ZSTD_compressContinue().
98133 +  There are some important considerations to keep in mind when using this advanced function :
98134 +  - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
98135 +  - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
98136 +  - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
98137 +    Worst case evaluation is provided by ZSTD_compressBound().
98138 +    ZSTD_compressContinue() doesn't guarantee recover after a failed compression.
98139 +  - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
98140 +    It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks)
98141 +  - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.
98142 +    In which case, it will "discard" the relevant memory section from its history.
98144 +  Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
98145 +  It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
98146 +  Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
98148 +  `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
98151 +/*=====   Buffer-less streaming compression functions  =====*/
98152 +ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
98153 +ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
98154 +ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
98155 +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
98156 +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
98157 +ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
98159 +ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
98160 +ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
98163 +/**
98164 +  Buffer-less streaming decompression (synchronous mode)
98166 +  A ZSTD_DCtx object is required to track streaming operations.
98167 +  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
98168 +  A ZSTD_DCtx object can be re-used multiple times.
98170 +  First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
98171 +  Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
98172 +  Data fragment must be large enough to ensure successful decoding.
98173 + `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
98174 +  @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
98175 +           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
98176 +           errorCode, which can be tested using ZSTD_isError().
98178 +  It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
98179 +  such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
98180 +  Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
98181 +  As a consequence, check that values remain within valid application range.
98182 +  For example, do not allocate memory blindly, check that `windowSize` is within expectation.
98183 +  Each application can set its own limits, depending on local restrictions.
98184 +  For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
98186 +  ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
98187 +  ZSTD_decompressContinue() is very sensitive to contiguity,
98188 +  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
98189 +  or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
98190 +  There are multiple ways to guarantee this condition.
98192 +  The most memory efficient way is to use a round buffer of sufficient size.
98193 +  Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
98194 +  which can @return an error code if required value is too large for current system (in 32-bits mode).
98195 +  In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
98196 +  up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
98197 +  which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
98198 +  At which point, decoding can resume from the beginning of the buffer.
98199 +  Note that already decoded data stored in the buffer should be flushed before being overwritten.
98201 +  There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
98203 +  Finally, if you control the compression process, you can also ignore all buffer size rules,
98204 +  as long as the encoder and decoder progress in "lock-step",
98205 +  aka use exactly the same buffer sizes, break contiguity at the same place, etc.
98207 +  Once buffers are setup, start decompression, with ZSTD_decompressBegin().
98208 +  If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
98210 +  Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
98211 +  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
98212 +  ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
98214 + @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
98215 +  It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
98216 +  It can also be an error code, which can be tested with ZSTD_isError().
98218 +  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
98219 +  Context can then be reset to start a new decompression.
98221 +  Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().
98222 +  This information is not required to properly decode a frame.
98224 +  == Special case : skippable frames ==
98226 +  Skippable frames allow integration of user-defined data into a flow of concatenated frames.
98227 +  Skippable frames will be ignored (skipped) by decompressor.
98228 +  The format of skippable frames is as follows :
98229 +  a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
98230 +  b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
98231 +  c) Frame Content - any content (User Data) of length equal to Frame Size
98232 +  For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
98233 +  For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
98236 +/*=====   Buffer-less streaming decompression functions  =====*/
98237 +typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
98238 +typedef struct {
98239 +    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
98240 +    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
98241 +    unsigned blockSizeMax;
98242 +    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
98243 +    unsigned headerSize;
98244 +    unsigned dictID;
98245 +    unsigned checksumFlag;
98246 +} ZSTD_frameHeader;
98248 +/*! ZSTD_getFrameHeader() :
98249 + *  decode Frame Header, or requires larger `srcSize`.
98250 + * @return : 0, `zfhPtr` is correctly filled,
98251 + *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
98252 + *           or an error code, which can be tested using ZSTD_isError() */
98253 +ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
98254 +/*! ZSTD_getFrameHeader_advanced() :
98255 + *  same as ZSTD_getFrameHeader(),
98256 + *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
98257 +ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
98258 +ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
98260 +ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
98261 +ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
98262 +ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
98264 +ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
98265 +ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
98267 +/* misc */
98268 +ZSTDLIB_API void   ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
98269 +typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
98270 +ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
98275 +/* ============================ */
98276 +/**       Block level API       */
98277 +/* ============================ */
98279 +/*!
98280 +    Block functions produce and decode raw zstd blocks, without frame metadata.
98281 +    Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
98282 +    But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.
98284 +    A few rules to respect :
98285 +    - Compressing and decompressing require a context structure
98286 +      + Use ZSTD_createCCtx() and ZSTD_createDCtx()
98287 +    - It is necessary to init context before starting
98288 +      + compression : any ZSTD_compressBegin*() variant, including with dictionary
98289 +      + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
98290 +      + copyCCtx() and copyDCtx() can be used too
98291 +    - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
98292 +      + If input is larger than a block size, it's necessary to split input data into multiple blocks
98293 +      + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
98294 +        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
98295 +    - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
98296 +      ===> In which case, nothing is produced into `dst` !
98297 +      + User __must__ test for such outcome and deal directly with uncompressed data
98298 +      + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
98299 +        Doing so would mess up with statistics history, leading to potential data corruption.
98300 +      + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
98301 +      + In case of multiple successive blocks, should some of them be uncompressed,
98302 +        decoder must be informed of their existence in order to follow proper history.
98303 +        Use ZSTD_insertBlock() for such a case.
98306 +/*=====   Raw zstd block functions  =====*/
98307 +ZSTDLIB_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
98308 +ZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
98309 +ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
98310 +ZSTDLIB_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
98313 +#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
98314 diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
98315 index 167ca8c8424f..2fe4019b749f 100644
98316 --- a/include/media/v4l2-ctrls.h
98317 +++ b/include/media/v4l2-ctrls.h
98318 @@ -301,12 +301,14 @@ struct v4l2_ctrl {
98319   *             the control has been applied. This prevents applying controls
98320   *             from a cluster with multiple controls twice (when the first
98321   *             control of a cluster is applied, they all are).
98322 - * @req:       If set, this refers to another request that sets this control.
98323 + * @valid_p_req: If set, then p_req contains the control value for the request.
98324   * @p_req:     If the control handler containing this control reference
98325   *             is bound to a media request, then this points to the
98326 - *             value of the control that should be applied when the request
98327 + *             value of the control that must be applied when the request
98328   *             is executed, or to the value of the control at the time
98329 - *             that the request was completed.
98330 + *             that the request was completed. If @valid_p_req is false,
98331 + *             then this control was never set for this request and the
98332 + *             control will not be updated when this request is applied.
98333   *
98334   * Each control handler has a list of these refs. The list_head is used to
98335   * keep a sorted-by-control-ID list of all controls, while the next pointer
98336 @@ -319,7 +321,7 @@ struct v4l2_ctrl_ref {
98337         struct v4l2_ctrl_helper *helper;
98338         bool from_other_dev;
98339         bool req_done;
98340 -       struct v4l2_ctrl_ref *req;
98341 +       bool valid_p_req;
98342         union v4l2_ctrl_ptr p_req;
98343  };
98345 @@ -346,7 +348,7 @@ struct v4l2_ctrl_ref {
98346   * @error:     The error code of the first failed control addition.
98347   * @request_is_queued: True if the request was queued.
98348   * @requests:  List to keep track of open control handler request objects.
98349 - *             For the parent control handler (@req_obj.req == NULL) this
98350 + *             For the parent control handler (@req_obj.ops == NULL) this
98351   *             is the list header. When the parent control handler is
98352   *             removed, it has to unbind and put all these requests since
98353   *             they refer to the parent.
98354 diff --git a/include/net/addrconf.h b/include/net/addrconf.h
98355 index 18f783dcd55f..78ea3e332688 100644
98356 --- a/include/net/addrconf.h
98357 +++ b/include/net/addrconf.h
98358 @@ -233,7 +233,6 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
98359  void ipv6_mc_remap(struct inet6_dev *idev);
98360  void ipv6_mc_init_dev(struct inet6_dev *idev);
98361  void ipv6_mc_destroy_dev(struct inet6_dev *idev);
98362 -int ipv6_mc_check_icmpv6(struct sk_buff *skb);
98363  int ipv6_mc_check_mld(struct sk_buff *skb);
98364  void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
98366 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
98367 index ebdd4afe30d2..ca4ac6603b9a 100644
98368 --- a/include/net/bluetooth/hci_core.h
98369 +++ b/include/net/bluetooth/hci_core.h
98370 @@ -704,6 +704,7 @@ struct hci_chan {
98371         struct sk_buff_head data_q;
98372         unsigned int    sent;
98373         __u8            state;
98374 +       bool            amp;
98375  };
98377  struct hci_conn_params {
98378 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
98379 index 3c8c59471bc1..2cdc5a0709fe 100644
98380 --- a/include/net/inet_connection_sock.h
98381 +++ b/include/net/inet_connection_sock.h
98382 @@ -134,8 +134,9 @@ struct inet_connection_sock {
98383         u32                       icsk_probes_tstamp;
98384         u32                       icsk_user_timeout;
98386 -       u64                       icsk_ca_priv[104 / sizeof(u64)];
98387 -#define ICSK_CA_PRIV_SIZE      (13 * sizeof(u64))
98388 +/* XXX inflated by temporary internal debugging info */
98389 +#define ICSK_CA_PRIV_SIZE      (216)
98390 +       u64                       icsk_ca_priv[ICSK_CA_PRIV_SIZE / sizeof(u64)];
98391  };
98393  #define ICSK_TIME_RETRANS      1       /* Retransmit timer */
98394 diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
98395 index 1d34fe154fe0..434a6158852f 100644
98396 --- a/include/net/netfilter/nf_tables_offload.h
98397 +++ b/include/net/netfilter/nf_tables_offload.h
98398 @@ -4,11 +4,16 @@
98399  #include <net/flow_offload.h>
98400  #include <net/netfilter/nf_tables.h>
98402 +enum nft_offload_reg_flags {
98403 +       NFT_OFFLOAD_F_NETWORK2HOST      = (1 << 0),
98406  struct nft_offload_reg {
98407         u32             key;
98408         u32             len;
98409         u32             base_offset;
98410         u32             offset;
98411 +       u32             flags;
98412         struct nft_data data;
98413         struct nft_data mask;
98414  };
98415 @@ -45,6 +50,7 @@ struct nft_flow_key {
98416         struct flow_dissector_key_ports                 tp;
98417         struct flow_dissector_key_ip                    ip;
98418         struct flow_dissector_key_vlan                  vlan;
98419 +       struct flow_dissector_key_vlan                  cvlan;
98420         struct flow_dissector_key_eth_addrs             eth_addrs;
98421         struct flow_dissector_key_meta                  meta;
98422  } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
98423 @@ -71,13 +77,17 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rul
98424  void nft_flow_rule_destroy(struct nft_flow_rule *flow);
98425  int nft_flow_rule_offload_commit(struct net *net);
98427 -#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)                \
98428 +#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags) \
98429         (__reg)->base_offset    =                                       \
98430                 offsetof(struct nft_flow_key, __base);                  \
98431         (__reg)->offset         =                                       \
98432                 offsetof(struct nft_flow_key, __base.__field);          \
98433         (__reg)->len            = __len;                                \
98434         (__reg)->key            = __key;                                \
98435 +       (__reg)->flags          = __flags;
98437 +#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)                \
98438 +       NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
98440  #define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg)  \
98441         NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)         \
98442 diff --git a/include/net/page_pool.h b/include/net/page_pool.h
98443 index b5b195305346..e05744b9a1bc 100644
98444 --- a/include/net/page_pool.h
98445 +++ b/include/net/page_pool.h
98446 @@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
98448  static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
98450 -       return page->dma_addr;
98451 +       dma_addr_t ret = page->dma_addr[0];
98452 +       if (sizeof(dma_addr_t) > sizeof(unsigned long))
98453 +               ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
98454 +       return ret;
98457 +static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
98459 +       page->dma_addr[0] = addr;
98460 +       if (sizeof(dma_addr_t) > sizeof(unsigned long))
98461 +               page->dma_addr[1] = upper_32_bits(addr);
98464  static inline bool is_page_pool_compiled_in(void)
98465 diff --git a/include/net/tcp.h b/include/net/tcp.h
98466 index 963cd86d12dd..5a86fa1d2ff1 100644
98467 --- a/include/net/tcp.h
98468 +++ b/include/net/tcp.h
98469 @@ -799,6 +799,11 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
98470         return max_t(s64, t1 - t0, 0);
98473 +static inline u32 tcp_stamp32_us_delta(u32 t1, u32 t0)
98475 +       return max_t(s32, t1 - t0, 0);
98478  static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
98480         return tcp_ns_to_ts(skb->skb_mstamp_ns);
98481 @@ -866,16 +871,22 @@ struct tcp_skb_cb {
98482         __u32           ack_seq;        /* Sequence number ACK'd        */
98483         union {
98484                 struct {
98485 +#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
98486                         /* There is space for up to 24 bytes */
98487 -                       __u32 in_flight:30,/* Bytes in flight at transmit */
98488 -                             is_app_limited:1, /* cwnd not fully used? */
98489 -                             unused:1;
98490 +                       __u32 is_app_limited:1, /* cwnd not fully used? */
98491 +                             delivered_ce:20,
98492 +                             unused:11;
98493                         /* pkts S/ACKed so far upon tx of skb, incl retrans: */
98494                         __u32 delivered;
98495                         /* start of send pipeline phase */
98496 -                       u64 first_tx_mstamp;
98497 +                       u32 first_tx_mstamp;
98498                         /* when we reached the "delivered" count */
98499 -                       u64 delivered_mstamp;
98500 +                       u32 delivered_mstamp;
98501 +#define TCPCB_IN_FLIGHT_BITS 20
98502 +#define TCPCB_IN_FLIGHT_MAX ((1U << TCPCB_IN_FLIGHT_BITS) - 1)
98503 +                       u32 in_flight:20,   /* packets in flight at transmit */
98504 +                           unused2:12;
98505 +                       u32 lost;       /* packets lost so far upon tx of skb */
98506                 } tx;   /* only used for outgoing skbs */
98507                 union {
98508                         struct inet_skb_parm    h4;
98509 @@ -1025,7 +1036,11 @@ enum tcp_ca_ack_event_flags {
98510  #define TCP_CONG_NON_RESTRICTED 0x1
98511  /* Requires ECN/ECT set on all packets */
98512  #define TCP_CONG_NEEDS_ECN     0x2
98513 -#define TCP_CONG_MASK  (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
98514 +/* Wants notification of CE events (CA_EVENT_ECN_IS_CE, CA_EVENT_ECN_NO_CE). */
98515 +#define TCP_CONG_WANTS_CE_EVENTS       0x4
98516 +#define TCP_CONG_MASK  (TCP_CONG_NON_RESTRICTED | \
98517 +                        TCP_CONG_NEEDS_ECN | \
98518 +                        TCP_CONG_WANTS_CE_EVENTS)
98520  union tcp_cc_info;
98522 @@ -1045,8 +1060,13 @@ struct ack_sample {
98523   */
98524  struct rate_sample {
98525         u64  prior_mstamp; /* starting timestamp for interval */
98526 +       u32  prior_lost;        /* tp->lost at "prior_mstamp" */
98527         u32  prior_delivered;   /* tp->delivered at "prior_mstamp" */
98528 +       u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
98529 +       u32 tx_in_flight;       /* packets in flight at starting timestamp */
98530 +       s32  lost;              /* number of packets lost over interval */
98531         s32  delivered;         /* number of packets delivered over interval */
98532 +       s32  delivered_ce;      /* packets delivered w/ CE mark over interval */
98533         long interval_us;       /* time for tp->delivered to incr "delivered" */
98534         u32 snd_interval_us;    /* snd interval for delivered packets */
98535         u32 rcv_interval_us;    /* rcv interval for delivered packets */
98536 @@ -1057,6 +1077,7 @@ struct rate_sample {
98537         bool is_app_limited;    /* is sample from packet with bubble in pipe? */
98538         bool is_retrans;        /* is sample from retransmission? */
98539         bool is_ack_delayed;    /* is this (likely) a delayed ACK? */
98540 +       bool is_ece;            /* did this ACK have ECN marked? */
98541  };
98543  struct tcp_congestion_ops {
98544 @@ -1083,10 +1104,12 @@ struct tcp_congestion_ops {
98545         u32  (*undo_cwnd)(struct sock *sk);
98546         /* hook for packet ack accounting (optional) */
98547         void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
98548 -       /* override sysctl_tcp_min_tso_segs */
98549 -       u32 (*min_tso_segs)(struct sock *sk);
98550 +       /* pick target number of segments per TSO/GSO skb (optional): */
98551 +       u32 (*tso_segs)(struct sock *sk, unsigned int mss_now);
98552         /* returns the multiplier used in tcp_sndbuf_expand (optional) */
98553         u32 (*sndbuf_expand)(struct sock *sk);
98554 +       /* react to a specific lost skb (optional) */
98555 +       void (*skb_marked_lost)(struct sock *sk, const struct sk_buff *skb);
98556         /* call when packets are delivered to update cwnd and pacing rate,
98557          * after all the ca_state processing. (optional)
98558          */
98559 @@ -1132,6 +1155,14 @@ static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
98561  #endif
98563 +static inline bool tcp_ca_wants_ce_events(const struct sock *sk)
98565 +       const struct inet_connection_sock *icsk = inet_csk(sk);
98567 +       return icsk->icsk_ca_ops->flags & (TCP_CONG_NEEDS_ECN |
98568 +                                          TCP_CONG_WANTS_CE_EVENTS);
98571  static inline bool tcp_ca_needs_ecn(const struct sock *sk)
98573         const struct inet_connection_sock *icsk = inet_csk(sk);
98574 @@ -1157,6 +1188,7 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
98577  /* From tcp_rate.c */
98578 +void tcp_set_tx_in_flight(struct sock *sk, struct sk_buff *skb);
98579  void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
98580  void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
98581                             struct rate_sample *rs);
98582 diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
98583 index 2568cb0627ec..fac8e89aed81 100644
98584 --- a/include/scsi/libfcoe.h
98585 +++ b/include/scsi/libfcoe.h
98586 @@ -249,7 +249,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
98587                          struct fc_frame *);
98589  /* libfcoe funcs */
98590 -u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
98591 +u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int);
98592  int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
98593                       const struct libfc_function_template *, int init_fcp);
98594  u32 fcoe_fc_crc(struct fc_frame *fp);
98595 diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
98596 index 036eb1f5c133..2f01314de73a 100644
98597 --- a/include/trace/events/sunrpc.h
98598 +++ b/include/trace/events/sunrpc.h
98599 @@ -1141,7 +1141,6 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
98601  DEFINE_WRITELOCK_EVENT(reserve_xprt);
98602  DEFINE_WRITELOCK_EVENT(release_xprt);
98603 -DEFINE_WRITELOCK_EVENT(transmit_queued);
98605  DECLARE_EVENT_CLASS(xprt_cong_event,
98606         TP_PROTO(
98607 diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
98608 index ce58cff99b66..2778da551846 100644
98609 --- a/include/uapi/asm-generic/unistd.h
98610 +++ b/include/uapi/asm-generic/unistd.h
98611 @@ -864,8 +864,20 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
98612  #define __NR_mount_setattr 442
98613  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
98615 +#define __NR_futex_wait 443
98616 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
98618 +#define __NR_futex_wake 444
98619 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
98621 +#define __NR_futex_waitv 445
98622 +__SC_COMP(__NR_futex_waitv, sys_futex_waitv, compat_sys_futex_waitv)
98624 +#define __NR_futex_requeue 446
98625 +__SC_COMP(__NR_futex_requeue, sys_futex_requeue, compat_sys_futex_requeue)
98627  #undef __NR_syscalls
98628 -#define __NR_syscalls 443
98629 +#define __NR_syscalls 447
98631  /*
98632   * 32 bit systems traditionally used different
98633 diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
98634 index a89eb0accd5e..afc3245e5728 100644
98635 --- a/include/uapi/linux/futex.h
98636 +++ b/include/uapi/linux/futex.h
98637 @@ -21,6 +21,7 @@
98638  #define FUTEX_WAKE_BITSET      10
98639  #define FUTEX_WAIT_REQUEUE_PI  11
98640  #define FUTEX_CMP_REQUEUE_PI   12
98641 +#define FUTEX_WAIT_MULTIPLE    31
98643  #define FUTEX_PRIVATE_FLAG     128
98644  #define FUTEX_CLOCK_REALTIME   256
98645 @@ -40,6 +41,39 @@
98646                                          FUTEX_PRIVATE_FLAG)
98647  #define FUTEX_CMP_REQUEUE_PI_PRIVATE   (FUTEX_CMP_REQUEUE_PI | \
98648                                          FUTEX_PRIVATE_FLAG)
98649 +#define FUTEX_WAIT_MULTIPLE_PRIVATE    (FUTEX_WAIT_MULTIPLE | \
98650 +                                        FUTEX_PRIVATE_FLAG)
98652 +/* Size argument to futex2 syscall */
98653 +#define FUTEX_32       2
98655 +#define FUTEX_SIZE_MASK        0x3
98657 +#define FUTEX_SHARED_FLAG 8
98659 +#define FUTEX_WAITV_MAX 128
98661 +/**
98662 + * struct futex_waitv - A waiter for vectorized wait
98663 + * @uaddr: User address to wait on
98664 + * @val:   Expected value at uaddr
98665 + * @flags: Flags for this waiter
98666 + */
98667 +struct futex_waitv {
98668 +       void __user *uaddr;
98669 +       unsigned int val;
98670 +       unsigned int flags;
98673 +/**
98674 + * struct futex_requeue - Define an address and its flags for requeue operation
98675 + * @uaddr: User address of one of the requeue arguments
98676 + * @flags: Flags for this address
98677 + */
98678 +struct futex_requeue {
98679 +       void __user *uaddr;
98680 +       unsigned int flags;
98683  /*
98684   * Support for robust futexes: the kernel cleans up held futexes at
98685 @@ -150,4 +184,21 @@ struct robust_list_head {
98686    (((op & 0xf) << 28) | ((cmp & 0xf) << 24)            \
98687     | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
98690 + * Maximum number of multiple futexes to wait for
98691 + */
98692 +#define FUTEX_MULTIPLE_MAX_COUNT       128
98694 +/**
98695 + * struct futex_wait_block - Block of futexes to be waited for
98696 + * @uaddr:     User address of the futex
98697 + * @val:       Futex value expected by userspace
98698 + * @bitset:    Bitset for the optional bitmasked wakeup
98699 + */
98700 +struct futex_wait_block {
98701 +       __u32 __user *uaddr;
98702 +       __u32 val;
98703 +       __u32 bitset;
98706  #endif /* _UAPI_LINUX_FUTEX_H */
98707 diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
98708 index 20ee93f0f876..96d52dd9c48a 100644
98709 --- a/include/uapi/linux/inet_diag.h
98710 +++ b/include/uapi/linux/inet_diag.h
98711 @@ -231,9 +231,42 @@ struct tcp_bbr_info {
98712         __u32   bbr_cwnd_gain;          /* cwnd gain shifted left 8 bits */
98713  };
98715 +/* Phase as reported in netlink/ss stats. */
98716 +enum tcp_bbr2_phase {
98717 +       BBR2_PHASE_INVALID              = 0,
98718 +       BBR2_PHASE_STARTUP              = 1,
98719 +       BBR2_PHASE_DRAIN                = 2,
98720 +       BBR2_PHASE_PROBE_RTT            = 3,
98721 +       BBR2_PHASE_PROBE_BW_UP          = 4,
98722 +       BBR2_PHASE_PROBE_BW_DOWN        = 5,
98723 +       BBR2_PHASE_PROBE_BW_CRUISE      = 6,
98724 +       BBR2_PHASE_PROBE_BW_REFILL      = 7
98727 +struct tcp_bbr2_info {
98728 +       /* u64 bw: bandwidth (app throughput) estimate in Byte per sec: */
98729 +       __u32   bbr_bw_lsb;             /* lower 32 bits of bw */
98730 +       __u32   bbr_bw_msb;             /* upper 32 bits of bw */
98731 +       __u32   bbr_min_rtt;            /* min-filtered RTT in uSec */
98732 +       __u32   bbr_pacing_gain;        /* pacing gain shifted left 8 bits */
98733 +       __u32   bbr_cwnd_gain;          /* cwnd gain shifted left 8 bits */
98734 +       __u32   bbr_bw_hi_lsb;          /* lower 32 bits of bw_hi */
98735 +       __u32   bbr_bw_hi_msb;          /* upper 32 bits of bw_hi */
98736 +       __u32   bbr_bw_lo_lsb;          /* lower 32 bits of bw_lo */
98737 +       __u32   bbr_bw_lo_msb;          /* upper 32 bits of bw_lo */
98738 +       __u8    bbr_mode;               /* current bbr_mode in state machine */
98739 +       __u8    bbr_phase;              /* current state machine phase */
98740 +       __u8    unused1;                /* alignment padding; not used yet */
98741 +       __u8    bbr_version;            /* MUST be at this offset in struct */
98742 +       __u32   bbr_inflight_lo;        /* lower/short-term data volume bound */
98743 +       __u32   bbr_inflight_hi;        /* higher/long-term data volume bound */
98744 +       __u32   bbr_extra_acked;        /* max excess packets ACKed in epoch */
98747  union tcp_cc_info {
98748         struct tcpvegas_info    vegas;
98749         struct tcp_dctcp_info   dctcp;
98750         struct tcp_bbr_info     bbr;
98751 +       struct tcp_bbr2_info    bbr2;
98752  };
98753  #endif /* _UAPI_INET_DIAG_H_ */
98754 diff --git a/include/uapi/linux/netfilter/xt_SECMARK.h b/include/uapi/linux/netfilter/xt_SECMARK.h
98755 index 1f2a708413f5..beb2cadba8a9 100644
98756 --- a/include/uapi/linux/netfilter/xt_SECMARK.h
98757 +++ b/include/uapi/linux/netfilter/xt_SECMARK.h
98758 @@ -20,4 +20,10 @@ struct xt_secmark_target_info {
98759         char secctx[SECMARK_SECCTX_MAX];
98760  };
98762 +struct xt_secmark_target_info_v1 {
98763 +       __u8 mode;
98764 +       char secctx[SECMARK_SECCTX_MAX];
98765 +       __u32 secid;
98768  #endif /*_XT_SECMARK_H_target */
98769 diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
98770 index 900a32e63424..6a3ac496a56c 100644
98771 --- a/include/uapi/linux/tty_flags.h
98772 +++ b/include/uapi/linux/tty_flags.h
98773 @@ -39,7 +39,7 @@
98774   * WARNING: These flags are no longer used and have been superceded by the
98775   *         TTY_PORT_ flags in the iflags field (and not userspace-visible)
98776   */
98777 -#ifndef _KERNEL_
98778 +#ifndef __KERNEL__
98779  #define ASYNCB_INITIALIZED     31 /* Serial port was initialized */
98780  #define ASYNCB_SUSPENDED       30 /* Serial port is suspended */
98781  #define ASYNCB_NORMAL_ACTIVE   29 /* Normal device is active */
98782 @@ -81,7 +81,7 @@
98783  #define ASYNC_SPD_WARP         (ASYNC_SPD_HI|ASYNC_SPD_SHI)
98784  #define ASYNC_SPD_MASK         (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
98786 -#ifndef _KERNEL_
98787 +#ifndef __KERNEL__
98788  /* These flags are no longer used (and were always masked from userspace) */
98789  #define ASYNC_INITIALIZED      (1U << ASYNCB_INITIALIZED)
98790  #define ASYNC_NORMAL_ACTIVE    (1U << ASYNCB_NORMAL_ACTIVE)
98791 diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
98792 index d854cb19c42c..bfdae12cdacf 100644
98793 --- a/include/uapi/linux/usb/video.h
98794 +++ b/include/uapi/linux/usb/video.h
98795 @@ -302,9 +302,10 @@ struct uvc_processing_unit_descriptor {
98796         __u8   bControlSize;
98797         __u8   bmControls[2];
98798         __u8   iProcessing;
98799 +       __u8   bmVideoStandards;
98800  } __attribute__((__packed__));
98802 -#define UVC_DT_PROCESSING_UNIT_SIZE(n)                 (9+(n))
98803 +#define UVC_DT_PROCESSING_UNIT_SIZE(n)                 (10+(n))
98805  /* 3.7.2.6. Extension Unit Descriptor */
98806  struct uvc_extension_unit_descriptor {
98807 diff --git a/init/Kconfig b/init/Kconfig
98808 index 5f5c776ef192..b8054b654d61 100644
98809 --- a/init/Kconfig
98810 +++ b/init/Kconfig
98811 @@ -1220,6 +1220,18 @@ config SCHED_AUTOGROUP
98812           desktop applications.  Task group autogeneration is currently based
98813           upon task session.
98815 +config SCHED_AUTOGROUP_DEFAULT_ENABLED
98816 +       bool "Enable automatic process group scheduling feature"
98817 +       default y
98818 +       depends on SCHED_AUTOGROUP
98819 +       help
98820 +         If set, automatic process group scheduling will be enabled per
98821 +         default but can be disabled through passing autogroup=0 on the
98822 +         kernel commandline during boot or a value of 0 via the file
98823 +         proc/sys/kernel/sched_autogroup_enabled.
98825 +         If unsure say Y.
98827  config SYSFS_DEPRECATED
98828         bool "Enable deprecated sysfs features to support old userspace tools"
98829         depends on SYSFS
98830 @@ -1316,7 +1328,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
98832  config CC_OPTIMIZE_FOR_PERFORMANCE_O3
98833         bool "Optimize more for performance (-O3)"
98834 -       depends on ARC
98835         help
98836           Choosing this option will pass "-O3" to your compiler to optimize
98837           the kernel yet more for performance.
98838 @@ -1537,6 +1548,13 @@ config FUTEX
98839           support for "fast userspace mutexes".  The resulting kernel may not
98840           run glibc-based applications correctly.
98842 +config FUTEX2
98843 +       bool "Enable futex2 support" if EXPERT
98844 +       depends on FUTEX
98845 +       default y
98846 +       help
98847 +         Support for futex2 interface.
98849  config FUTEX_PI
98850         bool
98851         depends on FUTEX && RT_MUTEXES
98852 @@ -2217,8 +2235,8 @@ config MODULE_COMPRESS
98853         bool "Compress modules on installation"
98854         help
98856 -         Compresses kernel modules when 'make modules_install' is run; gzip or
98857 -         xz depending on "Compression algorithm" below.
98858 +         Compresses kernel modules when 'make modules_install' is run; gzip,
98859 +         xz, or zstd depending on "Compression algorithm" below.
98861           module-init-tools MAY support gzip, and kmod MAY support gzip and xz.
98863 @@ -2240,7 +2258,7 @@ choice
98864           This determines which sort of compression will be used during
98865           'make modules_install'.
98867 -         GZIP (default) and XZ are supported.
98868 +         GZIP (default), XZ, and ZSTD are supported.
98870  config MODULE_COMPRESS_GZIP
98871         bool "GZIP"
98872 @@ -2248,6 +2266,9 @@ config MODULE_COMPRESS_GZIP
98873  config MODULE_COMPRESS_XZ
98874         bool "XZ"
98876 +config MODULE_COMPRESS_ZSTD
98877 +       bool "ZSTD"
98879  endchoice
98881  config MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
98882 diff --git a/init/init_task.c b/init/init_task.c
98883 index 3711cdaafed2..8b08c2e19cbb 100644
98884 --- a/init/init_task.c
98885 +++ b/init/init_task.c
98886 @@ -210,7 +210,7 @@ struct task_struct init_task
98887  #ifdef CONFIG_SECURITY
98888         .security       = NULL,
98889  #endif
98890 -#ifdef CONFIG_SECCOMP
98891 +#ifdef CONFIG_SECCOMP_FILTER
98892         .seccomp        = { .filter_count = ATOMIC_INIT(0) },
98893  #endif
98894  };
98895 diff --git a/ipc/namespace.c b/ipc/namespace.c
98896 index 7bd0766ddc3b..2bb05b2dacd1 100644
98897 --- a/ipc/namespace.c
98898 +++ b/ipc/namespace.c
98899 @@ -172,6 +172,23 @@ void put_ipc_ns(struct ipc_namespace *ns)
98900                         schedule_work(&free_ipc_work);
98901         }
98903 +EXPORT_SYMBOL(put_ipc_ns);
98905 +struct ipc_namespace *get_ipc_ns_exported(struct ipc_namespace *ns)
98907 +       return get_ipc_ns(ns);
98909 +EXPORT_SYMBOL(get_ipc_ns_exported);
98911 +struct ipc_namespace *show_init_ipc_ns(void)
98913 +#if defined(CONFIG_IPC_NS)
98914 +       return &init_ipc_ns;
98915 +#else
98916 +       return NULL;
98917 +#endif
98919 +EXPORT_SYMBOL(show_init_ipc_ns);
98921  static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
98923 diff --git a/kernel/.gitignore b/kernel/.gitignore
98924 index 78701ea37c97..5518835ac35c 100644
98925 --- a/kernel/.gitignore
98926 +++ b/kernel/.gitignore
98927 @@ -1,4 +1,5 @@
98928  # SPDX-License-Identifier: GPL-2.0-only
98929 +/config_data
98930  kheaders.md5
98931  timeconst.h
98932  hz.bc
98933 diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
98934 index 38ef6d06888e..b4a1995149d0 100644
98935 --- a/kernel/Kconfig.hz
98936 +++ b/kernel/Kconfig.hz
98937 @@ -5,7 +5,7 @@
98939  choice
98940         prompt "Timer frequency"
98941 -       default HZ_250
98942 +       default HZ_500
98943         help
98944          Allows the configuration of the timer frequency. It is customary
98945          to have the timer interrupt run at 1000 Hz but 100 Hz may be more
98946 @@ -40,6 +40,13 @@ choice
98947          on SMP and NUMA systems and exactly dividing by both PAL and
98948          NTSC frame rates for video and multimedia work.
98950 +       config HZ_500
98951 +               bool "500 HZ"
98952 +       help
98953 +        500 Hz is a balanced timer frequency. Provides fast interactivity
98954 +        on desktops with great smoothness without increasing CPU power
98955 +        consumption and sacrificing the battery life on laptops.
98957         config HZ_1000
98958                 bool "1000 HZ"
98959         help
98960 @@ -53,6 +60,7 @@ config HZ
98961         default 100 if HZ_100
98962         default 250 if HZ_250
98963         default 300 if HZ_300
98964 +       default 500 if HZ_500
98965         default 1000 if HZ_1000
98967  config SCHED_HRTICK
98968 diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
98969 index 416017301660..293725c44cbb 100644
98970 --- a/kernel/Kconfig.preempt
98971 +++ b/kernel/Kconfig.preempt
98972 @@ -2,7 +2,7 @@
98974  choice
98975         prompt "Preemption Model"
98976 -       default PREEMPT_NONE
98977 +       default PREEMPT
98979  config PREEMPT_NONE
98980         bool "No Forced Preemption (Server)"
98981 diff --git a/kernel/Makefile b/kernel/Makefile
98982 index 320f1f3941b7..caf7fca27b62 100644
98983 --- a/kernel/Makefile
98984 +++ b/kernel/Makefile
98985 @@ -57,6 +57,7 @@ obj-$(CONFIG_PROFILING) += profile.o
98986  obj-$(CONFIG_STACKTRACE) += stacktrace.o
98987  obj-y += time/
98988  obj-$(CONFIG_FUTEX) += futex.o
98989 +obj-$(CONFIG_FUTEX2) += futex2.o
98990  obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
98991  obj-$(CONFIG_SMP) += smp.o
98992  ifneq ($(CONFIG_SMP),y)
98993 @@ -138,10 +139,15 @@ obj-$(CONFIG_SCF_TORTURE_TEST) += scftorture.o
98995  $(obj)/configs.o: $(obj)/config_data.gz
98997 -targets += config_data.gz
98998 -$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
98999 +targets += config_data config_data.gz
99000 +$(obj)/config_data.gz: $(obj)/config_data FORCE
99001         $(call if_changed,gzip)
99003 +filechk_cat = cat $<
99005 +$(obj)/config_data: $(KCONFIG_CONFIG) FORCE
99006 +       $(call filechk,cat)
99008  $(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
99010  quiet_cmd_genikh = CHK     $(obj)/kheaders_data.tar.xz
99011 diff --git a/kernel/bounds.c b/kernel/bounds.c
99012 index 9795d75b09b2..a8cbf2d0b11a 100644
99013 --- a/kernel/bounds.c
99014 +++ b/kernel/bounds.c
99015 @@ -22,6 +22,12 @@ int main(void)
99016         DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
99017  #endif
99018         DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
99019 +#ifdef CONFIG_LRU_GEN
99020 +       /* bits needed to represent internal values stored in page->flags */
99021 +       DEFINE(LRU_GEN_WIDTH, order_base_2(CONFIG_NR_LRU_GENS + 1));
99022 +       /* bits needed to represent normalized values for external uses */
99023 +       DEFINE(LRU_GEN_SHIFT, order_base_2(CONFIG_NR_LRU_GENS));
99024 +#endif
99025         /* End of constants */
99027         return 0;
99028 diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
99029 index f25b719ac786..84b3b35fc0d0 100644
99030 --- a/kernel/bpf/ringbuf.c
99031 +++ b/kernel/bpf/ringbuf.c
99032 @@ -221,25 +221,20 @@ static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
99033         return -ENOTSUPP;
99036 -static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb)
99038 -       size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT;
99040 -       /* consumer page + producer page + 2 x data pages */
99041 -       return RINGBUF_POS_PAGES + 2 * data_pages;
99044  static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
99046         struct bpf_ringbuf_map *rb_map;
99047 -       size_t mmap_sz;
99049         rb_map = container_of(map, struct bpf_ringbuf_map, map);
99050 -       mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT;
99052 -       if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz)
99053 -               return -EINVAL;
99055 +       if (vma->vm_flags & VM_WRITE) {
99056 +               /* allow writable mapping for the consumer_pos only */
99057 +               if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
99058 +                       return -EPERM;
99059 +       } else {
99060 +               vma->vm_flags &= ~VM_MAYWRITE;
99061 +       }
99062 +       /* remap_vmalloc_range() checks size and offset constraints */
99063         return remap_vmalloc_range(vma, rb_map->rb,
99064                                    vma->vm_pgoff + RINGBUF_PGOFF);
99066 @@ -315,6 +310,9 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
99067                 return NULL;
99069         len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
99070 +       if (len > rb->mask + 1)
99071 +               return NULL;
99073         cons_pos = smp_load_acquire(&rb->consumer_pos);
99075         if (in_nmi()) {
99076 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
99077 index 0399ac092b36..7fa6fc6bedf1 100644
99078 --- a/kernel/bpf/verifier.c
99079 +++ b/kernel/bpf/verifier.c
99080 @@ -1362,9 +1362,7 @@ static bool __reg64_bound_s32(s64 a)
99082  static bool __reg64_bound_u32(u64 a)
99084 -       if (a > U32_MIN && a < U32_MAX)
99085 -               return true;
99086 -       return false;
99087 +       return a > U32_MIN && a < U32_MAX;
99090  static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
99091 @@ -1375,10 +1373,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
99092                 reg->s32_min_value = (s32)reg->smin_value;
99093                 reg->s32_max_value = (s32)reg->smax_value;
99094         }
99095 -       if (__reg64_bound_u32(reg->umin_value))
99096 +       if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
99097                 reg->u32_min_value = (u32)reg->umin_value;
99098 -       if (__reg64_bound_u32(reg->umax_value))
99099                 reg->u32_max_value = (u32)reg->umax_value;
99100 +       }
99102         /* Intersecting with the old var_off might have improved our bounds
99103          * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
99104 @@ -5952,6 +5950,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
99106         struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
99107         struct bpf_verifier_state *vstate = env->cur_state;
99108 +       bool off_is_imm = tnum_is_const(off_reg->var_off);
99109         bool off_is_neg = off_reg->smin_value < 0;
99110         bool ptr_is_dst_reg = ptr_reg == dst_reg;
99111         u8 opcode = BPF_OP(insn->code);
99112 @@ -5982,6 +5981,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
99113                 alu_limit = abs(tmp_aux->alu_limit - alu_limit);
99114         } else {
99115                 alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
99116 +               alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
99117                 alu_state |= ptr_is_dst_reg ?
99118                              BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
99119         }
99120 @@ -6538,11 +6538,10 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
99121         s32 smin_val = src_reg->s32_min_value;
99122         u32 umax_val = src_reg->u32_max_value;
99124 -       /* Assuming scalar64_min_max_and will be called so its safe
99125 -        * to skip updating register for known 32-bit case.
99126 -        */
99127 -       if (src_known && dst_known)
99128 +       if (src_known && dst_known) {
99129 +               __mark_reg32_known(dst_reg, var32_off.value);
99130                 return;
99131 +       }
99133         /* We get our minimum from the var_off, since that's inherently
99134          * bitwise.  Our maximum is the minimum of the operands' maxima.
99135 @@ -6562,7 +6561,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
99136                 dst_reg->s32_min_value = dst_reg->u32_min_value;
99137                 dst_reg->s32_max_value = dst_reg->u32_max_value;
99138         }
99142  static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
99143 @@ -6609,11 +6607,10 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
99144         s32 smin_val = src_reg->s32_min_value;
99145         u32 umin_val = src_reg->u32_min_value;
99147 -       /* Assuming scalar64_min_max_or will be called so it is safe
99148 -        * to skip updating register for known case.
99149 -        */
99150 -       if (src_known && dst_known)
99151 +       if (src_known && dst_known) {
99152 +               __mark_reg32_known(dst_reg, var32_off.value);
99153                 return;
99154 +       }
99156         /* We get our maximum from the var_off, and our minimum is the
99157          * maximum of the operands' minima
99158 @@ -6678,11 +6675,10 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
99159         struct tnum var32_off = tnum_subreg(dst_reg->var_off);
99160         s32 smin_val = src_reg->s32_min_value;
99162 -       /* Assuming scalar64_min_max_xor will be called so it is safe
99163 -        * to skip updating register for known case.
99164 -        */
99165 -       if (src_known && dst_known)
99166 +       if (src_known && dst_known) {
99167 +               __mark_reg32_known(dst_reg, var32_off.value);
99168                 return;
99169 +       }
99171         /* We get both minimum and maximum from the var32_off. */
99172         dst_reg->u32_min_value = var32_off.value;
99173 @@ -11740,7 +11736,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
99174                         const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
99175                         struct bpf_insn insn_buf[16];
99176                         struct bpf_insn *patch = &insn_buf[0];
99177 -                       bool issrc, isneg;
99178 +                       bool issrc, isneg, isimm;
99179                         u32 off_reg;
99181                         aux = &env->insn_aux_data[i + delta];
99182 @@ -11751,28 +11747,29 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
99183                         isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
99184                         issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
99185                                 BPF_ALU_SANITIZE_SRC;
99186 +                       isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
99188                         off_reg = issrc ? insn->src_reg : insn->dst_reg;
99189 -                       if (isneg)
99190 -                               *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
99191 -                       *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
99192 -                       *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
99193 -                       *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
99194 -                       *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
99195 -                       *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
99196 -                       if (issrc) {
99197 -                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
99198 -                                                        off_reg);
99199 -                               insn->src_reg = BPF_REG_AX;
99200 +                       if (isimm) {
99201 +                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
99202                         } else {
99203 -                               *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
99204 -                                                        BPF_REG_AX);
99205 +                               if (isneg)
99206 +                                       *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
99207 +                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
99208 +                               *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
99209 +                               *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
99210 +                               *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
99211 +                               *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
99212 +                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
99213                         }
99214 +                       if (!issrc)
99215 +                               *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
99216 +                       insn->src_reg = BPF_REG_AX;
99217                         if (isneg)
99218                                 insn->code = insn->code == code_add ?
99219                                              code_sub : code_add;
99220                         *patch++ = *insn;
99221 -                       if (issrc && isneg)
99222 +                       if (issrc && isneg && !isimm)
99223                                 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
99224                         cnt = patch - insn_buf;
99226 diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
99227 index c10e855a03bc..fe4c01c14ab2 100644
99228 --- a/kernel/dma/swiotlb.c
99229 +++ b/kernel/dma/swiotlb.c
99230 @@ -608,7 +608,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
99231                 enum dma_data_direction dir, unsigned long attrs)
99233         unsigned int offset = swiotlb_align_offset(dev, orig_addr);
99234 -       unsigned int index, i;
99235 +       unsigned int i;
99236 +       int index;
99237         phys_addr_t tlb_addr;
99239         if (no_iotlb_memory)
99240 diff --git a/kernel/events/core.c b/kernel/events/core.c
99241 index 03db40f6cba9..c24ea952e7ae 100644
99242 --- a/kernel/events/core.c
99243 +++ b/kernel/events/core.c
99244 @@ -2204,6 +2204,26 @@ static void perf_group_detach(struct perf_event *event)
99245         perf_event__header_size(leader);
99248 +static void sync_child_event(struct perf_event *child_event);
99250 +static void perf_child_detach(struct perf_event *event)
99252 +       struct perf_event *parent_event = event->parent;
99254 +       if (!(event->attach_state & PERF_ATTACH_CHILD))
99255 +               return;
99257 +       event->attach_state &= ~PERF_ATTACH_CHILD;
99259 +       if (WARN_ON_ONCE(!parent_event))
99260 +               return;
99262 +       lockdep_assert_held(&parent_event->child_mutex);
99264 +       sync_child_event(event);
99265 +       list_del_init(&event->child_list);
99268  static bool is_orphaned_event(struct perf_event *event)
99270         return event->state == PERF_EVENT_STATE_DEAD;
99271 @@ -2311,6 +2331,7 @@ group_sched_out(struct perf_event *group_event,
99274  #define DETACH_GROUP   0x01UL
99275 +#define DETACH_CHILD   0x02UL
99277  /*
99278   * Cross CPU call to remove a performance event
99279 @@ -2334,6 +2355,8 @@ __perf_remove_from_context(struct perf_event *event,
99280         event_sched_out(event, cpuctx, ctx);
99281         if (flags & DETACH_GROUP)
99282                 perf_group_detach(event);
99283 +       if (flags & DETACH_CHILD)
99284 +               perf_child_detach(event);
99285         list_del_event(event, ctx);
99287         if (!ctx->nr_events && ctx->is_active) {
99288 @@ -2362,25 +2385,21 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
99290         lockdep_assert_held(&ctx->mutex);
99292 -       event_function_call(event, __perf_remove_from_context, (void *)flags);
99294         /*
99295 -        * The above event_function_call() can NO-OP when it hits
99296 -        * TASK_TOMBSTONE. In that case we must already have been detached
99297 -        * from the context (by perf_event_exit_event()) but the grouping
99298 -        * might still be in-tact.
99299 +        * Because of perf_event_exit_task(), perf_remove_from_context() ought
99300 +        * to work in the face of TASK_TOMBSTONE, unlike every other
99301 +        * event_function_call() user.
99302          */
99303 -       WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
99304 -       if ((flags & DETACH_GROUP) &&
99305 -           (event->attach_state & PERF_ATTACH_GROUP)) {
99306 -               /*
99307 -                * Since in that case we cannot possibly be scheduled, simply
99308 -                * detach now.
99309 -                */
99310 -               raw_spin_lock_irq(&ctx->lock);
99311 -               perf_group_detach(event);
99312 +       raw_spin_lock_irq(&ctx->lock);
99313 +       if (!ctx->is_active) {
99314 +               __perf_remove_from_context(event, __get_cpu_context(ctx),
99315 +                                          ctx, (void *)flags);
99316                 raw_spin_unlock_irq(&ctx->lock);
99317 +               return;
99318         }
99319 +       raw_spin_unlock_irq(&ctx->lock);
99321 +       event_function_call(event, __perf_remove_from_context, (void *)flags);
99324  /*
99325 @@ -11829,12 +11848,12 @@ SYSCALL_DEFINE5(perf_event_open,
99326                         return err;
99327         }
99329 -       err = security_locked_down(LOCKDOWN_PERF);
99330 -       if (err && (attr.sample_type & PERF_SAMPLE_REGS_INTR))
99331 -               /* REGS_INTR can leak data, lockdown must prevent this */
99332 -               return err;
99334 -       err = 0;
99335 +       /* REGS_INTR can leak data, lockdown must prevent this */
99336 +       if (attr.sample_type & PERF_SAMPLE_REGS_INTR) {
99337 +               err = security_locked_down(LOCKDOWN_PERF);
99338 +               if (err)
99339 +                       return err;
99340 +       }
99342         /*
99343          * In cgroup mode, the pid argument is used to pass the fd
99344 @@ -12373,14 +12392,17 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
99346  EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
99348 -static void sync_child_event(struct perf_event *child_event,
99349 -                              struct task_struct *child)
99350 +static void sync_child_event(struct perf_event *child_event)
99352         struct perf_event *parent_event = child_event->parent;
99353         u64 child_val;
99355 -       if (child_event->attr.inherit_stat)
99356 -               perf_event_read_event(child_event, child);
99357 +       if (child_event->attr.inherit_stat) {
99358 +               struct task_struct *task = child_event->ctx->task;
99360 +               if (task && task != TASK_TOMBSTONE)
99361 +                       perf_event_read_event(child_event, task);
99362 +       }
99364         child_val = perf_event_count(child_event);
99366 @@ -12395,60 +12417,53 @@ static void sync_child_event(struct perf_event *child_event,
99369  static void
99370 -perf_event_exit_event(struct perf_event *child_event,
99371 -                     struct perf_event_context *child_ctx,
99372 -                     struct task_struct *child)
99373 +perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
99375 -       struct perf_event *parent_event = child_event->parent;
99376 +       struct perf_event *parent_event = event->parent;
99377 +       unsigned long detach_flags = 0;
99379 -       /*
99380 -        * Do not destroy the 'original' grouping; because of the context
99381 -        * switch optimization the original events could've ended up in a
99382 -        * random child task.
99383 -        *
99384 -        * If we were to destroy the original group, all group related
99385 -        * operations would cease to function properly after this random
99386 -        * child dies.
99387 -        *
99388 -        * Do destroy all inherited groups, we don't care about those
99389 -        * and being thorough is better.
99390 -        */
99391 -       raw_spin_lock_irq(&child_ctx->lock);
99392 -       WARN_ON_ONCE(child_ctx->is_active);
99393 +       if (parent_event) {
99394 +               /*
99395 +                * Do not destroy the 'original' grouping; because of the
99396 +                * context switch optimization the original events could've
99397 +                * ended up in a random child task.
99398 +                *
99399 +                * If we were to destroy the original group, all group related
99400 +                * operations would cease to function properly after this
99401 +                * random child dies.
99402 +                *
99403 +                * Do destroy all inherited groups, we don't care about those
99404 +                * and being thorough is better.
99405 +                */
99406 +               detach_flags = DETACH_GROUP | DETACH_CHILD;
99407 +               mutex_lock(&parent_event->child_mutex);
99408 +       }
99410 -       if (parent_event)
99411 -               perf_group_detach(child_event);
99412 -       list_del_event(child_event, child_ctx);
99413 -       perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
99414 -       raw_spin_unlock_irq(&child_ctx->lock);
99415 +       perf_remove_from_context(event, detach_flags);
99417 +       raw_spin_lock_irq(&ctx->lock);
99418 +       if (event->state > PERF_EVENT_STATE_EXIT)
99419 +               perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
99420 +       raw_spin_unlock_irq(&ctx->lock);
99422         /*
99423 -        * Parent events are governed by their filedesc, retain them.
99424 +        * Child events can be freed.
99425          */
99426 -       if (!parent_event) {
99427 -               perf_event_wakeup(child_event);
99428 +       if (parent_event) {
99429 +               mutex_unlock(&parent_event->child_mutex);
99430 +               /*
99431 +                * Kick perf_poll() for is_event_hup();
99432 +                */
99433 +               perf_event_wakeup(parent_event);
99434 +               free_event(event);
99435 +               put_event(parent_event);
99436                 return;
99437         }
99438 -       /*
99439 -        * Child events can be cleaned up.
99440 -        */
99442 -       sync_child_event(child_event, child);
99444         /*
99445 -        * Remove this event from the parent's list
99446 -        */
99447 -       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
99448 -       mutex_lock(&parent_event->child_mutex);
99449 -       list_del_init(&child_event->child_list);
99450 -       mutex_unlock(&parent_event->child_mutex);
99452 -       /*
99453 -        * Kick perf_poll() for is_event_hup().
99454 +        * Parent events are governed by their filedesc, retain them.
99455          */
99456 -       perf_event_wakeup(parent_event);
99457 -       free_event(child_event);
99458 -       put_event(parent_event);
99459 +       perf_event_wakeup(event);
99462  static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
99463 @@ -12505,7 +12520,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
99464         perf_event_task(child, child_ctx, 0);
99466         list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
99467 -               perf_event_exit_event(child_event, child_ctx, child);
99468 +               perf_event_exit_event(child_event, child_ctx);
99470         mutex_unlock(&child_ctx->mutex);
99472 @@ -12765,6 +12780,7 @@ inherit_event(struct perf_event *parent_event,
99473          */
99474         raw_spin_lock_irqsave(&child_ctx->lock, flags);
99475         add_event_to_ctx(child_event, child_ctx);
99476 +       child_event->attach_state |= PERF_ATTACH_CHILD;
99477         raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
99479         /*
99480 diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
99481 index 6addc9780319..4e93e5602723 100644
99482 --- a/kernel/events/uprobes.c
99483 +++ b/kernel/events/uprobes.c
99484 @@ -184,7 +184,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
99485         if (new_page) {
99486                 get_page(new_page);
99487                 page_add_new_anon_rmap(new_page, vma, addr, false);
99488 -               lru_cache_add_inactive_or_unevictable(new_page, vma);
99489 +               lru_cache_add_page_vma(new_page, vma, false);
99490         } else
99491                 /* no new page, just dec_mm_counter for old_page */
99492                 dec_mm_counter(mm, MM_ANONPAGES);
99493 diff --git a/kernel/exit.c b/kernel/exit.c
99494 index 04029e35e69a..e4292717ce37 100644
99495 --- a/kernel/exit.c
99496 +++ b/kernel/exit.c
99497 @@ -422,6 +422,7 @@ void mm_update_next_owner(struct mm_struct *mm)
99498                 goto retry;
99499         }
99500         WRITE_ONCE(mm->owner, c);
99501 +       lru_gen_migrate_mm(mm);
99502         task_unlock(c);
99503         put_task_struct(c);
99505 diff --git a/kernel/fork.c b/kernel/fork.c
99506 index 426cd0c51f9e..c54400f24fb2 100644
99507 --- a/kernel/fork.c
99508 +++ b/kernel/fork.c
99509 @@ -107,6 +107,11 @@
99511  #define CREATE_TRACE_POINTS
99512  #include <trace/events/task.h>
99513 +#ifdef CONFIG_USER_NS
99514 +extern int unprivileged_userns_clone;
99515 +#else
99516 +#define unprivileged_userns_clone 0
99517 +#endif
99519  /*
99520   * Minimum number of threads to boot the kernel
99521 @@ -665,6 +670,7 @@ static void check_mm(struct mm_struct *mm)
99522  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
99523         VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
99524  #endif
99525 +       VM_BUG_ON_MM(lru_gen_mm_is_active(mm), mm);
99528  #define allocate_mm()  (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
99529 @@ -1055,6 +1061,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
99530                 goto fail_nocontext;
99532         mm->user_ns = get_user_ns(user_ns);
99533 +       lru_gen_init_mm(mm);
99534         return mm;
99536  fail_nocontext:
99537 @@ -1097,6 +1104,7 @@ static inline void __mmput(struct mm_struct *mm)
99538         }
99539         if (mm->binfmt)
99540                 module_put(mm->binfmt->module);
99541 +       lru_gen_del_mm(mm);
99542         mmdrop(mm);
99545 @@ -1128,6 +1136,7 @@ void mmput_async(struct mm_struct *mm)
99546                 schedule_work(&mm->async_put_work);
99547         }
99549 +EXPORT_SYMBOL(mmput_async);
99550  #endif
99552  /**
99553 @@ -1316,6 +1325,8 @@ static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
99554                         put_user(0, tsk->clear_child_tid);
99555                         do_futex(tsk->clear_child_tid, FUTEX_WAKE,
99556                                         1, NULL, NULL, 0, 0);
99557 +                       ksys_futex_wake(tsk->clear_child_tid, 1,
99558 +                                       FUTEX_32 | FUTEX_SHARED_FLAG);
99559                 }
99560                 tsk->clear_child_tid = NULL;
99561         }
99562 @@ -1872,6 +1883,10 @@ static __latent_entropy struct task_struct *copy_process(
99563         if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
99564                 return ERR_PTR(-EINVAL);
99566 +       if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
99567 +               if (!capable(CAP_SYS_ADMIN))
99568 +                       return ERR_PTR(-EPERM);
99570         /*
99571          * Thread groups must share signals as well, and detached threads
99572          * can only be started up within the thread group.
99573 @@ -2521,6 +2536,13 @@ pid_t kernel_clone(struct kernel_clone_args *args)
99574                 get_task_struct(p);
99575         }
99577 +       if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
99578 +               /* lock the task to synchronize with memcg migration */
99579 +               task_lock(p);
99580 +               lru_gen_add_mm(p->mm);
99581 +               task_unlock(p);
99582 +       }
99584         wake_up_new_task(p);
99586         /* forking complete and child started to run, tell ptracer */
99587 @@ -2971,6 +2993,12 @@ int ksys_unshare(unsigned long unshare_flags)
99588         if (unshare_flags & CLONE_NEWNS)
99589                 unshare_flags |= CLONE_FS;
99591 +       if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
99592 +               err = -EPERM;
99593 +               if (!capable(CAP_SYS_ADMIN))
99594 +                       goto bad_unshare_out;
99595 +       }
99597         err = check_unshare_flags(unshare_flags);
99598         if (err)
99599                 goto bad_unshare_out;
99600 diff --git a/kernel/futex.c b/kernel/futex.c
99601 index 00febd6dea9c..f923d2da4b40 100644
99602 --- a/kernel/futex.c
99603 +++ b/kernel/futex.c
99604 @@ -198,6 +198,8 @@ struct futex_pi_state {
99605   * @rt_waiter:         rt_waiter storage for use with requeue_pi
99606   * @requeue_pi_key:    the requeue_pi target futex key
99607   * @bitset:            bitset for the optional bitmasked wakeup
99608 + * @uaddr:             userspace address of futex
99609 + * @uval:              expected futex's value
99610   *
99611   * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
99612   * we can wake only the relevant ones (hashed queues may be shared).
99613 @@ -220,6 +222,8 @@ struct futex_q {
99614         struct rt_mutex_waiter *rt_waiter;
99615         union futex_key *requeue_pi_key;
99616         u32 bitset;
99617 +       u32 __user *uaddr;
99618 +       u32 uval;
99619  } __randomize_layout;
99621  static const struct futex_q futex_q_init = {
99622 @@ -2313,6 +2317,29 @@ static int unqueue_me(struct futex_q *q)
99623         return ret;
99626 +/**
99627 + * unqueue_multiple() - Remove several futexes from their futex_hash_bucket
99628 + * @q: The list of futexes to unqueue
99629 + * @count: Number of futexes in the list
99630 + *
99631 + * Helper to unqueue a list of futexes. This can't fail.
99632 + *
99633 + * Return:
99634 + *  - >=0 - Index of the last futex that was awoken;
99635 + *  - -1  - If no futex was awoken
99636 + */
99637 +static int unqueue_multiple(struct futex_q *q, int count)
99639 +       int ret = -1;
99640 +       int i;
99642 +       for (i = 0; i < count; i++) {
99643 +               if (!unqueue_me(&q[i]))
99644 +                       ret = i;
99645 +       }
99646 +       return ret;
99649  /*
99650   * PI futexes can not be requeued and must remove themself from the
99651   * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
99652 @@ -2680,6 +2707,205 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
99653         return ret;
99656 +/**
99657 + * futex_wait_multiple_setup() - Prepare to wait and enqueue multiple futexes
99658 + * @qs:                The corresponding futex list
99659 + * @count:     The size of the lists
99660 + * @flags:     Futex flags (FLAGS_SHARED, etc.)
99661 + * @awaken:    Index of the last awoken futex
99662 + *
99663 + * Prepare multiple futexes in a single step and enqueue them. This may fail if
99664 + * the futex list is invalid or if any futex was already awoken. On success the
99665 + * task is ready to interruptible sleep.
99666 + *
99667 + * Return:
99668 + *  -  1 - One of the futexes was awaken by another thread
99669 + *  -  0 - Success
99670 + *  - <0 - -EFAULT, -EWOULDBLOCK or -EINVAL
99671 + */
99672 +static int futex_wait_multiple_setup(struct futex_q *qs, int count,
99673 +                                    unsigned int flags, int *awaken)
99675 +       struct futex_hash_bucket *hb;
99676 +       int ret, i;
99677 +       u32 uval;
99679 +       /*
99680 +        * Enqueuing multiple futexes is tricky, because we need to
99681 +        * enqueue each futex in the list before dealing with the next
99682 +        * one to avoid deadlocking on the hash bucket.  But, before
99683 +        * enqueuing, we need to make sure that current->state is
99684 +        * TASK_INTERRUPTIBLE, so we don't absorb any awake events, which
99685 +        * cannot be done before the get_futex_key of the next key,
99686 +        * because it calls get_user_pages, which can sleep.  Thus, we
99687 +        * fetch the list of futexes keys in two steps, by first pinning
99688 +        * all the memory keys in the futex key, and only then we read
99689 +        * each key and queue the corresponding futex.
99690 +        */
99691 +retry:
99692 +       for (i = 0; i < count; i++) {
99693 +               qs[i].key = FUTEX_KEY_INIT;
99694 +               ret = get_futex_key(qs[i].uaddr, flags & FLAGS_SHARED,
99695 +                                   &qs[i].key, FUTEX_READ);
99696 +               if (unlikely(ret)) {
99697 +                       return ret;
99698 +               }
99699 +       }
99701 +       set_current_state(TASK_INTERRUPTIBLE);
99703 +       for (i = 0; i < count; i++) {
99704 +               struct futex_q *q = &qs[i];
99706 +               hb = queue_lock(q);
99708 +               ret = get_futex_value_locked(&uval, q->uaddr);
99709 +               if (ret) {
99710 +                       /*
99711 +                        * We need to try to handle the fault, which
99712 +                        * cannot be done without sleep, so we need to
99713 +                        * undo all the work already done, to make sure
99714 +                        * we don't miss any wake ups.  Therefore, clean
99715 +                        * up, handle the fault and retry from the
99716 +                        * beginning.
99717 +                        */
99718 +                       queue_unlock(hb);
99720 +                       /*
99721 +                        * Keys 0..(i-1) are implicitly put
99722 +                        * on unqueue_multiple.
99723 +                        */
99724 +                       *awaken = unqueue_multiple(qs, i);
99726 +                       __set_current_state(TASK_RUNNING);
99728 +                       /*
99729 +                        * On a real fault, prioritize the error even if
99730 +                        * some other futex was awoken.  Userspace gave
99731 +                        * us a bad address, -EFAULT them.
99732 +                        */
99733 +                       ret = get_user(uval, q->uaddr);
99734 +                       if (ret)
99735 +                               return ret;
99737 +                       /*
99738 +                        * Even if the page fault was handled, If
99739 +                        * something was already awaken, we can safely
99740 +                        * give up and succeed to give a hint for userspace to
99741 +                        * acquire the right futex faster.
99742 +                        */
99743 +                       if (*awaken >= 0)
99744 +                               return 1;
99746 +                       goto retry;
99747 +               }
99749 +               if (uval != q->uval) {
99750 +                       queue_unlock(hb);
99752 +                       /*
99753 +                        * If something was already awaken, we can
99754 +                        * safely ignore the error and succeed.
99755 +                        */
99756 +                       *awaken = unqueue_multiple(qs, i);
99757 +                       __set_current_state(TASK_RUNNING);
99758 +                       if (*awaken >= 0)
99759 +                               return 1;
99761 +                       return -EWOULDBLOCK;
99762 +               }
99764 +               /*
99765 +                * The bucket lock can't be held while dealing with the
99766 +                * next futex. Queue each futex at this moment so hb can
99767 +                * be unlocked.
99768 +                */
99769 +               queue_me(&qs[i], hb);
99770 +       }
99771 +       return 0;
99774 +/**
99775 + * futex_wait_multiple() - Prepare to wait on and enqueue several futexes
99776 + * @qs:                The list of futexes to wait on
99777 + * @op:                Operation code from futex's syscall
99778 + * @count:     The number of objects
99779 + * @abs_time:  Timeout before giving up and returning to userspace
99780 + *
99781 + * Entry point for the FUTEX_WAIT_MULTIPLE futex operation, this function
99782 + * sleeps on a group of futexes and returns on the first futex that
99783 + * triggered, or after the timeout has elapsed.
99784 + *
99785 + * Return:
99786 + *  - >=0 - Hint to the futex that was awoken
99787 + *  - <0  - On error
99788 + */
99789 +static int futex_wait_multiple(struct futex_q *qs, int op,
99790 +                              u32 count, ktime_t *abs_time)
99792 +       struct hrtimer_sleeper timeout, *to;
99793 +       int ret, flags = 0, hint = 0;
99794 +       unsigned int i;
99796 +       if (!(op & FUTEX_PRIVATE_FLAG))
99797 +               flags |= FLAGS_SHARED;
99799 +       if (op & FUTEX_CLOCK_REALTIME)
99800 +               flags |= FLAGS_CLOCKRT;
99802 +       to = futex_setup_timer(abs_time, &timeout, flags, 0);
99803 +       while (1) {
99804 +               ret = futex_wait_multiple_setup(qs, count, flags, &hint);
99805 +               if (ret) {
99806 +                       if (ret > 0) {
99807 +                               /* A futex was awaken during setup */
99808 +                               ret = hint;
99809 +                       }
99810 +                       break;
99811 +               }
99813 +               if (to)
99814 +                       hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
99816 +               /*
99817 +                * Avoid sleeping if another thread already tried to
99818 +                * wake us.
99819 +                */
99820 +               for (i = 0; i < count; i++) {
99821 +                       if (plist_node_empty(&qs[i].list))
99822 +                               break;
99823 +               }
99825 +               if (i == count && (!to || to->task))
99826 +                       freezable_schedule();
99828 +               ret = unqueue_multiple(qs, count);
99830 +               __set_current_state(TASK_RUNNING);
99832 +               if (ret >= 0)
99833 +                       break;
99834 +               if (to && !to->task) {
99835 +                       ret = -ETIMEDOUT;
99836 +                       break;
99837 +               } else if (signal_pending(current)) {
99838 +                       ret = -ERESTARTSYS;
99839 +                       break;
99840 +               }
99841 +               /*
99842 +                * The final case is a spurious wakeup, for
99843 +                * which just retry.
99844 +                */
99845 +       }
99847 +       if (to) {
99848 +               hrtimer_cancel(&to->timer);
99849 +               destroy_hrtimer_on_stack(&to->timer);
99850 +       }
99852 +       return ret;
99855  static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
99856                       ktime_t *abs_time, u32 bitset)
99858 @@ -3711,8 +3937,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
99860         if (op & FUTEX_CLOCK_REALTIME) {
99861                 flags |= FLAGS_CLOCKRT;
99862 -               if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
99863 -                   cmd != FUTEX_WAIT_REQUEUE_PI)
99864 +               if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
99865                         return -ENOSYS;
99866         }
99868 @@ -3759,6 +3984,43 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
99869         return -ENOSYS;
99872 +/**
99873 + * futex_read_wait_block - Read an array of futex_wait_block from userspace
99874 + * @uaddr:     Userspace address of the block
99875 + * @count:     Number of blocks to be read
99876 + *
99877 + * This function creates and allocate an array of futex_q (we zero it to
99878 + * initialize the fields) and then, for each futex_wait_block element from
99879 + * userspace, fill a futex_q element with proper values.
99880 + */
99881 +inline struct futex_q *futex_read_wait_block(u32 __user *uaddr, u32 count)
99883 +       unsigned int i;
99884 +       struct futex_q *qs;
99885 +       struct futex_wait_block fwb;
99886 +       struct futex_wait_block __user *entry =
99887 +               (struct futex_wait_block __user *)uaddr;
99889 +       if (!count || count > FUTEX_MULTIPLE_MAX_COUNT)
99890 +               return ERR_PTR(-EINVAL);
99892 +       qs = kcalloc(count, sizeof(*qs), GFP_KERNEL);
99893 +       if (!qs)
99894 +               return ERR_PTR(-ENOMEM);
99896 +       for (i = 0; i < count; i++) {
99897 +               if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) {
99898 +                       kfree(qs);
99899 +                       return ERR_PTR(-EFAULT);
99900 +               }
99902 +               qs[i].uaddr = fwb.uaddr;
99903 +               qs[i].uval = fwb.val;
99904 +               qs[i].bitset = fwb.bitset;
99905 +       }
99907 +       return qs;
99910  SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
99911                 const struct __kernel_timespec __user *, utime,
99912 @@ -3771,7 +4033,8 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
99914         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
99915                       cmd == FUTEX_WAIT_BITSET ||
99916 -                     cmd == FUTEX_WAIT_REQUEUE_PI)) {
99917 +                     cmd == FUTEX_WAIT_REQUEUE_PI ||
99918 +                     cmd == FUTEX_WAIT_MULTIPLE)) {
99919                 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
99920                         return -EFAULT;
99921                 if (get_timespec64(&ts, utime))
99922 @@ -3780,9 +4043,9 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
99923                         return -EINVAL;
99925                 t = timespec64_to_ktime(ts);
99926 -               if (cmd == FUTEX_WAIT)
99927 +               if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE)
99928                         t = ktime_add_safe(ktime_get(), t);
99929 -               else if (!(op & FUTEX_CLOCK_REALTIME))
99930 +               else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
99931                         t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
99932                 tp = &t;
99933         }
99934 @@ -3794,6 +4057,25 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
99935             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
99936                 val2 = (u32) (unsigned long) utime;
99938 +       if (cmd == FUTEX_WAIT_MULTIPLE) {
99939 +               int ret;
99940 +               struct futex_q *qs;
99942 +#ifdef CONFIG_X86_X32
99943 +               if (unlikely(in_x32_syscall()))
99944 +                       return -ENOSYS;
99945 +#endif
99946 +               qs = futex_read_wait_block(uaddr, val);
99948 +               if (IS_ERR(qs))
99949 +                       return PTR_ERR(qs);
99951 +               ret = futex_wait_multiple(qs, op, val, tp);
99952 +               kfree(qs);
99954 +               return ret;
99955 +       }
99957         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
99960 @@ -3956,6 +4238,58 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
99961  #endif /* CONFIG_COMPAT */
99963  #ifdef CONFIG_COMPAT_32BIT_TIME
99964 +/**
99965 + * struct compat_futex_wait_block - Block of futexes to be waited for
99966 + * @uaddr:     User address of the futex (compatible pointer)
99967 + * @val:       Futex value expected by userspace
99968 + * @bitset:    Bitset for the optional bitmasked wakeup
99969 + */
99970 +struct compat_futex_wait_block {
99971 +       compat_uptr_t   uaddr;
99972 +       __u32 pad;
99973 +       __u32 val;
99974 +       __u32 bitset;
99977 +/**
99978 + * compat_futex_read_wait_block - Read an array of futex_wait_block from
99979 + * userspace
99980 + * @uaddr:     Userspace address of the block
99981 + * @count:     Number of blocks to be read
99982 + *
99983 + * This function does the same as futex_read_wait_block(), except that it
99984 + * converts the pointer to the futex from the compat version to the regular one.
99985 + */
99986 +inline struct futex_q *compat_futex_read_wait_block(u32 __user *uaddr,
99987 +                                                   u32 count)
99989 +       unsigned int i;
99990 +       struct futex_q *qs;
99991 +       struct compat_futex_wait_block fwb;
99992 +       struct compat_futex_wait_block __user *entry =
99993 +               (struct compat_futex_wait_block __user *)uaddr;
99995 +       if (!count || count > FUTEX_MULTIPLE_MAX_COUNT)
99996 +               return ERR_PTR(-EINVAL);
99998 +       qs = kcalloc(count, sizeof(*qs), GFP_KERNEL);
99999 +       if (!qs)
100000 +               return ERR_PTR(-ENOMEM);
100002 +       for (i = 0; i < count; i++) {
100003 +               if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) {
100004 +                       kfree(qs);
100005 +                       return ERR_PTR(-EFAULT);
100006 +               }
100008 +               qs[i].uaddr = compat_ptr(fwb.uaddr);
100009 +               qs[i].uval = fwb.val;
100010 +               qs[i].bitset = fwb.bitset;
100011 +       }
100013 +       return qs;
100016  SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
100017                 const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
100018                 u32, val3)
100019 @@ -3967,16 +4301,17 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
100021         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
100022                       cmd == FUTEX_WAIT_BITSET ||
100023 -                     cmd == FUTEX_WAIT_REQUEUE_PI)) {
100024 +                     cmd == FUTEX_WAIT_REQUEUE_PI ||
100025 +                     cmd == FUTEX_WAIT_MULTIPLE)) {
100026                 if (get_old_timespec32(&ts, utime))
100027                         return -EFAULT;
100028                 if (!timespec64_valid(&ts))
100029                         return -EINVAL;
100031                 t = timespec64_to_ktime(ts);
100032 -               if (cmd == FUTEX_WAIT)
100033 +               if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE)
100034                         t = ktime_add_safe(ktime_get(), t);
100035 -               else if (!(op & FUTEX_CLOCK_REALTIME))
100036 +               else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
100037                         t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
100038                 tp = &t;
100039         }
100040 @@ -3984,6 +4319,19 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
100041             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
100042                 val2 = (int) (unsigned long) utime;
100044 +       if (cmd == FUTEX_WAIT_MULTIPLE) {
100045 +               int ret;
100046 +               struct futex_q *qs = compat_futex_read_wait_block(uaddr, val);
100048 +               if (IS_ERR(qs))
100049 +                       return PTR_ERR(qs);
100051 +               ret = futex_wait_multiple(qs, op, val, tp);
100052 +               kfree(qs);
100054 +               return ret;
100055 +       }
100057         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
100059  #endif /* CONFIG_COMPAT_32BIT_TIME */
100060 diff --git a/kernel/futex2.c b/kernel/futex2.c
100061 new file mode 100644
100062 index 000000000000..dd6f54ae0220
100063 --- /dev/null
100064 +++ b/kernel/futex2.c
100065 @@ -0,0 +1,1239 @@
100066 +// SPDX-License-Identifier: GPL-2.0-or-later
100068 + * futex2 system call interface by André Almeida <andrealmeid@collabora.com>
100070 + * Copyright 2021 Collabora Ltd.
100072 + * Based on original futex implementation by:
100073 + *  (C) 2002 Rusty Russell, IBM
100074 + *  (C) 2003, 2006 Ingo Molnar, Red Hat Inc.
100075 + *  (C) 2003, 2004 Jamie Lokier
100076 + *  (C) 2006 Thomas Gleixner, Timesys Corp.
100077 + *  (C) 2007 Eric Dumazet
100078 + *  (C) 2009 Darren Hart, IBM
100079 + */
100081 +#include <linux/freezer.h>
100082 +#include <linux/hugetlb.h>
100083 +#include <linux/jhash.h>
100084 +#include <linux/memblock.h>
100085 +#include <linux/pagemap.h>
100086 +#include <linux/sched/wake_q.h>
100087 +#include <linux/spinlock.h>
100088 +#include <linux/syscalls.h>
100089 +#include <uapi/linux/futex.h>
100091 +#ifdef CONFIG_X86_64
100092 +#include <linux/compat.h>
100093 +#endif
100096 + * struct futex_key - Components to build unique key for a futex
100097 + * @pointer: Pointer to current->mm or inode's UUID for file backed futexes
100098 + * @index: Start address of the page containing futex or index of the page
100099 + * @offset: Address offset of uaddr in a page
100100 + */
100101 +struct futex_key {
100102 +       u64 pointer;
100103 +       unsigned long index;
100104 +       unsigned long offset;
100108 + * struct futex_waiter - List entry for a waiter
100109 + * @uaddr:        Virtual address of userspace futex
100110 + * @key:          Information that uniquely identify a futex
100111 + * @list:        List node struct
100112 + * @val:         Expected value for this waiter
100113 + * @flags:        Flags
100114 + * @bucket:       Pointer to the bucket for this waiter
100115 + * @index:        Index of waiter in futexv list
100116 + */
100117 +struct futex_waiter {
100118 +       void __user *uaddr;
100119 +       struct futex_key key;
100120 +       struct list_head list;
100121 +       unsigned int val;
100122 +       unsigned int flags;
100123 +       struct futex_bucket *bucket;
100124 +       unsigned int index;
100128 + * struct futex_waiter_head - List of futexes to be waited
100129 + * @task:    Task to be awaken
100130 + * @hint:    Was someone on this list awakened?
100131 + * @objects: List of futexes
100132 + */
100133 +struct futex_waiter_head {
100134 +       struct task_struct *task;
100135 +       bool hint;
100136 +       struct futex_waiter objects[0];
100140 + * struct futex_bucket - A bucket of futex's hash table
100141 + * @waiters: Number of waiters in the bucket
100142 + * @lock:    Bucket lock
100143 + * @list:    List of waiters on this bucket
100144 + */
100145 +struct futex_bucket {
100146 +       atomic_t waiters;
100147 +       spinlock_t lock;
100148 +       struct list_head list;
100151 +/* Mask for futex2 flag operations */
100152 +#define FUTEX2_MASK (FUTEX_SIZE_MASK | FUTEX_CLOCK_REALTIME | FUTEX_SHARED_FLAG)
100154 +/* Mask for sys_futex_waitv flag */
100155 +#define FUTEXV_MASK (FUTEX_CLOCK_REALTIME)
100157 +/* Mask for each futex in futex_waitv list */
100158 +#define FUTEXV_WAITER_MASK (FUTEX_SIZE_MASK | FUTEX_SHARED_FLAG)
100160 +#define is_object_shared ((futexv->objects[i].flags & FUTEX_SHARED_FLAG) ? true : false)
100162 +#define FUT_OFF_INODE    1 /* We set bit 0 if key has a reference on inode */
100163 +#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */
100165 +static struct futex_bucket *futex_table;
100166 +static unsigned int futex2_hashsize;
100169 + * Reflects a new waiter being added to the waitqueue.
100170 + */
100171 +static inline void bucket_inc_waiters(struct futex_bucket *bucket)
100173 +#ifdef CONFIG_SMP
100174 +       atomic_inc(&bucket->waiters);
100175 +       /*
100176 +        * Issue a barrier after adding so futex_wake() will see that the
100177 +        * value had increased
100178 +        */
100179 +       smp_mb__after_atomic();
100180 +#endif
100184 + * Reflects a waiter being removed from the waitqueue by wakeup
100185 + * paths.
100186 + */
100187 +static inline void bucket_dec_waiters(struct futex_bucket *bucket)
100189 +#ifdef CONFIG_SMP
100190 +       atomic_dec(&bucket->waiters);
100191 +#endif
100195 + * Get the number of waiters in a bucket
100196 + */
100197 +static inline int bucket_get_waiters(struct futex_bucket *bucket)
100199 +#ifdef CONFIG_SMP
100200 +       /*
100201 +        * Issue a barrier before reading so we get an updated value from
100202 +        * futex_wait()
100203 +        */
100204 +       smp_mb();
100205 +       return atomic_read(&bucket->waiters);
100206 +#else
100207 +       return 1;
100208 +#endif
100212 + * futex_get_inode_uuid - Gets an UUID for an inode
100213 + * @inode: inode to get UUID
100215 + * Generate a machine wide unique identifier for this inode.
100217 + * This relies on u64 not wrapping in the life-time of the machine; which with
100218 + * 1ns resolution means almost 585 years.
100220 + * This further relies on the fact that a well formed program will not unmap
100221 + * the file while it has a (shared) futex waiting on it. This mapping will have
100222 + * a file reference which pins the mount and inode.
100224 + * If for some reason an inode gets evicted and read back in again, it will get
100225 + * a new sequence number and will _NOT_ match, even though it is the exact same
100226 + * file.
100228 + * It is important that match_futex() will never have a false-positive, esp.
100229 + * for PI futexes that can mess up the state. The above argues that false-negatives
100230 + * are only possible for malformed programs.
100232 + * Returns: UUID for the given inode
100233 + */
100234 +static u64 futex_get_inode_uuid(struct inode *inode)
100236 +       static atomic64_t i_seq;
100237 +       u64 old;
100239 +       /* Does the inode already have a sequence number? */
100240 +       old = atomic64_read(&inode->i_sequence2);
100242 +       if (likely(old))
100243 +               return old;
100245 +       for (;;) {
100246 +               u64 new = atomic64_add_return(1, &i_seq);
100248 +               if (WARN_ON_ONCE(!new))
100249 +                       continue;
100251 +               old = atomic64_cmpxchg_relaxed(&inode->i_sequence2, 0, new);
100252 +               if (old)
100253 +                       return old;
100254 +               return new;
100255 +       }
100259 + * futex_get_shared_key - Get a key for a shared futex
100260 + * @address: Futex memory address
100261 + * @mm:      Current process mm_struct pointer
100262 + * @key:     Key struct to be filled
100264 + * Returns: 0 on success, error code otherwise
100265 + */
100266 +static int futex_get_shared_key(uintptr_t address, struct mm_struct *mm,
100267 +                               struct futex_key *key)
100269 +       int ret;
100270 +       struct page *page, *tail;
100271 +       struct address_space *mapping;
100273 +again:
100274 +       ret = get_user_pages_fast(address, 1, 0, &page);
100275 +       if (ret < 0)
100276 +               return ret;
100278 +       /*
100279 +        * The treatment of mapping from this point on is critical. The page
100280 +        * lock protects many things but in this context the page lock
100281 +        * stabilizes mapping, prevents inode freeing in the shared
100282 +        * file-backed region case and guards against movement to swap cache.
100283 +        *
100284 +        * Strictly speaking the page lock is not needed in all cases being
100285 +        * considered here and page lock forces unnecessarily serialization
100286 +        * From this point on, mapping will be re-verified if necessary and
100287 +        * page lock will be acquired only if it is unavoidable
100288 +        *
100289 +        * Mapping checks require the head page for any compound page so the
100290 +        * head page and mapping is looked up now. For anonymous pages, it
100291 +        * does not matter if the page splits in the future as the key is
100292 +        * based on the address. For filesystem-backed pages, the tail is
100293 +        * required as the index of the page determines the key. For
100294 +        * base pages, there is no tail page and tail == page.
100295 +        */
100296 +       tail = page;
100297 +       page = compound_head(page);
100298 +       mapping = READ_ONCE(page->mapping);
100300 +       /*
100301 +        * If page->mapping is NULL, then it cannot be a PageAnon
100302 +        * page; but it might be the ZERO_PAGE or in the gate area or
100303 +        * in a special mapping (all cases which we are happy to fail);
100304 +        * or it may have been a good file page when get_user_pages_fast
100305 +        * found it, but truncated or holepunched or subjected to
100306 +        * invalidate_complete_page2 before we got the page lock (also
100307 +        * cases which we are happy to fail).  And we hold a reference,
100308 +        * so refcount care in invalidate_complete_page's remove_mapping
100309 +        * prevents drop_caches from setting mapping to NULL beneath us.
100310 +        *
100311 +        * The case we do have to guard against is when memory pressure made
100312 +        * shmem_writepage move it from filecache to swapcache beneath us:
100313 +        * an unlikely race, but we do need to retry for page->mapping.
100314 +        */
100315 +       if (unlikely(!mapping)) {
100316 +               int shmem_swizzled;
100318 +               /*
100319 +                * Page lock is required to identify which special case above
100320 +                * applies. If this is really a shmem page then the page lock
100321 +                * will prevent unexpected transitions.
100322 +                */
100323 +               lock_page(page);
100324 +               shmem_swizzled = PageSwapCache(page) || page->mapping;
100325 +               unlock_page(page);
100326 +               put_page(page);
100328 +               if (shmem_swizzled)
100329 +                       goto again;
100331 +               return -EFAULT;
100332 +       }
100334 +       /*
100335 +        * Private mappings are handled in a simple way.
100336 +        *
100337 +        * If the futex key is stored on an anonymous page, then the associated
100338 +        * object is the mm which is implicitly pinned by the calling process.
100339 +        *
100340 +        * NOTE: When userspace waits on a MAP_SHARED mapping, even if
100341 +        * it's a read-only handle, it's expected that futexes attach to
100342 +        * the object not the particular process.
100343 +        */
100344 +       if (PageAnon(page)) {
100345 +               key->offset |= FUT_OFF_MMSHARED;
100346 +       } else {
100347 +               struct inode *inode;
100349 +               /*
100350 +                * The associated futex object in this case is the inode and
100351 +                * the page->mapping must be traversed. Ordinarily this should
100352 +                * be stabilised under page lock but it's not strictly
100353 +                * necessary in this case as we just want to pin the inode, not
100354 +                * update the radix tree or anything like that.
100355 +                *
100356 +                * The RCU read lock is taken as the inode is finally freed
100357 +                * under RCU. If the mapping still matches expectations then the
100358 +                * mapping->host can be safely accessed as being a valid inode.
100359 +                */
100360 +               rcu_read_lock();
100362 +               if (READ_ONCE(page->mapping) != mapping) {
100363 +                       rcu_read_unlock();
100364 +                       put_page(page);
100366 +                       goto again;
100367 +               }
100369 +               inode = READ_ONCE(mapping->host);
100370 +               if (!inode) {
100371 +                       rcu_read_unlock();
100372 +                       put_page(page);
100374 +                       goto again;
100375 +               }
100377 +               key->pointer = futex_get_inode_uuid(inode);
100378 +               key->index = (unsigned long)basepage_index(tail);
100379 +               key->offset |= FUT_OFF_INODE;
100381 +               rcu_read_unlock();
100382 +       }
100384 +       put_page(page);
100386 +       return 0;
100390 + * futex_get_bucket - Check if the user address is valid, prepare internal
100391 + *                    data and calculate the hash
100392 + * @uaddr:   futex user address
100393 + * @key:     data that uniquely identifies a futex
100394 + * @shared:  is this a shared futex?
100396 + * For private futexes, each uaddr will be unique for a given mm_struct, and it
100397 + * won't be freed for the life time of the process. For shared futexes, check
100398 + * futex_get_shared_key().
100400 + * Return: address of bucket on success, error code otherwise
100401 + */
100402 +static struct futex_bucket *futex_get_bucket(void __user *uaddr,
100403 +                                            struct futex_key *key,
100404 +                                            bool shared)
100406 +       uintptr_t address = (uintptr_t)uaddr;
100407 +       u32 hash_key;
100409 +       /* Checking if uaddr is valid and accessible */
100410 +       if (unlikely(!IS_ALIGNED(address, sizeof(u32))))
100411 +               return ERR_PTR(-EINVAL);
100412 +       if (unlikely(!access_ok(uaddr, sizeof(u32))))
100413 +               return ERR_PTR(-EFAULT);
100415 +       key->offset = address % PAGE_SIZE;
100416 +       address -= key->offset;
100417 +       key->pointer = (u64)address;
100418 +       key->index = (unsigned long)current->mm;
100420 +       if (shared)
100421 +               futex_get_shared_key(address, current->mm, key);
100423 +       /* Generate hash key for this futex using uaddr and current->mm */
100424 +       hash_key = jhash2((u32 *)key, sizeof(*key) / sizeof(u32), 0);
100426 +       /* Since HASH_SIZE is 2^n, subtracting 1 makes a perfect bit mask */
100427 +       return &futex_table[hash_key & (futex2_hashsize - 1)];
100431 + * futex_get_user - Get the userspace value on this address
100432 + * @uval:  variable to store the value
100433 + * @uaddr: userspace address
100435 + * Check the comment at futex_enqueue() for more information.
100436 + */
100437 +static int futex_get_user(u32 *uval, u32 __user *uaddr)
100439 +       int ret;
100441 +       pagefault_disable();
100442 +       ret = __get_user(*uval, uaddr);
100443 +       pagefault_enable();
100445 +       return ret;
100449 + * futex_setup_time - Prepare the timeout mechanism and start it.
100450 + * @timo:    Timeout value from userspace
100451 + * @timeout: Pointer to hrtimer handler
100452 + * @flags: Flags from userspace, to decide which clockid to use
100454 + * Return: 0 on success, error code otherwise
100455 + */
100456 +static int futex_setup_time(struct __kernel_timespec __user *timo,
100457 +                           struct hrtimer_sleeper *timeout,
100458 +                           unsigned int flags)
100460 +       ktime_t time;
100461 +       struct timespec64 ts;
100462 +       clockid_t clockid = (flags & FUTEX_CLOCK_REALTIME) ?
100463 +                           CLOCK_REALTIME : CLOCK_MONOTONIC;
100465 +       if (get_timespec64(&ts, timo))
100466 +               return -EFAULT;
100468 +       if (!timespec64_valid(&ts))
100469 +               return -EINVAL;
100471 +       time = timespec64_to_ktime(ts);
100473 +       hrtimer_init_sleeper(timeout, clockid, HRTIMER_MODE_ABS);
100475 +       hrtimer_set_expires(&timeout->timer, time);
100477 +       hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);
100479 +       return 0;
100483 + * futex_dequeue_multiple - Remove multiple futexes from hash table
100484 + * @futexv: list of waiters
100485 + * @nr:     number of futexes to be removed
100487 + * This function is used if (a) something went wrong while enqueuing, and we
100488 + * need to undo our work (then nr <= nr_futexes) or (b) we woke up, and thus
100489 + * need to remove every waiter, check if some was indeed woken and return.
100490 + * Before removing a waiter, we check if it's on the list, since we have no
100491 + * clue who have been waken.
100493 + * Return:
100494 + *  * -1  - If no futex was woken during the removal
100495 + *  * 0>= - At least one futex was found woken, index of the last one
100496 + */
100497 +static int futex_dequeue_multiple(struct futex_waiter_head *futexv, unsigned int nr)
100499 +       int i, ret = -1;
100501 +       for (i = 0; i < nr; i++) {
100502 +               spin_lock(&futexv->objects[i].bucket->lock);
100503 +               if (!list_empty(&futexv->objects[i].list)) {
100504 +                       list_del_init(&futexv->objects[i].list);
100505 +                       bucket_dec_waiters(futexv->objects[i].bucket);
100506 +               } else {
100507 +                       ret = i;
100508 +               }
100509 +               spin_unlock(&futexv->objects[i].bucket->lock);
100510 +       }
100512 +       return ret;
100516 + * futex_enqueue - Check the value and enqueue a futex on a wait list
100518 + * @futexv:     List of futexes
100519 + * @nr_futexes: Number of futexes in the list
100520 + * @awakened:  If a futex was awakened during enqueueing, store the index here
100522 + * Get the value from the userspace address and compares with the expected one.
100524 + * Getting the value from user futex address:
100526 + * Since we are in a hurry, we use a spin lock and we can't sleep.
100527 + * Try to get the value with page fault disabled (when enable, we might
100528 + * sleep).
100530 + * If we fail, we aren't sure if the address is invalid or is just a
100531 + * page fault. Then, release the lock (so we can sleep) and try to get
100532 + * the value with page fault enabled. In order to trigger a page fault
100533 + * handling, we just call __get_user() again. If we sleep with enqueued
100534 + * futexes, we might miss a wake, so dequeue everything before sleeping.
100536 + * If get_user succeeds, this mean that the address is valid and we do
100537 + * the work again. Since we just handled the page fault, the page is
100538 + * likely pinned in memory and we should be luckier this time and be
100539 + * able to get the value. If we fail anyway, we will try again.
100541 + * If even with page faults enabled we get and error, this means that
100542 + * the address is not valid and we return from the syscall.
100544 + * If we got an unexpected value or need to treat a page fault and realized that
100545 + * a futex was awakened, we can priority this and return success.
100547 + * In success, enqueue the futex in the correct bucket
100549 + * Return:
100550 + * * 1  - We were awake in the process and nothing is enqueued
100551 + * * 0  - Everything is enqueued and we are ready to sleep
100552 + * * 0< - Something went wrong, nothing is enqueued, return error code
100553 + */
100554 +static int futex_enqueue(struct futex_waiter_head *futexv, unsigned int nr_futexes,
100555 +                        int *awakened)
100557 +       int i, ret;
100558 +       u32 uval, val;
100559 +       u32 __user *uaddr;
100560 +       bool retry = false;
100561 +       struct futex_bucket *bucket;
100563 +retry:
100564 +       set_current_state(TASK_INTERRUPTIBLE);
100566 +       for (i = 0; i < nr_futexes; i++) {
100567 +               uaddr = (u32 __user *)futexv->objects[i].uaddr;
100568 +               val = (u32)futexv->objects[i].val;
100570 +               if (is_object_shared && retry) {
100571 +                       struct futex_bucket *tmp =
100572 +                               futex_get_bucket((void __user *)uaddr,
100573 +                                                &futexv->objects[i].key, true);
100574 +                       if (IS_ERR(tmp)) {
100575 +                               __set_current_state(TASK_RUNNING);
100576 +                               futex_dequeue_multiple(futexv, i);
100577 +                               return PTR_ERR(tmp);
100578 +                       }
100579 +                       futexv->objects[i].bucket = tmp;
100580 +               }
100582 +               bucket = futexv->objects[i].bucket;
100584 +               bucket_inc_waiters(bucket);
100585 +               spin_lock(&bucket->lock);
100587 +               ret = futex_get_user(&uval, uaddr);
100589 +               if (unlikely(ret)) {
100590 +                       spin_unlock(&bucket->lock);
100592 +                       bucket_dec_waiters(bucket);
100593 +                       __set_current_state(TASK_RUNNING);
100594 +                       *awakened = futex_dequeue_multiple(futexv, i);
100596 +                       if (*awakened >= 0)
100597 +                               return 1;
100599 +                       if (__get_user(uval, uaddr))
100600 +                               return -EFAULT;
100602 +                       retry = true;
100603 +                       goto retry;
100604 +               }
100606 +               if (uval != val) {
100607 +                       spin_unlock(&bucket->lock);
100609 +                       bucket_dec_waiters(bucket);
100610 +                       __set_current_state(TASK_RUNNING);
100611 +                       *awakened = futex_dequeue_multiple(futexv, i);
100613 +                       if (*awakened >= 0)
100614 +                               return 1;
100616 +                       return -EAGAIN;
100617 +               }
100619 +               list_add_tail(&futexv->objects[i].list, &bucket->list);
100620 +               spin_unlock(&bucket->lock);
100621 +       }
100623 +       return 0;
100627 + * __futex_waitv - Enqueue the list of futexes and wait to be woken
100628 + * @futexv: List of futexes to wait
100629 + * @nr_futexes: Length of futexv
100630 + * @timo:      Timeout
100631 + * @flags:     Timeout flags
100633 + * Return:
100634 + * * 0 >= - Hint of which futex woke us
100635 + * * 0 <  - Error code
100636 + */
100637 +static int __futex_waitv(struct futex_waiter_head *futexv, unsigned int nr_futexes,
100638 +                        struct __kernel_timespec __user *timo,
100639 +                        unsigned int flags)
100641 +       int ret;
100642 +       struct hrtimer_sleeper timeout;
100644 +       if (timo) {
100645 +               ret = futex_setup_time(timo, &timeout, flags);
100646 +               if (ret)
100647 +                       return ret;
100648 +       }
100650 +       while (1) {
100651 +               int awakened = -1;
100653 +               ret = futex_enqueue(futexv, nr_futexes, &awakened);
100655 +               if (ret) {
100656 +                       if (awakened >= 0)
100657 +                               ret = awakened;
100658 +                       break;
100659 +               }
100661 +               /* Before sleeping, check if someone was woken */
100662 +               if (!futexv->hint && (!timo || timeout.task))
100663 +                       freezable_schedule();
100665 +               __set_current_state(TASK_RUNNING);
100667 +               /*
100668 +                * One of those things triggered this wake:
100669 +                *
100670 +                * * We have been removed from the bucket. futex_wake() woke
100671 +                *   us. We just need to dequeue and return 0 to userspace.
100672 +                *
100673 +                * However, if no futex was dequeued by a futex_wake():
100674 +                *
100675 +                * * If the there's a timeout and it has expired,
100676 +                *   return -ETIMEDOUT.
100677 +                *
100678 +                * * If there is a signal pending, something wants to kill our
100679 +                *   thread, return -ERESTARTSYS.
100680 +                *
100681 +                * * If there's no signal pending, it was a spurious wake
100682 +                *   (scheduler gave us a chance to do some work, even if we
100683 +                *   don't want to). We need to remove ourselves from the
100684 +                *   bucket and add again, to prevent losing wakeups in the
100685 +                *   meantime.
100686 +                */
100688 +               ret = futex_dequeue_multiple(futexv, nr_futexes);
100690 +               /* Normal wake */
100691 +               if (ret >= 0)
100692 +                       break;
100694 +               if (timo && !timeout.task) {
100695 +                       ret = -ETIMEDOUT;
100696 +                       break;
100697 +               }
100699 +               if (signal_pending(current)) {
100700 +                       ret = -ERESTARTSYS;
100701 +                       break;
100702 +               }
100704 +               /* Spurious wake, do everything again */
100705 +       }
100707 +       if (timo)
100708 +               hrtimer_cancel(&timeout.timer);
100710 +       return ret;
100714 + * sys_futex_wait - Wait on a futex address if (*uaddr) == val
100715 + * @uaddr: User address of futex
100716 + * @val:   Expected value of futex
100717 + * @flags: Specify the size of futex and the clockid
100718 + * @timo:  Optional absolute timeout.
100720 + * The user thread is put to sleep, waiting for a futex_wake() at uaddr, if the
100721 + * value at *uaddr is the same as val (otherwise, the syscall returns
100722 + * immediately with -EAGAIN).
100724 + * Returns 0 on success, error code otherwise.
100725 + */
100726 +SYSCALL_DEFINE4(futex_wait, void __user *, uaddr, unsigned int, val,
100727 +               unsigned int, flags, struct __kernel_timespec __user *, timo)
100729 +       bool shared = (flags & FUTEX_SHARED_FLAG) ? true : false;
100730 +       unsigned int size = flags & FUTEX_SIZE_MASK;
100731 +       struct futex_waiter *waiter;
100732 +       struct futex_waiter_head *futexv;
100734 +       /* Wrapper for a futexv_waiter_head with one element */
100735 +       struct {
100736 +               struct futex_waiter_head futexv;
100737 +               struct futex_waiter waiter;
100738 +       } __packed wait_single;
100740 +       if (flags & ~FUTEX2_MASK)
100741 +               return -EINVAL;
100743 +       if (size != FUTEX_32)
100744 +               return -EINVAL;
100746 +       futexv = &wait_single.futexv;
100747 +       futexv->task = current;
100748 +       futexv->hint = false;
100750 +       waiter = &wait_single.waiter;
100751 +       waiter->index = 0;
100752 +       waiter->val = val;
100753 +       waiter->uaddr = uaddr;
100754 +       memset(&wait_single.waiter.key, 0, sizeof(struct futex_key));
100756 +       INIT_LIST_HEAD(&waiter->list);
100758 +       /* Get an unlocked hash bucket */
100759 +       waiter->bucket = futex_get_bucket(uaddr, &waiter->key, shared);
100760 +       if (IS_ERR(waiter->bucket))
100761 +               return PTR_ERR(waiter->bucket);
100763 +       return __futex_waitv(futexv, 1, timo, flags);
100766 +#ifdef CONFIG_COMPAT
100768 + * compat_futex_parse_waitv - Parse a waitv array from userspace
100769 + * @futexv:    Kernel side list of waiters to be filled
100770 + * @uwaitv:     Userspace list to be parsed
100771 + * @nr_futexes: Length of futexv
100773 + * Return: Error code on failure, pointer to a prepared futexv otherwise
100774 + */
100775 +static int compat_futex_parse_waitv(struct futex_waiter_head *futexv,
100776 +                                   struct compat_futex_waitv __user *uwaitv,
100777 +                                   unsigned int nr_futexes)
100779 +       struct futex_bucket *bucket;
100780 +       struct compat_futex_waitv waitv;
100781 +       unsigned int i;
100783 +       for (i = 0; i < nr_futexes; i++) {
100784 +               if (copy_from_user(&waitv, &uwaitv[i], sizeof(waitv)))
100785 +                       return -EFAULT;
100787 +               if ((waitv.flags & ~FUTEXV_WAITER_MASK) ||
100788 +                   (waitv.flags & FUTEX_SIZE_MASK) != FUTEX_32)
100789 +                       return -EINVAL;
100791 +               futexv->objects[i].key.pointer = 0;
100792 +               futexv->objects[i].flags  = waitv.flags;
100793 +               futexv->objects[i].uaddr  = compat_ptr(waitv.uaddr);
100794 +               futexv->objects[i].val    = waitv.val;
100795 +               futexv->objects[i].index  = i;
100797 +               bucket = futex_get_bucket(compat_ptr(waitv.uaddr),
100798 +                                         &futexv->objects[i].key,
100799 +                                         is_object_shared);
100801 +               if (IS_ERR(bucket))
100802 +                       return PTR_ERR(bucket);
100804 +               futexv->objects[i].bucket = bucket;
100806 +               INIT_LIST_HEAD(&futexv->objects[i].list);
100807 +       }
100809 +       return 0;
100812 +COMPAT_SYSCALL_DEFINE4(futex_waitv, struct compat_futex_waitv __user *, waiters,
100813 +                      unsigned int, nr_futexes, unsigned int, flags,
100814 +                      struct __kernel_timespec __user *, timo)
100816 +       struct futex_waiter_head *futexv;
100817 +       int ret;
100819 +       if (flags & ~FUTEXV_MASK)
100820 +               return -EINVAL;
100822 +       if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)
100823 +               return -EINVAL;
100825 +       futexv = kmalloc((sizeof(struct futex_waiter) * nr_futexes) +
100826 +                        sizeof(*futexv), GFP_KERNEL);
100827 +       if (!futexv)
100828 +               return -ENOMEM;
100830 +       futexv->hint = false;
100831 +       futexv->task = current;
100833 +       ret = compat_futex_parse_waitv(futexv, waiters, nr_futexes);
100835 +       if (!ret)
100836 +               ret = __futex_waitv(futexv, nr_futexes, timo, flags);
100838 +       kfree(futexv);
100840 +       return ret;
100842 +#endif
100845 + * futex_parse_waitv - Parse a waitv array from userspace
100846 + * @futexv:    Kernel side list of waiters to be filled
100847 + * @uwaitv:     Userspace list to be parsed
100848 + * @nr_futexes: Length of futexv
100850 + * Return: Error code on failure, pointer to a prepared futexv otherwise
100851 + */
100852 +static int futex_parse_waitv(struct futex_waiter_head *futexv,
100853 +                            struct futex_waitv __user *uwaitv,
100854 +                            unsigned int nr_futexes)
100856 +       struct futex_bucket *bucket;
100857 +       struct futex_waitv waitv;
100858 +       unsigned int i;
100860 +       for (i = 0; i < nr_futexes; i++) {
100861 +               if (copy_from_user(&waitv, &uwaitv[i], sizeof(waitv)))
100862 +                       return -EFAULT;
100864 +               if ((waitv.flags & ~FUTEXV_WAITER_MASK) ||
100865 +                   (waitv.flags & FUTEX_SIZE_MASK) != FUTEX_32)
100866 +                       return -EINVAL;
100868 +               futexv->objects[i].key.pointer = 0;
100869 +               futexv->objects[i].flags  = waitv.flags;
100870 +               futexv->objects[i].uaddr  = waitv.uaddr;
100871 +               futexv->objects[i].val    = waitv.val;
100872 +               futexv->objects[i].index  = i;
100874 +               bucket = futex_get_bucket(waitv.uaddr, &futexv->objects[i].key,
100875 +                                         is_object_shared);
100877 +               if (IS_ERR(bucket))
100878 +                       return PTR_ERR(bucket);
100880 +               futexv->objects[i].bucket = bucket;
100882 +               INIT_LIST_HEAD(&futexv->objects[i].list);
100883 +       }
100885 +       return 0;
100889 + * sys_futex_waitv - Wait on a list of futexes
100890 + * @waiters:    List of futexes to wait on
100891 + * @nr_futexes: Length of futexv
100892 + * @flags:      Flag for timeout (monotonic/realtime)
100893 + * @timo:      Optional absolute timeout.
100895 + * Given an array of `struct futex_waitv`, wait on each uaddr. The thread wakes
100896 + * if a futex_wake() is performed at any uaddr. The syscall returns immediately
100897 + * if any waiter has *uaddr != val. *timo is an optional timeout value for the
100898 + * operation. Each waiter has individual flags. The `flags` argument for the
100899 + * syscall should be used solely for specifying the timeout as realtime, if
100900 + * needed. Flags for shared futexes, sizes, etc. should be used on the
100901 + * individual flags of each waiter.
100903 + * Returns the array index of one of the awaken futexes. There's no given
100904 + * information of how many were awakened, or any particular attribute of it (if
100905 + * it's the first awakened, if it is of the smaller index...).
100906 + */
100907 +SYSCALL_DEFINE4(futex_waitv, struct futex_waitv __user *, waiters,
100908 +               unsigned int, nr_futexes, unsigned int, flags,
100909 +               struct __kernel_timespec __user *, timo)
100911 +       struct futex_waiter_head *futexv;
100912 +       int ret;
100914 +       if (flags & ~FUTEXV_MASK)
100915 +               return -EINVAL;
100917 +       if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)
100918 +               return -EINVAL;
100920 +       futexv = kmalloc((sizeof(struct futex_waiter) * nr_futexes) +
100921 +                        sizeof(*futexv), GFP_KERNEL);
100922 +       if (!futexv)
100923 +               return -ENOMEM;
100925 +       futexv->hint = false;
100926 +       futexv->task = current;
100928 +#ifdef CONFIG_X86_X32_ABI
100929 +       if (in_x32_syscall()) {
100930 +               ret = compat_futex_parse_waitv(futexv, (struct compat_futex_waitv *)waiters,
100931 +                                              nr_futexes);
100932 +       } else
100933 +#endif
100934 +       {
100935 +               ret = futex_parse_waitv(futexv, waiters, nr_futexes);
100936 +       }
100938 +       if (!ret)
100939 +               ret = __futex_waitv(futexv, nr_futexes, timo, flags);
100941 +       kfree(futexv);
100943 +       return ret;
100947 + * futex_get_parent - For a given futex in a futexv list, get a pointer to the futexv
100948 + * @waiter: Address of futex in the list
100949 + * @index: Index of futex in the list
100951 + * Return: A pointer to its futexv struct
100952 + */
100953 +static inline struct futex_waiter_head *futex_get_parent(uintptr_t waiter,
100954 +                                                        unsigned int index)
100956 +       uintptr_t parent = waiter - sizeof(struct futex_waiter_head)
100957 +                          - (uintptr_t)(index * sizeof(struct futex_waiter));
100959 +       return (struct futex_waiter_head *)parent;
100963 + * futex_mark_wake - Find the task to be wake and add it in wake queue
100964 + * @waiter: Waiter to be wake
100965 + * @bucket: Bucket to be decremented
100966 + * @wake_q: Wake queue to insert the task
100967 + */
100968 +static void futex_mark_wake(struct futex_waiter *waiter,
100969 +                           struct futex_bucket *bucket,
100970 +                           struct wake_q_head *wake_q)
100972 +       struct task_struct *task;
100973 +       struct futex_waiter_head *parent = futex_get_parent((uintptr_t)waiter,
100974 +                                                           waiter->index);
100976 +       lockdep_assert_held(&bucket->lock);
100977 +       parent->hint = true;
100978 +       task = parent->task;
100979 +       get_task_struct(task);
100980 +       list_del_init(&waiter->list);
100981 +       wake_q_add_safe(wake_q, task);
100982 +       bucket_dec_waiters(bucket);
100985 +static inline bool futex_match(struct futex_key key1, struct futex_key key2)
100987 +       return (key1.index == key2.index &&
100988 +               key1.pointer == key2.pointer &&
100989 +               key1.offset == key2.offset);
100992 +long ksys_futex_wake(void __user *uaddr, unsigned long nr_wake,
100993 +                    unsigned int flags)
100995 +       bool shared = (flags & FUTEX_SHARED_FLAG) ? true : false;
100996 +       unsigned int size = flags & FUTEX_SIZE_MASK;
100997 +       struct futex_waiter waiter, *aux, *tmp;
100998 +       struct futex_bucket *bucket;
100999 +       DEFINE_WAKE_Q(wake_q);
101000 +       int ret = 0;
101002 +       if (flags & ~FUTEX2_MASK)
101003 +               return -EINVAL;
101005 +       if (size != FUTEX_32)
101006 +               return -EINVAL;
101008 +       bucket = futex_get_bucket(uaddr, &waiter.key, shared);
101009 +       if (IS_ERR(bucket))
101010 +               return PTR_ERR(bucket);
101012 +       if (!bucket_get_waiters(bucket) || !nr_wake)
101013 +               return 0;
101015 +       spin_lock(&bucket->lock);
101016 +       list_for_each_entry_safe(aux, tmp, &bucket->list, list) {
101017 +               if (futex_match(waiter.key, aux->key)) {
101018 +                       futex_mark_wake(aux, bucket, &wake_q);
101019 +                       if (++ret >= nr_wake)
101020 +                               break;
101021 +               }
101022 +       }
101023 +       spin_unlock(&bucket->lock);
101025 +       wake_up_q(&wake_q);
101027 +       return ret;
101031 + * sys_futex_wake - Wake a number of futexes waiting on an address
101032 + * @uaddr:   Address of futex to be woken up
101033 + * @nr_wake: Number of futexes waiting in uaddr to be woken up
101034 + * @flags:   Flags for size and shared
101036 + * Wake `nr_wake` threads waiting at uaddr.
101038 + * Returns the number of woken threads on success, error code otherwise.
101039 + */
101040 +SYSCALL_DEFINE3(futex_wake, void __user *, uaddr, unsigned int, nr_wake,
101041 +               unsigned int, flags)
101043 +       return ksys_futex_wake(uaddr, nr_wake, flags);
101046 +static void futex_double_unlock(struct futex_bucket *b1, struct futex_bucket *b2)
101048 +       spin_unlock(&b1->lock);
101049 +       if (b1 != b2)
101050 +               spin_unlock(&b2->lock);
101053 +static inline int __futex_requeue(struct futex_requeue rq1,
101054 +                                 struct futex_requeue rq2, unsigned int nr_wake,
101055 +                                 unsigned int nr_requeue, unsigned int cmpval,
101056 +                                 bool shared1, bool shared2)
101058 +       struct futex_waiter w1, w2, *aux, *tmp;
101059 +       bool retry = false;
101060 +       struct futex_bucket *b1, *b2;
101061 +       DEFINE_WAKE_Q(wake_q);
101062 +       u32 uval;
101063 +       int ret;
101065 +       b1 = futex_get_bucket(rq1.uaddr, &w1.key, shared1);
101066 +       if (IS_ERR(b1))
101067 +               return PTR_ERR(b1);
101069 +       b2 = futex_get_bucket(rq2.uaddr, &w2.key, shared2);
101070 +       if (IS_ERR(b2))
101071 +               return PTR_ERR(b2);
101073 +retry:
101074 +       if (shared1 && retry) {
101075 +               b1 = futex_get_bucket(rq1.uaddr, &w1.key, shared1);
101076 +               if (IS_ERR(b1))
101077 +                       return PTR_ERR(b1);
101078 +       }
101080 +       if (shared2 && retry) {
101081 +               b2 = futex_get_bucket(rq2.uaddr, &w2.key, shared2);
101082 +               if (IS_ERR(b2))
101083 +                       return PTR_ERR(b2);
101084 +       }
101086 +       bucket_inc_waiters(b2);
101087 +       /*
101088 +        * To ensure the locks are taken in the same order for all threads (and
101089 +        * thus avoiding deadlocks), take the "smaller" one first
101090 +        */
101091 +       if (b1 <= b2) {
101092 +               spin_lock(&b1->lock);
101093 +               if (b1 < b2)
101094 +                       spin_lock_nested(&b2->lock, SINGLE_DEPTH_NESTING);
101095 +       } else {
101096 +               spin_lock(&b2->lock);
101097 +               spin_lock_nested(&b1->lock, SINGLE_DEPTH_NESTING);
101098 +       }
101100 +       ret = futex_get_user(&uval, rq1.uaddr);
101102 +       if (unlikely(ret)) {
101103 +               futex_double_unlock(b1, b2);
101104 +               if (__get_user(uval, (u32 __user *)rq1.uaddr))
101105 +                       return -EFAULT;
101107 +               bucket_dec_waiters(b2);
101108 +               retry = true;
101109 +               goto retry;
101110 +       }
101112 +       if (uval != cmpval) {
101113 +               futex_double_unlock(b1, b2);
101115 +               bucket_dec_waiters(b2);
101116 +               return -EAGAIN;
101117 +       }
101119 +       list_for_each_entry_safe(aux, tmp, &b1->list, list) {
101120 +               if (futex_match(w1.key, aux->key)) {
101121 +                       if (ret < nr_wake) {
101122 +                               futex_mark_wake(aux, b1, &wake_q);
101123 +                               ret++;
101124 +                               continue;
101125 +                       }
101127 +                       if (ret >= nr_wake + nr_requeue)
101128 +                               break;
101130 +                       aux->key.pointer = w2.key.pointer;
101131 +                       aux->key.index = w2.key.index;
101132 +                       aux->key.offset = w2.key.offset;
101134 +                       if (b1 != b2) {
101135 +                               list_del_init(&aux->list);
101136 +                               bucket_dec_waiters(b1);
101138 +                               list_add_tail(&aux->list, &b2->list);
101139 +                               bucket_inc_waiters(b2);
101140 +                       }
101141 +                       ret++;
101142 +               }
101143 +       }
101145 +       futex_double_unlock(b1, b2);
101146 +       wake_up_q(&wake_q);
101147 +       bucket_dec_waiters(b2);
101149 +       return ret;
101152 +#ifdef CONFIG_COMPAT
101153 +static int compat_futex_parse_requeue(struct futex_requeue *rq,
101154 +                                     struct compat_futex_requeue __user *uaddr,
101155 +                                     bool *shared)
101157 +       struct compat_futex_requeue tmp;
101159 +       if (copy_from_user(&tmp, uaddr, sizeof(tmp)))
101160 +               return -EFAULT;
101162 +       if (tmp.flags & ~FUTEXV_WAITER_MASK ||
101163 +           (tmp.flags & FUTEX_SIZE_MASK) != FUTEX_32)
101164 +               return -EINVAL;
101166 +       *shared = (tmp.flags & FUTEX_SHARED_FLAG) ? true : false;
101168 +       rq->uaddr = compat_ptr(tmp.uaddr);
101169 +       rq->flags = tmp.flags;
101171 +       return 0;
101174 +COMPAT_SYSCALL_DEFINE6(futex_requeue, struct compat_futex_requeue __user *, uaddr1,
101175 +                      struct compat_futex_requeue __user *, uaddr2,
101176 +                      unsigned int, nr_wake, unsigned int, nr_requeue,
101177 +                      unsigned int, cmpval, unsigned int, flags)
101179 +       struct futex_requeue rq1, rq2;
101180 +       bool shared1, shared2;
101181 +       int ret;
101183 +       if (flags)
101184 +               return -EINVAL;
101186 +       ret = compat_futex_parse_requeue(&rq1, uaddr1, &shared1);
101187 +       if (ret)
101188 +               return ret;
101190 +       ret = compat_futex_parse_requeue(&rq2, uaddr2, &shared2);
101191 +       if (ret)
101192 +               return ret;
101194 +       return __futex_requeue(rq1, rq2, nr_wake, nr_requeue, cmpval, shared1, shared2);
101196 +#endif
101199 + * futex_parse_requeue - Copy a user struct futex_requeue and check it's flags
101200 + * @rq:    Kernel struct
101201 + * @uaddr: Address of user struct
101202 + * @shared: Out parameter, defines if this is a shared futex
101204 + * Return: 0 on success, error code otherwise
101205 + */
101206 +static int futex_parse_requeue(struct futex_requeue *rq,
101207 +                              struct futex_requeue __user *uaddr, bool *shared)
101209 +       if (copy_from_user(rq, uaddr, sizeof(*rq)))
101210 +               return -EFAULT;
101212 +       if (rq->flags & ~FUTEXV_WAITER_MASK ||
101213 +           (rq->flags & FUTEX_SIZE_MASK) != FUTEX_32)
101214 +               return -EINVAL;
101216 +       *shared = (rq->flags & FUTEX_SHARED_FLAG) ? true : false;
101218 +       return 0;
101222 + * sys_futex_requeue - Wake futexes at uaddr1 and requeue from uaddr1 to uaddr2
101223 + * @uaddr1:    Address of futexes to be waken/dequeued
101224 + * @uaddr2:    Address for the futexes to be enqueued
101225 + * @nr_wake:   Number of futexes waiting in uaddr1 to be woken up
101226 + * @nr_requeue: Number of futexes to be requeued from uaddr1 to uaddr2
101227 + * @cmpval:    Expected value at uaddr1
101228 + * @flags:     Reserved flags arg for requeue operation expansion. Must be 0.
101230 + * If (uaddr1->uaddr == cmpval), wake at uaddr1->uaddr a nr_wake number of
101231 + * waiters and then, remove a number of nr_requeue waiters at uaddr1->uaddr
101232 + * and add then to uaddr2->uaddr list. Each uaddr has its own set of flags,
101233 + * that must be defined at struct futex_requeue (such as size, shared, NUMA).
101235 + * Return the number of the woken futexes + the number of requeued ones on
101236 + * success, error code otherwise.
101237 + */
101238 +SYSCALL_DEFINE6(futex_requeue, struct futex_requeue __user *, uaddr1,
101239 +               struct futex_requeue __user *, uaddr2,
101240 +               unsigned int, nr_wake, unsigned int, nr_requeue,
101241 +               unsigned int, cmpval, unsigned int, flags)
101243 +       struct futex_requeue rq1, rq2;
101244 +       bool shared1, shared2;
101245 +       int ret;
101247 +       if (flags)
101248 +               return -EINVAL;
101250 +#ifdef CONFIG_X86_X32_ABI
101251 +       if (in_x32_syscall()) {
101252 +               ret = compat_futex_parse_requeue(&rq1, (struct compat_futex_requeue *)uaddr1,
101253 +                                                &shared1);
101254 +               if (ret)
101255 +                       return ret;
101257 +               ret = compat_futex_parse_requeue(&rq2, (struct compat_futex_requeue *)uaddr2,
101258 +                                                &shared2);
101259 +               if (ret)
101260 +                       return ret;
101261 +       } else
101262 +#endif
101263 +       {
101264 +               ret = futex_parse_requeue(&rq1, uaddr1, &shared1);
101265 +               if (ret)
101266 +                       return ret;
101268 +               ret = futex_parse_requeue(&rq2, uaddr2, &shared2);
101269 +               if (ret)
101270 +                       return ret;
101271 +       }
101273 +       return __futex_requeue(rq1, rq2, nr_wake, nr_requeue, cmpval, shared1, shared2);
101276 +static int __init futex2_init(void)
101278 +       int i;
101279 +       unsigned int futex_shift;
101281 +#if CONFIG_BASE_SMALL
101282 +       futex2_hashsize = 16;
101283 +#else
101284 +       futex2_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
101285 +#endif
101287 +       futex_table = alloc_large_system_hash("futex2", sizeof(struct futex_bucket),
101288 +                                             futex2_hashsize, 0,
101289 +                                             futex2_hashsize < 256 ? HASH_SMALL : 0,
101290 +                                             &futex_shift, NULL,
101291 +                                             futex2_hashsize, futex2_hashsize);
101292 +       futex2_hashsize = 1UL << futex_shift;
101294 +       BUG_ON(!is_power_of_2(futex2_hashsize));
101296 +       for (i = 0; i < futex2_hashsize; i++) {
101297 +               INIT_LIST_HEAD(&futex_table[i].list);
101298 +               spin_lock_init(&futex_table[i].lock);
101299 +               atomic_set(&futex_table[i].waiters, 0);
101300 +       }
101302 +       return 0;
101304 +core_initcall(futex2_init);
101305 diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
101306 index 651a4ad6d711..8e586858bcf4 100644
101307 --- a/kernel/irq/matrix.c
101308 +++ b/kernel/irq/matrix.c
101309 @@ -423,7 +423,9 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
101310         if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
101311                 return;
101313 -       clear_bit(bit, cm->alloc_map);
101314 +       if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
101315 +               return;
101317         cm->allocated--;
101318         if(managed)
101319                 cm->managed_allocated--;
101320 diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
101321 index 3bf98db9c702..23e7acb5c667 100644
101322 --- a/kernel/kcsan/core.c
101323 +++ b/kernel/kcsan/core.c
101324 @@ -639,8 +639,6 @@ void __init kcsan_init(void)
101326         BUG_ON(!in_task());
101328 -       kcsan_debugfs_init();
101330         for_each_possible_cpu(cpu)
101331                 per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
101333 diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
101334 index 3c8093a371b1..209ad8dcfcec 100644
101335 --- a/kernel/kcsan/debugfs.c
101336 +++ b/kernel/kcsan/debugfs.c
101337 @@ -261,7 +261,9 @@ static const struct file_operations debugfs_ops =
101338         .release = single_release
101341 -void __init kcsan_debugfs_init(void)
101342 +static void __init kcsan_debugfs_init(void)
101344         debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
101347 +late_initcall(kcsan_debugfs_init);
101348 diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
101349 index 8d4bf3431b3c..87ccdb3b051f 100644
101350 --- a/kernel/kcsan/kcsan.h
101351 +++ b/kernel/kcsan/kcsan.h
101352 @@ -30,11 +30,6 @@ extern bool kcsan_enabled;
101353  void kcsan_save_irqtrace(struct task_struct *task);
101354  void kcsan_restore_irqtrace(struct task_struct *task);
101357 - * Initialize debugfs file.
101358 - */
101359 -void kcsan_debugfs_init(void);
101362   * Statistics counters displayed via debugfs; should only be modified in
101363   * slow-paths.
101364 diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
101365 index 5c3447cf7ad5..33400ff051a8 100644
101366 --- a/kernel/kexec_file.c
101367 +++ b/kernel/kexec_file.c
101368 @@ -740,8 +740,10 @@ static int kexec_calculate_store_digests(struct kimage *image)
101370         sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
101371         sha_regions = vzalloc(sha_region_sz);
101372 -       if (!sha_regions)
101373 +       if (!sha_regions) {
101374 +               ret = -ENOMEM;
101375                 goto out_free_desc;
101376 +       }
101378         desc->tfm   = tfm;
101380 diff --git a/kernel/kthread.c b/kernel/kthread.c
101381 index 1578973c5740..3b8dfbc24a22 100644
101382 --- a/kernel/kthread.c
101383 +++ b/kernel/kthread.c
101384 @@ -84,6 +84,25 @@ static inline struct kthread *to_kthread(struct task_struct *k)
101385         return (__force void *)k->set_child_tid;
101389 + * Variant of to_kthread() that doesn't assume @p is a kthread.
101391 + * Per construction; when:
101393 + *   (p->flags & PF_KTHREAD) && p->set_child_tid
101395 + * the task is both a kthread and struct kthread is persistent. However
101396 + * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and
101397 + * begin_new_exec()).
101398 + */
101399 +static inline struct kthread *__to_kthread(struct task_struct *p)
101401 +       void *kthread = (__force void *)p->set_child_tid;
101402 +       if (kthread && !(p->flags & PF_KTHREAD))
101403 +               kthread = NULL;
101404 +       return kthread;
101407  void free_kthread_struct(struct task_struct *k)
101409         struct kthread *kthread;
101410 @@ -168,8 +187,9 @@ EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
101411   */
101412  void *kthread_func(struct task_struct *task)
101414 -       if (task->flags & PF_KTHREAD)
101415 -               return to_kthread(task)->threadfn;
101416 +       struct kthread *kthread = __to_kthread(task);
101417 +       if (kthread)
101418 +               return kthread->threadfn;
101419         return NULL;
101421  EXPORT_SYMBOL_GPL(kthread_func);
101422 @@ -199,10 +219,11 @@ EXPORT_SYMBOL_GPL(kthread_data);
101423   */
101424  void *kthread_probe_data(struct task_struct *task)
101426 -       struct kthread *kthread = to_kthread(task);
101427 +       struct kthread *kthread = __to_kthread(task);
101428         void *data = NULL;
101430 -       copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
101431 +       if (kthread)
101432 +               copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
101433         return data;
101436 @@ -514,9 +535,9 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
101437         set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
101440 -bool kthread_is_per_cpu(struct task_struct *k)
101441 +bool kthread_is_per_cpu(struct task_struct *p)
101443 -       struct kthread *kthread = to_kthread(k);
101444 +       struct kthread *kthread = __to_kthread(p);
101445         if (!kthread)
101446                 return false;
101448 @@ -1303,6 +1324,7 @@ void kthread_use_mm(struct mm_struct *mm)
101449         tsk->mm = mm;
101450         membarrier_update_current_mm(mm);
101451         switch_mm_irqs_off(active_mm, mm, tsk);
101452 +       lru_gen_switch_mm(active_mm, mm);
101453         local_irq_enable();
101454         task_unlock(tsk);
101455  #ifdef finish_arch_post_lock_switch
101456 diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
101457 index abba5df50006..b9fab2d55b93 100644
101458 --- a/kernel/locking/rwsem.c
101459 +++ b/kernel/locking/rwsem.c
101460 @@ -668,6 +668,7 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
101461         struct task_struct *new, *owner;
101462         unsigned long flags, new_flags;
101463         enum owner_state state;
101464 +       int i = 0;
101466         owner = rwsem_owner_flags(sem, &flags);
101467         state = rwsem_owner_state(owner, flags);
101468 @@ -701,7 +702,8 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
101469                         break;
101470                 }
101472 -               cpu_relax();
101473 +               if (i++ > 1000)
101474 +                       cpu_relax();
101475         }
101476         rcu_read_unlock();
101478 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
101479 index 575a34b88936..77ae2704e979 100644
101480 --- a/kernel/printk/printk.c
101481 +++ b/kernel/printk/printk.c
101482 @@ -1494,6 +1494,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
101483         struct printk_info info;
101484         unsigned int line_count;
101485         struct printk_record r;
101486 +       u64 max_seq;
101487         char *text;
101488         int len = 0;
101489         u64 seq;
101490 @@ -1512,9 +1513,15 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
101491         prb_for_each_info(clear_seq, prb, seq, &info, &line_count)
101492                 len += get_record_print_text_size(&info, line_count, true, time);
101494 +       /*
101495 +        * Set an upper bound for the next loop to avoid subtracting lengths
101496 +        * that were never added.
101497 +        */
101498 +       max_seq = seq;
101500         /* move first record forward until length fits into the buffer */
101501         prb_for_each_info(clear_seq, prb, seq, &info, &line_count) {
101502 -               if (len <= size)
101503 +               if (len <= size || info.seq >= max_seq)
101504                         break;
101505                 len -= get_record_print_text_size(&info, line_count, true, time);
101506         }
101507 diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
101508 index 3128b7cf8e1f..abfae9afbdc8 100644
101509 --- a/kernel/rcu/Kconfig
101510 +++ b/kernel/rcu/Kconfig
101511 @@ -189,8 +189,8 @@ config RCU_FAST_NO_HZ
101513  config RCU_BOOST
101514         bool "Enable RCU priority boosting"
101515 -       depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT
101516 -       default y if PREEMPT_RT
101517 +       depends on (RT_MUTEXES && PREEMPT_RCU) || PREEMPT_RT
101518 +       default y
101519         help
101520           This option boosts the priority of preempted RCU readers that
101521           block the current preemptible RCU grace period for too long.
101522 @@ -204,7 +204,7 @@ config RCU_BOOST_DELAY
101523         int "Milliseconds to delay boosting after RCU grace-period start"
101524         range 0 3000
101525         depends on RCU_BOOST
101526 -       default 500
101527 +       default 0
101528         help
101529           This option specifies the time to wait after the beginning of
101530           a given grace period before priority-boosting preempted RCU
101531 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
101532 index da6f5213fb74..7356764e49a0 100644
101533 --- a/kernel/rcu/tree.c
101534 +++ b/kernel/rcu/tree.c
101535 @@ -1077,7 +1077,6 @@ noinstr void rcu_nmi_enter(void)
101536         } else if (!in_nmi()) {
101537                 instrumentation_begin();
101538                 rcu_irq_enter_check_tick();
101539 -               instrumentation_end();
101540         } else  {
101541                 instrumentation_begin();
101542         }
101543 @@ -3464,7 +3463,7 @@ static void fill_page_cache_func(struct work_struct *work)
101545         for (i = 0; i < rcu_min_cached_objs; i++) {
101546                 bnode = (struct kvfree_rcu_bulk_data *)
101547 -                       __get_free_page(GFP_KERNEL | __GFP_NOWARN);
101548 +                       __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
101550                 if (bnode) {
101551                         raw_spin_lock_irqsave(&krcp->lock, flags);
101552 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
101553 index 2d603771c7dc..0796a75b6e0e 100644
101554 --- a/kernel/rcu/tree_plugin.h
101555 +++ b/kernel/rcu/tree_plugin.h
101556 @@ -1646,7 +1646,11 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
101557                 rcu_nocb_unlock_irqrestore(rdp, flags);
101558                 return false;
101559         }
101560 -       del_timer(&rdp->nocb_timer);
101562 +       if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
101563 +               WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
101564 +               del_timer(&rdp->nocb_timer);
101565 +       }
101566         rcu_nocb_unlock_irqrestore(rdp, flags);
101567         raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
101568         if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
101569 @@ -2265,7 +2269,6 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
101570                 return false;
101571         }
101572         ndw = READ_ONCE(rdp->nocb_defer_wakeup);
101573 -       WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
101574         ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
101575         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
101577 diff --git a/kernel/resource.c b/kernel/resource.c
101578 index 627e61b0c124..16e0c7e8ed24 100644
101579 --- a/kernel/resource.c
101580 +++ b/kernel/resource.c
101581 @@ -457,7 +457,7 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
101583         unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
101585 -       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
101586 +       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
101587                                      arg, func);
101590 @@ -470,7 +470,7 @@ int walk_mem_res(u64 start, u64 end, void *arg,
101592         unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
101594 -       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
101595 +       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
101596                                      arg, func);
101599 diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
101600 index 2067080bb235..573b313efe4c 100644
101601 --- a/kernel/sched/autogroup.c
101602 +++ b/kernel/sched/autogroup.c
101603 @@ -5,7 +5,8 @@
101604  #include <linux/nospec.h>
101605  #include "sched.h"
101607 -unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
101608 +unsigned int __read_mostly sysctl_sched_autogroup_enabled =
101609 +               IS_ENABLED(CONFIG_SCHED_AUTOGROUP_DEFAULT_ENABLED) ? 1 : 0;
101610  static struct autogroup autogroup_default;
101611  static atomic_t autogroup_seq_nr;
101613 @@ -197,11 +198,12 @@ void sched_autogroup_exit(struct signal_struct *sig)
101615  static int __init setup_autogroup(char *str)
101617 -       sysctl_sched_autogroup_enabled = 0;
101619 +       unsigned long enabled;
101620 +       if (!kstrtoul(str, 0, &enabled))
101621 +               sysctl_sched_autogroup_enabled = enabled ? 1 : 0;
101622         return 1;
101624 -__setup("noautogroup", setup_autogroup);
101625 +__setup("autogroup=", setup_autogroup);
101627  #ifdef CONFIG_PROC_FS
101629 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
101630 index 98191218d891..cd3bb52378c0 100644
101631 --- a/kernel/sched/core.c
101632 +++ b/kernel/sched/core.c
101633 @@ -928,7 +928,7 @@ DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
101635  static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
101637 -       return clamp_value / UCLAMP_BUCKET_DELTA;
101638 +       return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
101641  static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
101642 @@ -4306,6 +4306,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
101643                  * finish_task_switch()'s mmdrop().
101644                  */
101645                 switch_mm_irqs_off(prev->active_mm, next->mm, next);
101646 +               lru_gen_switch_mm(prev->active_mm, next->mm);
101648                 if (!prev->mm) {                        // from kernel
101649                         /* will mmdrop() in finish_task_switch(). */
101650 @@ -5765,6 +5766,7 @@ int can_nice(const struct task_struct *p, const int nice)
101651         return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
101652                 capable(CAP_SYS_NICE));
101654 +EXPORT_SYMBOL(can_nice);
101656  #ifdef __ARCH_WANT_SYS_NICE
101658 @@ -7597,6 +7599,7 @@ void idle_task_exit(void)
101660         if (mm != &init_mm) {
101661                 switch_mm(mm, &init_mm, current);
101662 +               lru_gen_switch_mm(mm, &init_mm);
101663                 finish_arch_post_lock_switch();
101664         }
101666 @@ -7652,7 +7655,7 @@ static void balance_push(struct rq *rq)
101667          * histerical raisins.
101668          */
101669         if (rq->idle == push_task ||
101670 -           ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
101671 +           kthread_is_per_cpu(push_task) ||
101672             is_migration_disabled(push_task)) {
101674                 /*
101675 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
101676 index 486f403a778b..9c8b3ed2199a 100644
101677 --- a/kernel/sched/debug.c
101678 +++ b/kernel/sched/debug.c
101679 @@ -8,8 +8,6 @@
101680   */
101681  #include "sched.h"
101683 -static DEFINE_SPINLOCK(sched_debug_lock);
101686   * This allows printing both to /proc/sched_debug and
101687   * to the console
101688 @@ -470,16 +468,37 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
101689  #endif
101691  #ifdef CONFIG_CGROUP_SCHED
101692 +static DEFINE_SPINLOCK(sched_debug_lock);
101693  static char group_path[PATH_MAX];
101695 -static char *task_group_path(struct task_group *tg)
101696 +static void task_group_path(struct task_group *tg, char *path, int plen)
101698 -       if (autogroup_path(tg, group_path, PATH_MAX))
101699 -               return group_path;
101700 +       if (autogroup_path(tg, path, plen))
101701 +               return;
101703 -       cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
101704 +       cgroup_path(tg->css.cgroup, path, plen);
101707 -       return group_path;
101709 + * Only 1 SEQ_printf_task_group_path() caller can use the full length
101710 + * group_path[] for cgroup path. Other simultaneous callers will have
101711 + * to use a shorter stack buffer. A "..." suffix is appended at the end
101712 + * of the stack buffer so that it will show up in case the output length
101713 + * matches the given buffer size to indicate possible path name truncation.
101714 + */
101715 +#define SEQ_printf_task_group_path(m, tg, fmt...)                      \
101716 +{                                                                      \
101717 +       if (spin_trylock(&sched_debug_lock)) {                          \
101718 +               task_group_path(tg, group_path, sizeof(group_path));    \
101719 +               SEQ_printf(m, fmt, group_path);                         \
101720 +               spin_unlock(&sched_debug_lock);                         \
101721 +       } else {                                                        \
101722 +               char buf[128];                                          \
101723 +               char *bufend = buf + sizeof(buf) - 3;                   \
101724 +               task_group_path(tg, buf, bufend - buf);                 \
101725 +               strcpy(bufend - 1, "...");                              \
101726 +               SEQ_printf(m, fmt, buf);                                \
101727 +       }                                                               \
101729  #endif
101731 @@ -506,7 +525,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
101732         SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
101733  #endif
101734  #ifdef CONFIG_CGROUP_SCHED
101735 -       SEQ_printf(m, " %s", task_group_path(task_group(p)));
101736 +       SEQ_printf_task_group_path(m, task_group(p), " %s")
101737  #endif
101739         SEQ_printf(m, "\n");
101740 @@ -543,7 +562,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
101742  #ifdef CONFIG_FAIR_GROUP_SCHED
101743         SEQ_printf(m, "\n");
101744 -       SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
101745 +       SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
101746  #else
101747         SEQ_printf(m, "\n");
101748         SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
101749 @@ -614,7 +633,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
101751  #ifdef CONFIG_RT_GROUP_SCHED
101752         SEQ_printf(m, "\n");
101753 -       SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
101754 +       SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
101755  #else
101756         SEQ_printf(m, "\n");
101757         SEQ_printf(m, "rt_rq[%d]:\n", cpu);
101758 @@ -666,7 +685,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
101759  static void print_cpu(struct seq_file *m, int cpu)
101761         struct rq *rq = cpu_rq(cpu);
101762 -       unsigned long flags;
101764  #ifdef CONFIG_X86
101765         {
101766 @@ -717,13 +735,11 @@ do {                                                                      \
101767         }
101768  #undef P
101770 -       spin_lock_irqsave(&sched_debug_lock, flags);
101771         print_cfs_stats(m, cpu);
101772         print_rt_stats(m, cpu);
101773         print_dl_stats(m, cpu);
101775         print_rq(m, rq, cpu);
101776 -       spin_unlock_irqrestore(&sched_debug_lock, flags);
101777         SEQ_printf(m, "\n");
101780 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
101781 index 794c2cb945f8..883b6fe91ca5 100644
101782 --- a/kernel/sched/fair.c
101783 +++ b/kernel/sched/fair.c
101784 @@ -682,7 +682,13 @@ static u64 __sched_period(unsigned long nr_running)
101785   */
101786  static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
101788 -       u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
101789 +       unsigned int nr_running = cfs_rq->nr_running;
101790 +       u64 slice;
101792 +       if (sched_feat(ALT_PERIOD))
101793 +               nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
101795 +       slice = __sched_period(nr_running + !se->on_rq);
101797         for_each_sched_entity(se) {
101798                 struct load_weight *load;
101799 @@ -699,6 +705,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
101800                 }
101801                 slice = __calc_delta(slice, se->load.weight, load);
101802         }
101804 +       if (sched_feat(BASE_SLICE))
101805 +               slice = max(slice, (u64)sysctl_sched_min_granularity);
101807         return slice;
101810 @@ -3941,6 +3951,8 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
101811         trace_sched_util_est_cfs_tp(cfs_rq);
101814 +#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
101817   * Check if a (signed) value is within a specified (unsigned) margin,
101818   * based on the observation that:
101819 @@ -3958,7 +3970,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
101820                                    struct task_struct *p,
101821                                    bool task_sleep)
101823 -       long last_ewma_diff;
101824 +       long last_ewma_diff, last_enqueued_diff;
101825         struct util_est ue;
101827         if (!sched_feat(UTIL_EST))
101828 @@ -3979,6 +3991,8 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
101829         if (ue.enqueued & UTIL_AVG_UNCHANGED)
101830                 return;
101832 +       last_enqueued_diff = ue.enqueued;
101834         /*
101835          * Reset EWMA on utilization increases, the moving average is used only
101836          * to smooth utilization decreases.
101837 @@ -3992,12 +4006,17 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
101838         }
101840         /*
101841 -        * Skip update of task's estimated utilization when its EWMA is
101842 +        * Skip update of task's estimated utilization when its members are
101843          * already ~1% close to its last activation value.
101844          */
101845         last_ewma_diff = ue.enqueued - ue.ewma;
101846 -       if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
101847 +       last_enqueued_diff -= ue.enqueued;
101848 +       if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
101849 +               if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
101850 +                       goto done;
101852                 return;
101853 +       }
101855         /*
101856          * To avoid overestimation of actual task utilization, skip updates if
101857 @@ -6098,6 +6117,24 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
101858         return -1;
101862 + * Scan the local SMT mask for idle CPUs.
101863 + */
101864 +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
101866 +       int cpu;
101868 +       for_each_cpu(cpu, cpu_smt_mask(target)) {
101869 +               if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
101870 +                   !cpumask_test_cpu(cpu, sched_domain_span(sd)))
101871 +                       continue;
101872 +               if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
101873 +                       return cpu;
101874 +       }
101876 +       return -1;
101879  #else /* CONFIG_SCHED_SMT */
101881  static inline void set_idle_cores(int cpu, int val)
101882 @@ -6114,6 +6151,11 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
101883         return __select_idle_cpu(core);
101886 +static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
101888 +       return -1;
101891  #endif /* CONFIG_SCHED_SMT */
101894 @@ -6121,11 +6163,10 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
101895   * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
101896   * average idle time for this rq (as found in rq->avg_idle).
101897   */
101898 -static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
101899 +static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
101901         struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
101902         int i, cpu, idle_cpu = -1, nr = INT_MAX;
101903 -       bool smt = test_idle_cores(target, false);
101904         int this = smp_processor_id();
101905         struct sched_domain *this_sd;
101906         u64 time;
101907 @@ -6136,7 +6177,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
101909         cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
101911 -       if (sched_feat(SIS_PROP) && !smt) {
101912 +       if (sched_feat(SIS_PROP) && !has_idle_core) {
101913                 u64 avg_cost, avg_idle, span_avg;
101915                 /*
101916 @@ -6156,7 +6197,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
101917         }
101919         for_each_cpu_wrap(cpu, cpus, target) {
101920 -               if (smt) {
101921 +               if (has_idle_core) {
101922                         i = select_idle_core(p, cpu, cpus, &idle_cpu);
101923                         if ((unsigned int)i < nr_cpumask_bits)
101924                                 return i;
101925 @@ -6170,10 +6211,10 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
101926                 }
101927         }
101929 -       if (smt)
101930 -               set_idle_cores(this, false);
101931 +       if (has_idle_core)
101932 +               set_idle_cores(target, false);
101934 -       if (sched_feat(SIS_PROP) && !smt) {
101935 +       if (sched_feat(SIS_PROP) && !has_idle_core) {
101936                 time = cpu_clock(this) - time;
101937                 update_avg(&this_sd->avg_scan_cost, time);
101938         }
101939 @@ -6228,6 +6269,7 @@ static inline bool asym_fits_capacity(int task_util, int cpu)
101940   */
101941  static int select_idle_sibling(struct task_struct *p, int prev, int target)
101943 +       bool has_idle_core = false;
101944         struct sched_domain *sd;
101945         unsigned long task_util;
101946         int i, recent_used_cpu;
101947 @@ -6307,7 +6349,17 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
101948         if (!sd)
101949                 return target;
101951 -       i = select_idle_cpu(p, sd, target);
101952 +       if (sched_smt_active()) {
101953 +               has_idle_core = test_idle_cores(target, false);
101955 +               if (!has_idle_core && cpus_share_cache(prev, target)) {
101956 +                       i = select_idle_smt(p, sd, prev);
101957 +                       if ((unsigned int)i < nr_cpumask_bits)
101958 +                               return i;
101959 +               }
101960 +       }
101962 +       i = select_idle_cpu(p, sd, has_idle_core, target);
101963         if ((unsigned)i < nr_cpumask_bits)
101964                 return i;
101966 @@ -6455,240 +6507,6 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
101967         return min_t(unsigned long, util, capacity_orig_of(cpu));
101971 - * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
101972 - * to @dst_cpu.
101973 - */
101974 -static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
101976 -       struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
101977 -       unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
101979 -       /*
101980 -        * If @p migrates from @cpu to another, remove its contribution. Or,
101981 -        * if @p migrates from another CPU to @cpu, add its contribution. In
101982 -        * the other cases, @cpu is not impacted by the migration, so the
101983 -        * util_avg should already be correct.
101984 -        */
101985 -       if (task_cpu(p) == cpu && dst_cpu != cpu)
101986 -               sub_positive(&util, task_util(p));
101987 -       else if (task_cpu(p) != cpu && dst_cpu == cpu)
101988 -               util += task_util(p);
101990 -       if (sched_feat(UTIL_EST)) {
101991 -               util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
101993 -               /*
101994 -                * During wake-up, the task isn't enqueued yet and doesn't
101995 -                * appear in the cfs_rq->avg.util_est.enqueued of any rq,
101996 -                * so just add it (if needed) to "simulate" what will be
101997 -                * cpu_util() after the task has been enqueued.
101998 -                */
101999 -               if (dst_cpu == cpu)
102000 -                       util_est += _task_util_est(p);
102002 -               util = max(util, util_est);
102003 -       }
102005 -       return min(util, capacity_orig_of(cpu));
102009 - * compute_energy(): Estimates the energy that @pd would consume if @p was
102010 - * migrated to @dst_cpu. compute_energy() predicts what will be the utilization
102011 - * landscape of @pd's CPUs after the task migration, and uses the Energy Model
102012 - * to compute what would be the energy if we decided to actually migrate that
102013 - * task.
102014 - */
102015 -static long
102016 -compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
102018 -       struct cpumask *pd_mask = perf_domain_span(pd);
102019 -       unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
102020 -       unsigned long max_util = 0, sum_util = 0;
102021 -       int cpu;
102023 -       /*
102024 -        * The capacity state of CPUs of the current rd can be driven by CPUs
102025 -        * of another rd if they belong to the same pd. So, account for the
102026 -        * utilization of these CPUs too by masking pd with cpu_online_mask
102027 -        * instead of the rd span.
102028 -        *
102029 -        * If an entire pd is outside of the current rd, it will not appear in
102030 -        * its pd list and will not be accounted by compute_energy().
102031 -        */
102032 -       for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
102033 -               unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu);
102034 -               struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
102036 -               /*
102037 -                * Busy time computation: utilization clamping is not
102038 -                * required since the ratio (sum_util / cpu_capacity)
102039 -                * is already enough to scale the EM reported power
102040 -                * consumption at the (eventually clamped) cpu_capacity.
102041 -                */
102042 -               sum_util += effective_cpu_util(cpu, util_cfs, cpu_cap,
102043 -                                              ENERGY_UTIL, NULL);
102045 -               /*
102046 -                * Performance domain frequency: utilization clamping
102047 -                * must be considered since it affects the selection
102048 -                * of the performance domain frequency.
102049 -                * NOTE: in case RT tasks are running, by default the
102050 -                * FREQUENCY_UTIL's utilization can be max OPP.
102051 -                */
102052 -               cpu_util = effective_cpu_util(cpu, util_cfs, cpu_cap,
102053 -                                             FREQUENCY_UTIL, tsk);
102054 -               max_util = max(max_util, cpu_util);
102055 -       }
102057 -       return em_cpu_energy(pd->em_pd, max_util, sum_util);
102061 - * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
102062 - * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
102063 - * spare capacity in each performance domain and uses it as a potential
102064 - * candidate to execute the task. Then, it uses the Energy Model to figure
102065 - * out which of the CPU candidates is the most energy-efficient.
102067 - * The rationale for this heuristic is as follows. In a performance domain,
102068 - * all the most energy efficient CPU candidates (according to the Energy
102069 - * Model) are those for which we'll request a low frequency. When there are
102070 - * several CPUs for which the frequency request will be the same, we don't
102071 - * have enough data to break the tie between them, because the Energy Model
102072 - * only includes active power costs. With this model, if we assume that
102073 - * frequency requests follow utilization (e.g. using schedutil), the CPU with
102074 - * the maximum spare capacity in a performance domain is guaranteed to be among
102075 - * the best candidates of the performance domain.
102077 - * In practice, it could be preferable from an energy standpoint to pack
102078 - * small tasks on a CPU in order to let other CPUs go in deeper idle states,
102079 - * but that could also hurt our chances to go cluster idle, and we have no
102080 - * ways to tell with the current Energy Model if this is actually a good
102081 - * idea or not. So, find_energy_efficient_cpu() basically favors
102082 - * cluster-packing, and spreading inside a cluster. That should at least be
102083 - * a good thing for latency, and this is consistent with the idea that most
102084 - * of the energy savings of EAS come from the asymmetry of the system, and
102085 - * not so much from breaking the tie between identical CPUs. That's also the
102086 - * reason why EAS is enabled in the topology code only for systems where
102087 - * SD_ASYM_CPUCAPACITY is set.
102089 - * NOTE: Forkees are not accepted in the energy-aware wake-up path because
102090 - * they don't have any useful utilization data yet and it's not possible to
102091 - * forecast their impact on energy consumption. Consequently, they will be
102092 - * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
102093 - * to be energy-inefficient in some use-cases. The alternative would be to
102094 - * bias new tasks towards specific types of CPUs first, or to try to infer
102095 - * their util_avg from the parent task, but those heuristics could hurt
102096 - * other use-cases too. So, until someone finds a better way to solve this,
102097 - * let's keep things simple by re-using the existing slow path.
102098 - */
102099 -static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
102101 -       unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
102102 -       struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
102103 -       unsigned long cpu_cap, util, base_energy = 0;
102104 -       int cpu, best_energy_cpu = prev_cpu;
102105 -       struct sched_domain *sd;
102106 -       struct perf_domain *pd;
102108 -       rcu_read_lock();
102109 -       pd = rcu_dereference(rd->pd);
102110 -       if (!pd || READ_ONCE(rd->overutilized))
102111 -               goto fail;
102113 -       /*
102114 -        * Energy-aware wake-up happens on the lowest sched_domain starting
102115 -        * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
102116 -        */
102117 -       sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
102118 -       while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
102119 -               sd = sd->parent;
102120 -       if (!sd)
102121 -               goto fail;
102123 -       sync_entity_load_avg(&p->se);
102124 -       if (!task_util_est(p))
102125 -               goto unlock;
102127 -       for (; pd; pd = pd->next) {
102128 -               unsigned long cur_delta, spare_cap, max_spare_cap = 0;
102129 -               unsigned long base_energy_pd;
102130 -               int max_spare_cap_cpu = -1;
102132 -               /* Compute the 'base' energy of the pd, without @p */
102133 -               base_energy_pd = compute_energy(p, -1, pd);
102134 -               base_energy += base_energy_pd;
102136 -               for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
102137 -                       if (!cpumask_test_cpu(cpu, p->cpus_ptr))
102138 -                               continue;
102140 -                       util = cpu_util_next(cpu, p, cpu);
102141 -                       cpu_cap = capacity_of(cpu);
102142 -                       spare_cap = cpu_cap;
102143 -                       lsub_positive(&spare_cap, util);
102145 -                       /*
102146 -                        * Skip CPUs that cannot satisfy the capacity request.
102147 -                        * IOW, placing the task there would make the CPU
102148 -                        * overutilized. Take uclamp into account to see how
102149 -                        * much capacity we can get out of the CPU; this is
102150 -                        * aligned with sched_cpu_util().
102151 -                        */
102152 -                       util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
102153 -                       if (!fits_capacity(util, cpu_cap))
102154 -                               continue;
102156 -                       /* Always use prev_cpu as a candidate. */
102157 -                       if (cpu == prev_cpu) {
102158 -                               prev_delta = compute_energy(p, prev_cpu, pd);
102159 -                               prev_delta -= base_energy_pd;
102160 -                               best_delta = min(best_delta, prev_delta);
102161 -                       }
102163 -                       /*
102164 -                        * Find the CPU with the maximum spare capacity in
102165 -                        * the performance domain
102166 -                        */
102167 -                       if (spare_cap > max_spare_cap) {
102168 -                               max_spare_cap = spare_cap;
102169 -                               max_spare_cap_cpu = cpu;
102170 -                       }
102171 -               }
102173 -               /* Evaluate the energy impact of using this CPU. */
102174 -               if (max_spare_cap_cpu >= 0 && max_spare_cap_cpu != prev_cpu) {
102175 -                       cur_delta = compute_energy(p, max_spare_cap_cpu, pd);
102176 -                       cur_delta -= base_energy_pd;
102177 -                       if (cur_delta < best_delta) {
102178 -                               best_delta = cur_delta;
102179 -                               best_energy_cpu = max_spare_cap_cpu;
102180 -                       }
102181 -               }
102182 -       }
102183 -unlock:
102184 -       rcu_read_unlock();
102186 -       /*
102187 -        * Pick the best CPU if prev_cpu cannot be used, or if it saves at
102188 -        * least 6% of the energy used by prev_cpu.
102189 -        */
102190 -       if (prev_delta == ULONG_MAX)
102191 -               return best_energy_cpu;
102193 -       if ((prev_delta - best_delta) > ((prev_delta + base_energy) >> 4))
102194 -               return best_energy_cpu;
102196 -       return prev_cpu;
102198 -fail:
102199 -       rcu_read_unlock();
102201 -       return -1;
102205   * select_task_rq_fair: Select target runqueue for the waking task in domains
102206   * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
102207 @@ -6714,14 +6532,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
102209         if (wake_flags & WF_TTWU) {
102210                 record_wakee(p);
102212 -               if (sched_energy_enabled()) {
102213 -                       new_cpu = find_energy_efficient_cpu(p, prev_cpu);
102214 -                       if (new_cpu >= 0)
102215 -                               return new_cpu;
102216 -                       new_cpu = prev_cpu;
102217 -               }
102219                 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
102220         }
102222 @@ -7539,6 +7349,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
102223         if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
102224                 return 0;
102226 +       /* Disregard pcpu kthreads; they are where they need to be. */
102227 +       if (kthread_is_per_cpu(p))
102228 +               return 0;
102230         if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
102231                 int cpu;
102233 @@ -7708,8 +7522,7 @@ static int detach_tasks(struct lb_env *env)
102234                          * scheduler fails to find a good waiting task to
102235                          * migrate.
102236                          */
102238 -                       if ((load >> env->sd->nr_balance_failed) > env->imbalance)
102239 +                       if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
102240                                 goto next;
102242                         env->imbalance -= load;
102243 @@ -10844,16 +10657,22 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
102245         struct cfs_rq *cfs_rq;
102247 +       list_add_leaf_cfs_rq(cfs_rq_of(se));
102249         /* Start to propagate at parent */
102250         se = se->parent;
102252         for_each_sched_entity(se) {
102253                 cfs_rq = cfs_rq_of(se);
102255 -               if (cfs_rq_throttled(cfs_rq))
102256 -                       break;
102257 +               if (!cfs_rq_throttled(cfs_rq)){
102258 +                       update_load_avg(cfs_rq, se, UPDATE_TG);
102259 +                       list_add_leaf_cfs_rq(cfs_rq);
102260 +                       continue;
102261 +               }
102263 -               update_load_avg(cfs_rq, se, UPDATE_TG);
102264 +               if (list_add_leaf_cfs_rq(cfs_rq))
102265 +                       break;
102266         }
102268  #else
102269 diff --git a/kernel/sched/features.h b/kernel/sched/features.h
102270 index 1bc2b158fc51..e911111df83a 100644
102271 --- a/kernel/sched/features.h
102272 +++ b/kernel/sched/features.h
102273 @@ -90,3 +90,6 @@ SCHED_FEAT(WA_BIAS, true)
102274   */
102275  SCHED_FEAT(UTIL_EST, true)
102276  SCHED_FEAT(UTIL_EST_FASTUP, true)
102278 +SCHED_FEAT(ALT_PERIOD, true)
102279 +SCHED_FEAT(BASE_SLICE, true)
102280 diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
102281 index 967732c0766c..651218ded981 100644
102282 --- a/kernel/sched/psi.c
102283 +++ b/kernel/sched/psi.c
102284 @@ -711,14 +711,15 @@ static void psi_group_change(struct psi_group *group, int cpu,
102285         for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
102286                 if (!(m & (1 << t)))
102287                         continue;
102288 -               if (groupc->tasks[t] == 0 && !psi_bug) {
102289 +               if (groupc->tasks[t]) {
102290 +                       groupc->tasks[t]--;
102291 +               } else if (!psi_bug) {
102292                         printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
102293                                         cpu, t, groupc->tasks[0],
102294                                         groupc->tasks[1], groupc->tasks[2],
102295                                         groupc->tasks[3], clear, set);
102296                         psi_bug = 1;
102297                 }
102298 -               groupc->tasks[t]--;
102299         }
102301         for (t = 0; set; set &= ~(1 << t), t++)
102302 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
102303 index 10a1522b1e30..e4e4f47cee6a 100644
102304 --- a/kernel/sched/sched.h
102305 +++ b/kernel/sched/sched.h
102306 @@ -204,6 +204,13 @@ static inline void update_avg(u64 *avg, u64 sample)
102307         *avg += diff / 8;
102311 + * Shifting a value by an exponent greater *or equal* to the size of said value
102312 + * is UB; cap at size-1.
102313 + */
102314 +#define shr_bound(val, shift)                                                  \
102315 +       (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
102318   * !! For sched_setattr_nocheck() (kernel) only !!
102319   *
102320 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
102321 index 09d35044bd88..12f80587e127 100644
102322 --- a/kernel/sched/topology.c
102323 +++ b/kernel/sched/topology.c
102324 @@ -723,35 +723,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
102325         for (tmp = sd; tmp; tmp = tmp->parent)
102326                 numa_distance += !!(tmp->flags & SD_NUMA);
102328 -       /*
102329 -        * FIXME: Diameter >=3 is misrepresented.
102330 -        *
102331 -        * Smallest diameter=3 topology is:
102332 -        *
102333 -        *   node   0   1   2   3
102334 -        *     0:  10  20  30  40
102335 -        *     1:  20  10  20  30
102336 -        *     2:  30  20  10  20
102337 -        *     3:  40  30  20  10
102338 -        *
102339 -        *   0 --- 1 --- 2 --- 3
102340 -        *
102341 -        * NUMA-3       0-3             N/A             N/A             0-3
102342 -        *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
102343 -        *
102344 -        * NUMA-2       0-2             0-3             0-3             1-3
102345 -        *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
102346 -        *
102347 -        * NUMA-1       0-1             0-2             1-3             2-3
102348 -        *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
102349 -        *
102350 -        * NUMA-0       0               1               2               3
102351 -        *
102352 -        * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
102353 -        * group span isn't a subset of the domain span.
102354 -        */
102355 -       WARN_ONCE(numa_distance > 2, "Shortest NUMA path spans too many nodes\n");
102357         sched_domain_debug(sd, cpu);
102359         rq_attach_root(rq, rd);
102360 @@ -982,6 +953,31 @@ static void init_overlap_sched_group(struct sched_domain *sd,
102361         sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
102364 +static struct sched_domain *
102365 +find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
102367 +       /*
102368 +        * The proper descendant would be the one whose child won't span out
102369 +        * of sd
102370 +        */
102371 +       while (sibling->child &&
102372 +              !cpumask_subset(sched_domain_span(sibling->child),
102373 +                              sched_domain_span(sd)))
102374 +               sibling = sibling->child;
102376 +       /*
102377 +        * As we are referencing sgc across different topology level, we need
102378 +        * to go down to skip those sched_domains which don't contribute to
102379 +        * scheduling because they will be degenerated in cpu_attach_domain
102380 +        */
102381 +       while (sibling->child &&
102382 +              cpumask_equal(sched_domain_span(sibling->child),
102383 +                            sched_domain_span(sibling)))
102384 +               sibling = sibling->child;
102386 +       return sibling;
102389  static int
102390  build_overlap_sched_groups(struct sched_domain *sd, int cpu)
102392 @@ -1015,6 +1011,41 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
102393                 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
102394                         continue;
102396 +               /*
102397 +                * Usually we build sched_group by sibling's child sched_domain
102398 +                * But for machines whose NUMA diameter are 3 or above, we move
102399 +                * to build sched_group by sibling's proper descendant's child
102400 +                * domain because sibling's child sched_domain will span out of
102401 +                * the sched_domain being built as below.
102402 +                *
102403 +                * Smallest diameter=3 topology is:
102404 +                *
102405 +                *   node   0   1   2   3
102406 +                *     0:  10  20  30  40
102407 +                *     1:  20  10  20  30
102408 +                *     2:  30  20  10  20
102409 +                *     3:  40  30  20  10
102410 +                *
102411 +                *   0 --- 1 --- 2 --- 3
102412 +                *
102413 +                * NUMA-3       0-3             N/A             N/A             0-3
102414 +                *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
102415 +                *
102416 +                * NUMA-2       0-2             0-3             0-3             1-3
102417 +                *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
102418 +                *
102419 +                * NUMA-1       0-1             0-2             1-3             2-3
102420 +                *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
102421 +                *
102422 +                * NUMA-0       0               1               2               3
102423 +                *
102424 +                * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
102425 +                * group span isn't a subset of the domain span.
102426 +                */
102427 +               if (sibling->child &&
102428 +                   !cpumask_subset(sched_domain_span(sibling->child), span))
102429 +                       sibling = find_descended_sibling(sd, sibling);
102431                 sg = build_group_from_child_sched_domain(sibling, cpu);
102432                 if (!sg)
102433                         goto fail;
102434 @@ -1022,7 +1053,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
102435                 sg_span = sched_group_span(sg);
102436                 cpumask_or(covered, covered, sg_span);
102438 -               init_overlap_sched_group(sd, sg);
102439 +               init_overlap_sched_group(sibling, sg);
102441                 if (!first)
102442                         first = sg;
102443 diff --git a/kernel/smp.c b/kernel/smp.c
102444 index aeb0adfa0606..c678589fbb76 100644
102445 --- a/kernel/smp.c
102446 +++ b/kernel/smp.c
102447 @@ -110,7 +110,7 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
102448  static atomic_t csd_bug_count = ATOMIC_INIT(0);
102450  /* Record current CSD work for current CPU, NULL to erase. */
102451 -static void csd_lock_record(call_single_data_t *csd)
102452 +static void csd_lock_record(struct __call_single_data *csd)
102454         if (!csd) {
102455                 smp_mb(); /* NULL cur_csd after unlock. */
102456 @@ -125,7 +125,7 @@ static void csd_lock_record(call_single_data_t *csd)
102457                   /* Or before unlock, as the case may be. */
102460 -static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
102461 +static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
102463         unsigned int csd_type;
102465 @@ -140,7 +140,7 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
102466   * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
102467   * so waiting on other types gets much less information.
102468   */
102469 -static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
102470 +static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
102472         int cpu = -1;
102473         int cpux;
102474 @@ -204,7 +204,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
102475   * previous function call. For multi-cpu calls its even more interesting
102476   * as we'll have to ensure no other cpu is observing our csd.
102477   */
102478 -static __always_inline void csd_lock_wait(call_single_data_t *csd)
102479 +static __always_inline void csd_lock_wait(struct __call_single_data *csd)
102481         int bug_id = 0;
102482         u64 ts0, ts1;
102483 @@ -219,17 +219,17 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
102486  #else
102487 -static void csd_lock_record(call_single_data_t *csd)
102488 +static void csd_lock_record(struct __call_single_data *csd)
102492 -static __always_inline void csd_lock_wait(call_single_data_t *csd)
102493 +static __always_inline void csd_lock_wait(struct __call_single_data *csd)
102495         smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
102497  #endif
102499 -static __always_inline void csd_lock(call_single_data_t *csd)
102500 +static __always_inline void csd_lock(struct __call_single_data *csd)
102502         csd_lock_wait(csd);
102503         csd->node.u_flags |= CSD_FLAG_LOCK;
102504 @@ -242,7 +242,7 @@ static __always_inline void csd_lock(call_single_data_t *csd)
102505         smp_wmb();
102508 -static __always_inline void csd_unlock(call_single_data_t *csd)
102509 +static __always_inline void csd_unlock(struct __call_single_data *csd)
102511         WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
102513 @@ -276,7 +276,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
102514   * for execution on the given CPU. data must already have
102515   * ->func, ->info, and ->flags set.
102516   */
102517 -static int generic_exec_single(int cpu, call_single_data_t *csd)
102518 +static int generic_exec_single(int cpu, struct __call_single_data *csd)
102520         if (cpu == smp_processor_id()) {
102521                 smp_call_func_t func = csd->func;
102522 @@ -542,7 +542,7 @@ EXPORT_SYMBOL(smp_call_function_single);
102523   * NOTE: Be careful, there is unfortunately no current debugging facility to
102524   * validate the correctness of this serialization.
102525   */
102526 -int smp_call_function_single_async(int cpu, call_single_data_t *csd)
102527 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
102529         int err = 0;
102531 diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
102532 index 19aa806890d5..1750dfc416d8 100644
102533 --- a/kernel/sys_ni.c
102534 +++ b/kernel/sys_ni.c
102535 @@ -150,6 +150,12 @@ COND_SYSCALL_COMPAT(set_robust_list);
102536  COND_SYSCALL(get_robust_list);
102537  COND_SYSCALL_COMPAT(get_robust_list);
102539 +/* kernel/futex2.c */
102540 +COND_SYSCALL(futex_wait);
102541 +COND_SYSCALL(futex_wake);
102542 +COND_SYSCALL(futex_waitv);
102543 +COND_SYSCALL(futex_requeue);
102545  /* kernel/hrtimer.c */
102547  /* kernel/itimer.c */
102548 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
102549 index 62fbd09b5dc1..caad193c931f 100644
102550 --- a/kernel/sysctl.c
102551 +++ b/kernel/sysctl.c
102552 @@ -120,9 +120,9 @@ static unsigned long long_max = LONG_MAX;
102553  static int one_hundred = 100;
102554  static int two_hundred = 200;
102555  static int one_thousand = 1000;
102556 -#ifdef CONFIG_PRINTK
102557  static int ten_thousand = 10000;
102558 -#endif
102559 +extern int hrtimer_granularity_us;
102560 +extern int hrtimeout_min_us;
102561  #ifdef CONFIG_PERF_EVENTS
102562  static int six_hundred_forty_kb = 640 * 1024;
102563  #endif
102564 @@ -200,6 +200,10 @@ static int min_extfrag_threshold;
102565  static int max_extfrag_threshold = 1000;
102566  #endif
102568 +#ifdef CONFIG_USER_NS
102569 +extern int unprivileged_userns_clone;
102570 +#endif
102572  #endif /* CONFIG_SYSCTL */
102574  #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
102575 @@ -1652,6 +1656,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
102578  static struct ctl_table kern_table[] = {
102579 +       {
102580 +               .procname       = "hrtimer_granularity_us",
102581 +               .data           = &hrtimer_granularity_us,
102582 +               .maxlen         = sizeof(int),
102583 +               .mode           = 0644,
102584 +               .proc_handler   = &proc_dointvec_minmax,
102585 +               .extra1         = SYSCTL_ONE,
102586 +               .extra2         = &ten_thousand,
102587 +       },
102588 +       {
102589 +               .procname       = "hrtimeout_min_us",
102590 +               .data           = &hrtimeout_min_us,
102591 +               .maxlen         = sizeof(int),
102592 +               .mode           = 0644,
102593 +               .proc_handler   = &proc_dointvec_minmax,
102594 +               .extra1         = SYSCTL_ONE,
102595 +               .extra2         = &ten_thousand,
102596 +       },
102597         {
102598                 .procname       = "sched_child_runs_first",
102599                 .data           = &sysctl_sched_child_runs_first,
102600 @@ -1902,6 +1924,15 @@ static struct ctl_table kern_table[] = {
102601                 .proc_handler   = proc_dointvec,
102602         },
102603  #endif
102604 +#ifdef CONFIG_USER_NS
102605 +       {
102606 +               .procname       = "unprivileged_userns_clone",
102607 +               .data           = &unprivileged_userns_clone,
102608 +               .maxlen         = sizeof(int),
102609 +               .mode           = 0644,
102610 +               .proc_handler   = proc_dointvec,
102611 +       },
102612 +#endif
102613  #ifdef CONFIG_PROC_SYSCTL
102614         {
102615                 .procname       = "tainted",
102616 @@ -3093,6 +3124,20 @@ static struct ctl_table vm_table[] = {
102617                 .extra2         = SYSCTL_ONE,
102618         },
102619  #endif
102620 +       {
102621 +               .procname       = "clean_low_kbytes",
102622 +               .data           = &sysctl_clean_low_kbytes,
102623 +               .maxlen         = sizeof(sysctl_clean_low_kbytes),
102624 +               .mode           = 0644,
102625 +               .proc_handler   = proc_doulongvec_minmax,
102626 +       },
102627 +       {
102628 +               .procname       = "clean_min_kbytes",
102629 +               .data           = &sysctl_clean_min_kbytes,
102630 +               .maxlen         = sizeof(sysctl_clean_min_kbytes),
102631 +               .mode           = 0644,
102632 +               .proc_handler   = proc_doulongvec_minmax,
102633 +       },
102634         {
102635                 .procname       = "user_reserve_kbytes",
102636                 .data           = &sysctl_user_reserve_kbytes,
102637 diff --git a/kernel/task_work.c b/kernel/task_work.c
102638 index 9cde961875c0..5c8dea45d4f8 100644
102639 --- a/kernel/task_work.c
102640 +++ b/kernel/task_work.c
102641 @@ -57,6 +57,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
102643         return 0;
102645 +EXPORT_SYMBOL(task_work_add);
102647  /**
102648   * task_work_cancel - cancel a pending work added by task_work_add()
102649 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
102650 index 4d94e2b5499d..a7924fedf479 100644
102651 --- a/kernel/time/alarmtimer.c
102652 +++ b/kernel/time/alarmtimer.c
102653 @@ -92,7 +92,7 @@ static int alarmtimer_rtc_add_device(struct device *dev,
102654         if (rtcdev)
102655                 return -EBUSY;
102657 -       if (!rtc->ops->set_alarm)
102658 +       if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
102659                 return -1;
102660         if (!device_may_wakeup(rtc->dev.parent))
102661                 return -1;
102662 diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
102663 index f5490222e134..23db3c39e07a 100644
102664 --- a/kernel/time/clockevents.c
102665 +++ b/kernel/time/clockevents.c
102666 @@ -190,8 +190,9 @@ int clockevents_tick_resume(struct clock_event_device *dev)
102668  #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
102670 +int __read_mostly hrtimer_granularity_us = 100;
102671  /* Limit min_delta to a jiffie */
102672 -#define MIN_DELTA_LIMIT                (NSEC_PER_SEC / HZ)
102673 +#define MIN_DELTA_LIMIT                (hrtimer_granularity_us * NSEC_PER_USEC)
102675  /**
102676   * clockevents_increase_min_delta - raise minimum delta of a clock event device
102677 diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
102678 index 5c9d968187ae..7a3d640dc13a 100644
102679 --- a/kernel/time/hrtimer.c
102680 +++ b/kernel/time/hrtimer.c
102681 @@ -2236,3 +2236,113 @@ int __sched schedule_hrtimeout(ktime_t *expires,
102682         return schedule_hrtimeout_range(expires, 0, mode);
102684  EXPORT_SYMBOL_GPL(schedule_hrtimeout);
102687 + * As per schedule_hrtimeout but taskes a millisecond value and returns how
102688 + * many milliseconds are left.
102689 + */
102690 +long __sched schedule_msec_hrtimeout(long timeout)
102692 +       struct hrtimer_sleeper t;
102693 +       int delta, jiffs;
102694 +       ktime_t expires;
102696 +       if (!timeout) {
102697 +               __set_current_state(TASK_RUNNING);
102698 +               return 0;
102699 +       }
102701 +       jiffs = msecs_to_jiffies(timeout);
102702 +       /*
102703 +        * If regular timer resolution is adequate or hrtimer resolution is not
102704 +        * (yet) better than Hz, as would occur during startup, use regular
102705 +        * timers.
102706 +        */
102707 +       if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
102708 +               return schedule_timeout(jiffs);
102710 +       delta = (timeout % 1000) * NSEC_PER_MSEC;
102711 +       expires = ktime_set(0, delta);
102713 +       hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
102714 +       hrtimer_set_expires_range_ns(&t.timer, expires, delta);
102716 +       hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
102718 +       if (likely(t.task))
102719 +               schedule();
102721 +       hrtimer_cancel(&t.timer);
102722 +       destroy_hrtimer_on_stack(&t.timer);
102724 +       __set_current_state(TASK_RUNNING);
102726 +       expires = hrtimer_expires_remaining(&t.timer);
102727 +       timeout = ktime_to_ms(expires);
102728 +       return timeout < 0 ? 0 : timeout;
102731 +EXPORT_SYMBOL(schedule_msec_hrtimeout);
102733 +#define USECS_PER_SEC 1000000
102734 +extern int hrtimer_granularity_us;
102736 +static inline long schedule_usec_hrtimeout(long timeout)
102738 +       struct hrtimer_sleeper t;
102739 +       ktime_t expires;
102740 +       int delta;
102742 +       if (!timeout) {
102743 +               __set_current_state(TASK_RUNNING);
102744 +               return 0;
102745 +       }
102747 +       if (hrtimer_resolution >= NSEC_PER_SEC / HZ)
102748 +               return schedule_timeout(usecs_to_jiffies(timeout));
102750 +       if (timeout < hrtimer_granularity_us)
102751 +               timeout = hrtimer_granularity_us;
102752 +       delta = (timeout % USECS_PER_SEC) * NSEC_PER_USEC;
102753 +       expires = ktime_set(0, delta);
102755 +       hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
102756 +       hrtimer_set_expires_range_ns(&t.timer, expires, delta);
102758 +       hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
102760 +       if (likely(t.task))
102761 +               schedule();
102763 +       hrtimer_cancel(&t.timer);
102764 +       destroy_hrtimer_on_stack(&t.timer);
102766 +       __set_current_state(TASK_RUNNING);
102768 +       expires = hrtimer_expires_remaining(&t.timer);
102769 +       timeout = ktime_to_us(expires);
102770 +       return timeout < 0 ? 0 : timeout;
102773 +int __read_mostly hrtimeout_min_us = 500;
102775 +long __sched schedule_min_hrtimeout(void)
102777 +       return usecs_to_jiffies(schedule_usec_hrtimeout(hrtimeout_min_us));
102780 +EXPORT_SYMBOL(schedule_min_hrtimeout);
102782 +long __sched schedule_msec_hrtimeout_interruptible(long timeout)
102784 +       __set_current_state(TASK_INTERRUPTIBLE);
102785 +       return schedule_msec_hrtimeout(timeout);
102787 +EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
102789 +long __sched schedule_msec_hrtimeout_uninterruptible(long timeout)
102791 +       __set_current_state(TASK_UNINTERRUPTIBLE);
102792 +       return schedule_msec_hrtimeout(timeout);
102794 +EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
102795 diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
102796 index bf540f5a4115..dd5697d7347b 100644
102797 --- a/kernel/time/posix-timers.c
102798 +++ b/kernel/time/posix-timers.c
102799 @@ -1191,8 +1191,8 @@ SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
102801         err = do_clock_adjtime(which_clock, &ktx);
102803 -       if (err >= 0)
102804 -               err = put_old_timex32(utp, &ktx);
102805 +       if (err >= 0 && put_old_timex32(utp, &ktx))
102806 +               return -EFAULT;
102808         return err;
102810 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
102811 index f475f1a027c8..8d82fe9f6fbb 100644
102812 --- a/kernel/time/timer.c
102813 +++ b/kernel/time/timer.c
102814 @@ -44,6 +44,7 @@
102815  #include <linux/slab.h>
102816  #include <linux/compat.h>
102817  #include <linux/random.h>
102818 +#include <linux/freezer.h>
102820  #include <linux/uaccess.h>
102821  #include <asm/unistd.h>
102822 @@ -1886,6 +1887,18 @@ signed long __sched schedule_timeout(signed long timeout)
102824         expire = timeout + jiffies;
102826 +#ifdef CONFIG_HIGH_RES_TIMERS
102827 +       if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
102828 +               /*
102829 +                * Special case 1 as being a request for the minimum timeout
102830 +                * and use highres timers to timeout after 1ms to workaround
102831 +                * the granularity of low Hz tick timers.
102832 +                */
102833 +               if (!schedule_min_hrtimeout())
102834 +                       return 0;
102835 +               goto out_timeout;
102836 +       }
102837 +#endif
102838         timer.task = current;
102839         timer_setup_on_stack(&timer.timer, process_timeout, 0);
102840         __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
102841 @@ -1894,10 +1907,10 @@ signed long __sched schedule_timeout(signed long timeout)
102843         /* Remove the timer from the object tracker */
102844         destroy_timer_on_stack(&timer.timer);
102846 +out_timeout:
102847         timeout = expire - jiffies;
102849 - out:
102850 +out:
102851         return timeout < 0 ? 0 : timeout;
102853  EXPORT_SYMBOL(schedule_timeout);
102854 @@ -2040,7 +2053,19 @@ void __init init_timers(void)
102855   */
102856  void msleep(unsigned int msecs)
102858 -       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
102859 +       int jiffs = msecs_to_jiffies(msecs);
102860 +       unsigned long timeout;
102862 +       /*
102863 +        * Use high resolution timers where the resolution of tick based
102864 +        * timers is inadequate.
102865 +        */
102866 +       if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
102867 +               while (msecs)
102868 +                       msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
102869 +               return;
102870 +       }
102871 +       timeout = jiffs + 1;
102873         while (timeout)
102874                 timeout = schedule_timeout_uninterruptible(timeout);
102875 @@ -2054,7 +2079,15 @@ EXPORT_SYMBOL(msleep);
102876   */
102877  unsigned long msleep_interruptible(unsigned int msecs)
102879 -       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
102880 +       int jiffs = msecs_to_jiffies(msecs);
102881 +       unsigned long timeout;
102883 +       if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
102884 +               while (msecs && !signal_pending(current))
102885 +                       msecs = schedule_msec_hrtimeout_interruptible(msecs);
102886 +               return msecs;
102887 +       }
102888 +       timeout = jiffs + 1;
102890         while (timeout && !signal_pending(current))
102891                 timeout = schedule_timeout_interruptible(timeout);
102892 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
102893 index 3ba52d4e1314..826b88b727a6 100644
102894 --- a/kernel/trace/ftrace.c
102895 +++ b/kernel/trace/ftrace.c
102896 @@ -5631,7 +5631,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
102898         parser = &iter->parser;
102899         if (trace_parser_loaded(parser)) {
102900 -               ftrace_match_records(iter->hash, parser->buffer, parser->idx);
102901 +               int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
102903 +               ftrace_process_regex(iter, parser->buffer,
102904 +                                    parser->idx, enable);
102905         }
102907         trace_parser_put(parser);
102908 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
102909 index c0c9aa5cd8e2..67c01dc5cdeb 100644
102910 --- a/kernel/trace/trace.c
102911 +++ b/kernel/trace/trace.c
102912 @@ -2390,14 +2390,13 @@ static void tracing_stop_tr(struct trace_array *tr)
102914  static int trace_save_cmdline(struct task_struct *tsk)
102916 -       unsigned pid, idx;
102917 +       unsigned tpid, idx;
102919         /* treat recording of idle task as a success */
102920         if (!tsk->pid)
102921                 return 1;
102923 -       if (unlikely(tsk->pid > PID_MAX_DEFAULT))
102924 -               return 0;
102925 +       tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
102927         /*
102928          * It's not the end of the world if we don't get
102929 @@ -2408,26 +2407,15 @@ static int trace_save_cmdline(struct task_struct *tsk)
102930         if (!arch_spin_trylock(&trace_cmdline_lock))
102931                 return 0;
102933 -       idx = savedcmd->map_pid_to_cmdline[tsk->pid];
102934 +       idx = savedcmd->map_pid_to_cmdline[tpid];
102935         if (idx == NO_CMDLINE_MAP) {
102936                 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
102938 -               /*
102939 -                * Check whether the cmdline buffer at idx has a pid
102940 -                * mapped. We are going to overwrite that entry so we
102941 -                * need to clear the map_pid_to_cmdline. Otherwise we
102942 -                * would read the new comm for the old pid.
102943 -                */
102944 -               pid = savedcmd->map_cmdline_to_pid[idx];
102945 -               if (pid != NO_CMDLINE_MAP)
102946 -                       savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
102948 -               savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
102949 -               savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
102951 +               savedcmd->map_pid_to_cmdline[tpid] = idx;
102952                 savedcmd->cmdline_idx = idx;
102953         }
102955 +       savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
102956         set_cmdline(idx, tsk->comm);
102958         arch_spin_unlock(&trace_cmdline_lock);
102959 @@ -2438,6 +2426,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
102960  static void __trace_find_cmdline(int pid, char comm[])
102962         unsigned map;
102963 +       int tpid;
102965         if (!pid) {
102966                 strcpy(comm, "<idle>");
102967 @@ -2449,16 +2438,16 @@ static void __trace_find_cmdline(int pid, char comm[])
102968                 return;
102969         }
102971 -       if (pid > PID_MAX_DEFAULT) {
102972 -               strcpy(comm, "<...>");
102973 -               return;
102974 +       tpid = pid & (PID_MAX_DEFAULT - 1);
102975 +       map = savedcmd->map_pid_to_cmdline[tpid];
102976 +       if (map != NO_CMDLINE_MAP) {
102977 +               tpid = savedcmd->map_cmdline_to_pid[map];
102978 +               if (tpid == pid) {
102979 +                       strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
102980 +                       return;
102981 +               }
102982         }
102984 -       map = savedcmd->map_pid_to_cmdline[pid];
102985 -       if (map != NO_CMDLINE_MAP)
102986 -               strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
102987 -       else
102988 -               strcpy(comm, "<...>");
102989 +       strcpy(comm, "<...>");
102992  void trace_find_cmdline(int pid, char comm[])
102993 diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
102994 index aaf6793ededa..c1637f90c8a3 100644
102995 --- a/kernel/trace/trace_clock.c
102996 +++ b/kernel/trace/trace_clock.c
102997 @@ -95,33 +95,49 @@ u64 notrace trace_clock_global(void)
102999         unsigned long flags;
103000         int this_cpu;
103001 -       u64 now;
103002 +       u64 now, prev_time;
103004         raw_local_irq_save(flags);
103006         this_cpu = raw_smp_processor_id();
103007 -       now = sched_clock_cpu(this_cpu);
103009         /*
103010 -        * If in an NMI context then dont risk lockups and return the
103011 -        * cpu_clock() time:
103012 +        * The global clock "guarantees" that the events are ordered
103013 +        * between CPUs. But if two events on two different CPUS call
103014 +        * trace_clock_global at roughly the same time, it really does
103015 +        * not matter which one gets the earlier time. Just make sure
103016 +        * that the same CPU will always show a monotonic clock.
103017 +        *
103018 +        * Use a read memory barrier to get the latest written
103019 +        * time that was recorded.
103020          */
103021 -       if (unlikely(in_nmi()))
103022 -               goto out;
103023 +       smp_rmb();
103024 +       prev_time = READ_ONCE(trace_clock_struct.prev_time);
103025 +       now = sched_clock_cpu(this_cpu);
103027 -       arch_spin_lock(&trace_clock_struct.lock);
103028 +       /* Make sure that now is always greater than prev_time */
103029 +       if ((s64)(now - prev_time) < 0)
103030 +               now = prev_time + 1;
103032         /*
103033 -        * TODO: if this happens often then maybe we should reset
103034 -        * my_scd->clock to prev_time+1, to make sure
103035 -        * we start ticking with the local clock from now on?
103036 +        * If in an NMI context then dont risk lockups and simply return
103037 +        * the current time.
103038          */
103039 -       if ((s64)(now - trace_clock_struct.prev_time) < 0)
103040 -               now = trace_clock_struct.prev_time + 1;
103041 +       if (unlikely(in_nmi()))
103042 +               goto out;
103044 -       trace_clock_struct.prev_time = now;
103045 +       /* Tracing can cause strange recursion, always use a try lock */
103046 +       if (arch_spin_trylock(&trace_clock_struct.lock)) {
103047 +               /* Reread prev_time in case it was already updated */
103048 +               prev_time = READ_ONCE(trace_clock_struct.prev_time);
103049 +               if ((s64)(now - prev_time) < 0)
103050 +                       now = prev_time + 1;
103052 -       arch_spin_unlock(&trace_clock_struct.lock);
103053 +               trace_clock_struct.prev_time = now;
103055 +               /* The unlock acts as the wmb for the above rmb */
103056 +               arch_spin_unlock(&trace_clock_struct.lock);
103057 +       }
103058   out:
103059         raw_local_irq_restore(flags);
103061 diff --git a/kernel/up.c b/kernel/up.c
103062 index c6f323dcd45b..4edd5493eba2 100644
103063 --- a/kernel/up.c
103064 +++ b/kernel/up.c
103065 @@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
103067  EXPORT_SYMBOL(smp_call_function_single);
103069 -int smp_call_function_single_async(int cpu, call_single_data_t *csd)
103070 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
103072         unsigned long flags;
103074 diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
103075 index 9a4b980d695b..0475d15b1c66 100644
103076 --- a/kernel/user_namespace.c
103077 +++ b/kernel/user_namespace.c
103078 @@ -21,6 +21,9 @@
103079  #include <linux/bsearch.h>
103080  #include <linux/sort.h>
103082 +/* sysctl */
103083 +int unprivileged_userns_clone = 1;
103085  static struct kmem_cache *user_ns_cachep __read_mostly;
103086  static DEFINE_MUTEX(userns_state_mutex);
103088 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
103089 index 107bc38b1945..8cf0678378d2 100644
103090 --- a/kernel/watchdog.c
103091 +++ b/kernel/watchdog.c
103092 @@ -154,7 +154,11 @@ static void lockup_detector_update_enable(void)
103094  #ifdef CONFIG_SOFTLOCKUP_DETECTOR
103096 -#define SOFTLOCKUP_RESET       ULONG_MAX
103098 + * Delay the soflockup report when running a known slow code.
103099 + * It does _not_ affect the timestamp of the last successdul reschedule.
103100 + */
103101 +#define SOFTLOCKUP_DELAY_REPORT        ULONG_MAX
103103  #ifdef CONFIG_SMP
103104  int __read_mostly sysctl_softlockup_all_cpu_backtrace;
103105 @@ -169,10 +173,12 @@ unsigned int __read_mostly softlockup_panic =
103106  static bool softlockup_initialized __read_mostly;
103107  static u64 __read_mostly sample_period;
103109 +/* Timestamp taken after the last successful reschedule. */
103110  static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
103111 +/* Timestamp of the last softlockup report. */
103112 +static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
103113  static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
103114  static DEFINE_PER_CPU(bool, softlockup_touch_sync);
103115 -static DEFINE_PER_CPU(bool, soft_watchdog_warn);
103116  static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
103117  static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
103118  static unsigned long soft_lockup_nmi_warn;
103119 @@ -235,10 +241,16 @@ static void set_sample_period(void)
103120         watchdog_update_hrtimer_threshold(sample_period);
103123 +static void update_report_ts(void)
103125 +       __this_cpu_write(watchdog_report_ts, get_timestamp());
103128  /* Commands for resetting the watchdog */
103129 -static void __touch_watchdog(void)
103130 +static void update_touch_ts(void)
103132         __this_cpu_write(watchdog_touch_ts, get_timestamp());
103133 +       update_report_ts();
103136  /**
103137 @@ -252,10 +264,10 @@ static void __touch_watchdog(void)
103138  notrace void touch_softlockup_watchdog_sched(void)
103140         /*
103141 -        * Preemption can be enabled.  It doesn't matter which CPU's timestamp
103142 -        * gets zeroed here, so use the raw_ operation.
103143 +        * Preemption can be enabled.  It doesn't matter which CPU's watchdog
103144 +        * report period gets restarted here, so use the raw_ operation.
103145          */
103146 -       raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
103147 +       raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
103150  notrace void touch_softlockup_watchdog(void)
103151 @@ -279,7 +291,7 @@ void touch_all_softlockup_watchdogs(void)
103152          * the softlockup check.
103153          */
103154         for_each_cpu(cpu, &watchdog_allowed_mask) {
103155 -               per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
103156 +               per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
103157                 wq_watchdog_touch(cpu);
103158         }
103160 @@ -287,16 +299,16 @@ void touch_all_softlockup_watchdogs(void)
103161  void touch_softlockup_watchdog_sync(void)
103163         __this_cpu_write(softlockup_touch_sync, true);
103164 -       __this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
103165 +       __this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
103168 -static int is_softlockup(unsigned long touch_ts)
103169 +static int is_softlockup(unsigned long touch_ts, unsigned long period_ts)
103171         unsigned long now = get_timestamp();
103173         if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
103174                 /* Warn about unreasonable delays. */
103175 -               if (time_after(now, touch_ts + get_softlockup_thresh()))
103176 +               if (time_after(now, period_ts + get_softlockup_thresh()))
103177                         return now - touch_ts;
103178         }
103179         return 0;
103180 @@ -332,7 +344,7 @@ static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
103181   */
103182  static int softlockup_fn(void *data)
103184 -       __touch_watchdog();
103185 +       update_touch_ts();
103186         complete(this_cpu_ptr(&softlockup_completion));
103188         return 0;
103189 @@ -342,6 +354,7 @@ static int softlockup_fn(void *data)
103190  static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
103192         unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
103193 +       unsigned long period_ts = __this_cpu_read(watchdog_report_ts);
103194         struct pt_regs *regs = get_irq_regs();
103195         int duration;
103196         int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
103197 @@ -363,7 +376,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
103198         /* .. and repeat */
103199         hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
103201 -       if (touch_ts == SOFTLOCKUP_RESET) {
103202 +       /* Reset the interval when touched externally by a known slow code. */
103203 +       if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
103204                 if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
103205                         /*
103206                          * If the time stamp was touched atomically
103207 @@ -375,7 +389,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
103209                 /* Clear the guest paused flag on watchdog reset */
103210                 kvm_check_and_clear_guest_paused();
103211 -               __touch_watchdog();
103212 +               update_report_ts();
103214                 return HRTIMER_RESTART;
103215         }
103217 @@ -385,7 +400,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
103218          * indicate it is getting cpu time.  If it hasn't then
103219          * this is a good indication some task is hogging the cpu
103220          */
103221 -       duration = is_softlockup(touch_ts);
103222 +       duration = is_softlockup(touch_ts, period_ts);
103223         if (unlikely(duration)) {
103224                 /*
103225                  * If a virtual machine is stopped by the host it can look to
103226 @@ -395,21 +410,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
103227                 if (kvm_check_and_clear_guest_paused())
103228                         return HRTIMER_RESTART;
103230 -               /* only warn once */
103231 -               if (__this_cpu_read(soft_watchdog_warn) == true)
103232 -                       return HRTIMER_RESTART;
103234 +               /*
103235 +                * Prevent multiple soft-lockup reports if one cpu is already
103236 +                * engaged in dumping all cpu back traces.
103237 +                */
103238                 if (softlockup_all_cpu_backtrace) {
103239 -                       /* Prevent multiple soft-lockup reports if one cpu is already
103240 -                        * engaged in dumping cpu back traces
103241 -                        */
103242 -                       if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
103243 -                               /* Someone else will report us. Let's give up */
103244 -                               __this_cpu_write(soft_watchdog_warn, true);
103245 +                       if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
103246                                 return HRTIMER_RESTART;
103247 -                       }
103248                 }
103250 +               /* Start period for the next softlockup warning. */
103251 +               update_report_ts();
103253                 pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
103254                         smp_processor_id(), duration,
103255                         current->comm, task_pid_nr(current));
103256 @@ -421,22 +433,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
103257                         dump_stack();
103259                 if (softlockup_all_cpu_backtrace) {
103260 -                       /* Avoid generating two back traces for current
103261 -                        * given that one is already made above
103262 -                        */
103263                         trigger_allbutself_cpu_backtrace();
103265 -                       clear_bit(0, &soft_lockup_nmi_warn);
103266 -                       /* Barrier to sync with other cpus */
103267 -                       smp_mb__after_atomic();
103268 +                       clear_bit_unlock(0, &soft_lockup_nmi_warn);
103269                 }
103271                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
103272                 if (softlockup_panic)
103273                         panic("softlockup: hung tasks");
103274 -               __this_cpu_write(soft_watchdog_warn, true);
103275 -       } else
103276 -               __this_cpu_write(soft_watchdog_warn, false);
103277 +       }
103279         return HRTIMER_RESTART;
103281 @@ -461,7 +465,7 @@ static void watchdog_enable(unsigned int cpu)
103282                       HRTIMER_MODE_REL_PINNED_HARD);
103284         /* Initialize timestamp */
103285 -       __touch_watchdog();
103286 +       update_touch_ts();
103287         /* Enable the perf event */
103288         if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
103289                 watchdog_nmi_enable(cpu);
103290 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
103291 index 417c3d3e521b..03d75fe17edf 100644
103292 --- a/lib/Kconfig.debug
103293 +++ b/lib/Kconfig.debug
103294 @@ -179,7 +179,7 @@ config DYNAMIC_DEBUG_CORE
103296  config SYMBOLIC_ERRNAME
103297         bool "Support symbolic error names in printf"
103298 -       default y if PRINTK
103299 +       default n
103300         help
103301           If you say Y here, the kernel's printf implementation will
103302           be able to print symbolic error names such as ENOSPC instead
103303 @@ -189,7 +189,7 @@ config SYMBOLIC_ERRNAME
103304  config DEBUG_BUGVERBOSE
103305         bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
103306         depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
103307 -       default y
103308 +       default n
103309         help
103310           Say Y here to make BUG() panics output the file name and line number
103311           of the BUG call as well as the EIP and oops trace.  This aids
103312 diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
103313 index 78f50ccb3b45..e641add33947 100644
103314 --- a/lib/Kconfig.kfence
103315 +++ b/lib/Kconfig.kfence
103316 @@ -7,6 +7,7 @@ menuconfig KFENCE
103317         bool "KFENCE: low-overhead sampling-based memory safety error detector"
103318         depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
103319         select STACKTRACE
103320 +       select IRQ_WORK
103321         help
103322           KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
103323           access, use-after-free, and invalid-free errors. KFENCE is designed
103324 diff --git a/lib/bug.c b/lib/bug.c
103325 index 8f9d537bfb2a..b92da1f6e21b 100644
103326 --- a/lib/bug.c
103327 +++ b/lib/bug.c
103328 @@ -155,30 +155,27 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
103330         file = NULL;
103331         line = 0;
103332 -       warning = 0;
103334 -       if (bug) {
103335  #ifdef CONFIG_DEBUG_BUGVERBOSE
103336  #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
103337 -               file = bug->file;
103338 +       file = bug->file;
103339  #else
103340 -               file = (const char *)bug + bug->file_disp;
103341 +       file = (const char *)bug + bug->file_disp;
103342  #endif
103343 -               line = bug->line;
103344 +       line = bug->line;
103345  #endif
103346 -               warning = (bug->flags & BUGFLAG_WARNING) != 0;
103347 -               once = (bug->flags & BUGFLAG_ONCE) != 0;
103348 -               done = (bug->flags & BUGFLAG_DONE) != 0;
103350 -               if (warning && once) {
103351 -                       if (done)
103352 -                               return BUG_TRAP_TYPE_WARN;
103354 -                       /*
103355 -                        * Since this is the only store, concurrency is not an issue.
103356 -                        */
103357 -                       bug->flags |= BUGFLAG_DONE;
103358 -               }
103359 +       warning = (bug->flags & BUGFLAG_WARNING) != 0;
103360 +       once = (bug->flags & BUGFLAG_ONCE) != 0;
103361 +       done = (bug->flags & BUGFLAG_DONE) != 0;
103363 +       if (warning && once) {
103364 +               if (done)
103365 +                       return BUG_TRAP_TYPE_WARN;
103367 +               /*
103368 +                * Since this is the only store, concurrency is not an issue.
103369 +                */
103370 +               bug->flags |= BUGFLAG_DONE;
103371         }
103373         /*
103374 diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
103375 index 3cc77d94390b..7fb71845cc84 100644
103376 --- a/lib/crypto/poly1305-donna32.c
103377 +++ b/lib/crypto/poly1305-donna32.c
103378 @@ -10,7 +10,8 @@
103379  #include <asm/unaligned.h>
103380  #include <crypto/internal/poly1305.h>
103382 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
103383 +void poly1305_core_setkey(struct poly1305_core_key *key,
103384 +                         const u8 raw_key[POLY1305_BLOCK_SIZE])
103386         /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
103387         key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
103388 diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
103389 index 6ae181bb4345..d34cf4053668 100644
103390 --- a/lib/crypto/poly1305-donna64.c
103391 +++ b/lib/crypto/poly1305-donna64.c
103392 @@ -12,7 +12,8 @@
103394  typedef __uint128_t u128;
103396 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
103397 +void poly1305_core_setkey(struct poly1305_core_key *key,
103398 +                         const u8 raw_key[POLY1305_BLOCK_SIZE])
103400         u64 t0, t1;
103402 diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
103403 index 9d2d14df0fee..26d87fc3823e 100644
103404 --- a/lib/crypto/poly1305.c
103405 +++ b/lib/crypto/poly1305.c
103406 @@ -12,7 +12,8 @@
103407  #include <linux/module.h>
103408  #include <asm/unaligned.h>
103410 -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
103411 +void poly1305_init_generic(struct poly1305_desc_ctx *desc,
103412 +                          const u8 key[POLY1305_KEY_SIZE])
103414         poly1305_core_setkey(&desc->core_r, key);
103415         desc->s[0] = get_unaligned_le32(key + 16);
103416 diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
103417 index 790abc472f5b..6e5ecfba0a8d 100644
103418 --- a/lib/decompress_unzstd.c
103419 +++ b/lib/decompress_unzstd.c
103420 @@ -68,11 +68,7 @@
103421  #ifdef STATIC
103422  # define UNZSTD_PREBOOT
103423  # include "xxhash.c"
103424 -# include "zstd/entropy_common.c"
103425 -# include "zstd/fse_decompress.c"
103426 -# include "zstd/huf_decompress.c"
103427 -# include "zstd/zstd_common.c"
103428 -# include "zstd/decompress.c"
103429 +# include "zstd/decompress_sources.h"
103430  #endif
103432  #include <linux/decompress/mm.h>
103433 @@ -91,11 +87,15 @@
103435  static int INIT handle_zstd_error(size_t ret, void (*error)(char *x))
103437 -       const int err = ZSTD_getErrorCode(ret);
103438 +       const zstd_error_code err = zstd_get_error_code(ret);
103440 -       if (!ZSTD_isError(ret))
103441 +       if (!zstd_is_error(ret))
103442                 return 0;
103444 +       /*
103445 +        * zstd_get_error_name() cannot be used because error takes a char *
103446 +        * not a const char *
103447 +        */
103448         switch (err) {
103449         case ZSTD_error_memory_allocation:
103450                 error("ZSTD decompressor ran out of memory");
103451 @@ -124,28 +124,28 @@ static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf,
103452                                   long out_len, long *in_pos,
103453                                   void (*error)(char *x))
103455 -       const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
103456 +       const size_t wksp_size = zstd_dctx_workspace_bound();
103457         void *wksp = large_malloc(wksp_size);
103458 -       ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size);
103459 +       zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
103460         int err;
103461         size_t ret;
103463         if (dctx == NULL) {
103464 -               error("Out of memory while allocating ZSTD_DCtx");
103465 +               error("Out of memory while allocating zstd_dctx");
103466                 err = -1;
103467                 goto out;
103468         }
103469         /*
103470          * Find out how large the frame actually is, there may be junk at
103471 -        * the end of the frame that ZSTD_decompressDCtx() can't handle.
103472 +        * the end of the frame that zstd_decompress_dctx() can't handle.
103473          */
103474 -       ret = ZSTD_findFrameCompressedSize(in_buf, in_len);
103475 +       ret = zstd_find_frame_compressed_size(in_buf, in_len);
103476         err = handle_zstd_error(ret, error);
103477         if (err)
103478                 goto out;
103479         in_len = (long)ret;
103481 -       ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len);
103482 +       ret = zstd_decompress_dctx(dctx, out_buf, out_len, in_buf, in_len);
103483         err = handle_zstd_error(ret, error);
103484         if (err)
103485                 goto out;
103486 @@ -167,14 +167,14 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
103487                          long *in_pos,
103488                          void (*error)(char *x))
103490 -       ZSTD_inBuffer in;
103491 -       ZSTD_outBuffer out;
103492 -       ZSTD_frameParams params;
103493 +       zstd_in_buffer in;
103494 +       zstd_out_buffer out;
103495 +       zstd_frame_header header;
103496         void *in_allocated = NULL;
103497         void *out_allocated = NULL;
103498         void *wksp = NULL;
103499         size_t wksp_size;
103500 -       ZSTD_DStream *dstream;
103501 +       zstd_dstream *dstream;
103502         int err;
103503         size_t ret;
103505 @@ -238,13 +238,13 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
103506         out.size = out_len;
103508         /*
103509 -        * We need to know the window size to allocate the ZSTD_DStream.
103510 +        * We need to know the window size to allocate the zstd_dstream.
103511          * Since we are streaming, we need to allocate a buffer for the sliding
103512          * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX
103513          * (8 MB), so it is important to use the actual value so as not to
103514          * waste memory when it is smaller.
103515          */
103516 -       ret = ZSTD_getFrameParams(&params, in.src, in.size);
103517 +       ret = zstd_get_frame_header(&header, in.src, in.size);
103518         err = handle_zstd_error(ret, error);
103519         if (err)
103520                 goto out;
103521 @@ -253,19 +253,19 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
103522                 err = -1;
103523                 goto out;
103524         }
103525 -       if (params.windowSize > ZSTD_WINDOWSIZE_MAX) {
103526 +       if (header.windowSize > ZSTD_WINDOWSIZE_MAX) {
103527                 error("ZSTD-compressed data has too large a window size");
103528                 err = -1;
103529                 goto out;
103530         }
103532         /*
103533 -        * Allocate the ZSTD_DStream now that we know how much memory is
103534 +        * Allocate the zstd_dstream now that we know how much memory is
103535          * required.
103536          */
103537 -       wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize);
103538 +       wksp_size = zstd_dstream_workspace_bound(header.windowSize);
103539         wksp = large_malloc(wksp_size);
103540 -       dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size);
103541 +       dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);
103542         if (dstream == NULL) {
103543                 error("Out of memory while allocating ZSTD_DStream");
103544                 err = -1;
103545 @@ -298,7 +298,7 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
103546                         in.size = in_len;
103547                 }
103548                 /* Returns zero when the frame is complete. */
103549 -               ret = ZSTD_decompressStream(dstream, &out, &in);
103550 +               ret = zstd_decompress_stream(dstream, &out, &in);
103551                 err = handle_zstd_error(ret, error);
103552                 if (err)
103553                         goto out;
103554 diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
103555 index c70d6347afa2..921d0a654243 100644
103556 --- a/lib/dynamic_debug.c
103557 +++ b/lib/dynamic_debug.c
103558 @@ -396,7 +396,7 @@ static int ddebug_parse_query(char *words[], int nwords,
103559                         /* tail :$info is function or line-range */
103560                         fline = strchr(query->filename, ':');
103561                         if (!fline)
103562 -                               break;
103563 +                               continue;
103564                         *fline++ = '\0';
103565                         if (isalpha(*fline) || *fline == '*' || *fline == '?') {
103566                                 /* take as function name */
103567 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
103568 index 7998affa45d4..c87d5b6a8a55 100644
103569 --- a/lib/kobject_uevent.c
103570 +++ b/lib/kobject_uevent.c
103571 @@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
103573  static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
103575 +       int buffer_size = sizeof(env->buf) - env->buflen;
103576         int len;
103578 -       len = strlcpy(&env->buf[env->buflen], subsystem,
103579 -                     sizeof(env->buf) - env->buflen);
103580 -       if (len >= (sizeof(env->buf) - env->buflen)) {
103581 -               WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
103582 +       len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
103583 +       if (len >= buffer_size) {
103584 +               pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
103585 +                       buffer_size, len);
103586                 return -ENOMEM;
103587         }
103589 diff --git a/lib/nlattr.c b/lib/nlattr.c
103590 index 5b6116e81f9f..1d051ef66afe 100644
103591 --- a/lib/nlattr.c
103592 +++ b/lib/nlattr.c
103593 @@ -828,7 +828,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
103594         int attrlen = nla_len(nla);
103595         int d;
103597 -       if (attrlen > 0 && buf[attrlen - 1] == '\0')
103598 +       while (attrlen > 0 && buf[attrlen - 1] == '\0')
103599                 attrlen--;
103601         d = attrlen - len;
103602 diff --git a/lib/test_kasan.c b/lib/test_kasan.c
103603 index e5647d147b35..be69c3aa615a 100644
103604 --- a/lib/test_kasan.c
103605 +++ b/lib/test_kasan.c
103606 @@ -646,8 +646,20 @@ static char global_array[10];
103608  static void kasan_global_oob(struct kunit *test)
103610 -       volatile int i = 3;
103611 -       char *p = &global_array[ARRAY_SIZE(global_array) + i];
103612 +       /*
103613 +        * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
103614 +        * from failing here and panicing the kernel, access the array via a
103615 +        * volatile pointer, which will prevent the compiler from being able to
103616 +        * determine the array bounds.
103617 +        *
103618 +        * This access uses a volatile pointer to char (char *volatile) rather
103619 +        * than the more conventional pointer to volatile char (volatile char *)
103620 +        * because we want to prevent the compiler from making inferences about
103621 +        * the pointer itself (i.e. its array bounds), not the data that it
103622 +        * refers to.
103623 +        */
103624 +       char *volatile array = global_array;
103625 +       char *p = &array[ARRAY_SIZE(global_array) + 3];
103627         /* Only generic mode instruments globals. */
103628         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
103629 @@ -695,8 +707,9 @@ static void ksize_uaf(struct kunit *test)
103630  static void kasan_stack_oob(struct kunit *test)
103632         char stack_array[10];
103633 -       volatile int i = OOB_TAG_OFF;
103634 -       char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
103635 +       /* See comment in kasan_global_oob. */
103636 +       char *volatile array = stack_array;
103637 +       char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
103639         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
103641 @@ -707,7 +720,9 @@ static void kasan_alloca_oob_left(struct kunit *test)
103643         volatile int i = 10;
103644         char alloca_array[i];
103645 -       char *p = alloca_array - 1;
103646 +       /* See comment in kasan_global_oob. */
103647 +       char *volatile array = alloca_array;
103648 +       char *p = array - 1;
103650         /* Only generic mode instruments dynamic allocas. */
103651         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
103652 @@ -720,7 +735,9 @@ static void kasan_alloca_oob_right(struct kunit *test)
103654         volatile int i = 10;
103655         char alloca_array[i];
103656 -       char *p = alloca_array + i;
103657 +       /* See comment in kasan_global_oob. */
103658 +       char *volatile array = alloca_array;
103659 +       char *p = array + i;
103661         /* Only generic mode instruments dynamic allocas. */
103662         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
103663 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
103664 index 41ddc353ebb8..39ef2e314da5 100644
103665 --- a/lib/vsprintf.c
103666 +++ b/lib/vsprintf.c
103667 @@ -3135,8 +3135,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
103668                         switch (*fmt) {
103669                         case 'S':
103670                         case 's':
103671 -                       case 'F':
103672 -                       case 'f':
103673                         case 'x':
103674                         case 'K':
103675                         case 'e':
103676 diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
103677 index f5d778e7e5c7..19485e3cc7c9 100644
103678 --- a/lib/zstd/Makefile
103679 +++ b/lib/zstd/Makefile
103680 @@ -1,10 +1,46 @@
103681  # SPDX-License-Identifier: GPL-2.0-only
103682 +# ################################################################
103683 +# Copyright (c) Facebook, Inc.
103684 +# All rights reserved.
103686 +# This source code is licensed under both the BSD-style license (found in the
103687 +# LICENSE file in the root directory of this source tree) and the GPLv2 (found
103688 +# in the COPYING file in the root directory of this source tree).
103689 +# You may select, at your option, one of the above-listed licenses.
103690 +# ################################################################
103691  obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
103692  obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
103694  ccflags-y += -O3
103696 -zstd_compress-y := fse_compress.o huf_compress.o compress.o \
103697 -                  entropy_common.o fse_decompress.o zstd_common.o
103698 -zstd_decompress-y := huf_decompress.o decompress.o \
103699 -                    entropy_common.o fse_decompress.o zstd_common.o
103700 +zstd_compress-y := \
103701 +               zstd_compress_module.o \
103702 +               common/debug.o \
103703 +               common/entropy_common.o \
103704 +               common/error_private.o \
103705 +               common/fse_decompress.o \
103706 +               common/zstd_common.o \
103707 +               compress/fse_compress.o \
103708 +               compress/hist.o \
103709 +               compress/huf_compress.o \
103710 +               compress/zstd_compress.o \
103711 +               compress/zstd_compress_literals.o \
103712 +               compress/zstd_compress_sequences.o \
103713 +               compress/zstd_compress_superblock.o \
103714 +               compress/zstd_double_fast.o \
103715 +               compress/zstd_fast.o \
103716 +               compress/zstd_lazy.o \
103717 +               compress/zstd_ldm.o \
103718 +               compress/zstd_opt.o \
103720 +zstd_decompress-y := \
103721 +               zstd_decompress_module.o \
103722 +               common/debug.o \
103723 +               common/entropy_common.o \
103724 +               common/error_private.o \
103725 +               common/fse_decompress.o \
103726 +               common/zstd_common.o \
103727 +               decompress/huf_decompress.o \
103728 +               decompress/zstd_ddict.o \
103729 +               decompress/zstd_decompress.o \
103730 +               decompress/zstd_decompress_block.o \
103731 diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h
103732 deleted file mode 100644
103733 index 5d6343c1a909..000000000000
103734 --- a/lib/zstd/bitstream.h
103735 +++ /dev/null
103736 @@ -1,380 +0,0 @@
103738 - * bitstream
103739 - * Part of FSE library
103740 - * header file (to include)
103741 - * Copyright (C) 2013-2016, Yann Collet.
103743 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
103745 - * Redistribution and use in source and binary forms, with or without
103746 - * modification, are permitted provided that the following conditions are
103747 - * met:
103749 - *   * Redistributions of source code must retain the above copyright
103750 - * notice, this list of conditions and the following disclaimer.
103751 - *   * Redistributions in binary form must reproduce the above
103752 - * copyright notice, this list of conditions and the following disclaimer
103753 - * in the documentation and/or other materials provided with the
103754 - * distribution.
103756 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
103757 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
103758 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
103759 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
103760 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
103761 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
103762 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
103763 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
103764 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
103765 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
103766 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
103768 - * This program is free software; you can redistribute it and/or modify it under
103769 - * the terms of the GNU General Public License version 2 as published by the
103770 - * Free Software Foundation. This program is dual-licensed; you may select
103771 - * either version 2 of the GNU General Public License ("GPL") or BSD license
103772 - * ("BSD").
103774 - * You can contact the author at :
103775 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
103776 - */
103777 -#ifndef BITSTREAM_H_MODULE
103778 -#define BITSTREAM_H_MODULE
103781 -*  This API consists of small unitary functions, which must be inlined for best performance.
103782 -*  Since link-time-optimization is not available for all compilers,
103783 -*  these functions are defined into a .h to be included.
103786 -/*-****************************************
103787 -*  Dependencies
103788 -******************************************/
103789 -#include "error_private.h" /* error codes and messages */
103790 -#include "mem.h"          /* unaligned access routines */
103792 -/*=========================================
103793 -*  Target specific
103794 -=========================================*/
103795 -#define STREAM_ACCUMULATOR_MIN_32 25
103796 -#define STREAM_ACCUMULATOR_MIN_64 57
103797 -#define STREAM_ACCUMULATOR_MIN ((U32)(ZSTD_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
103799 -/*-******************************************
103800 -*  bitStream encoding API (write forward)
103801 -********************************************/
103802 -/* bitStream can mix input from multiple sources.
103803 -*  A critical property of these streams is that they encode and decode in **reverse** direction.
103804 -*  So the first bit sequence you add will be the last to be read, like a LIFO stack.
103806 -typedef struct {
103807 -       size_t bitContainer;
103808 -       int bitPos;
103809 -       char *startPtr;
103810 -       char *ptr;
103811 -       char *endPtr;
103812 -} BIT_CStream_t;
103814 -ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *dstBuffer, size_t dstCapacity);
103815 -ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
103816 -ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC);
103817 -ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC);
103819 -/* Start with initCStream, providing the size of buffer to write into.
103820 -*  bitStream will never write outside of this buffer.
103821 -*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
103823 -*  bits are first added to a local register.
103824 -*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
103825 -*  Writing data into memory is an explicit operation, performed by the flushBits function.
103826 -*  Hence keep track how many bits are potentially stored into local register to avoid register overflow.
103827 -*  After a flushBits, a maximum of 7 bits might still be stored into local register.
103829 -*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
103831 -*  Last operation is to close the bitStream.
103832 -*  The function returns the final size of CStream in bytes.
103833 -*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
103836 -/*-********************************************
103837 -*  bitStream decoding API (read backward)
103838 -**********************************************/
103839 -typedef struct {
103840 -       size_t bitContainer;
103841 -       unsigned bitsConsumed;
103842 -       const char *ptr;
103843 -       const char *start;
103844 -} BIT_DStream_t;
103846 -typedef enum {
103847 -       BIT_DStream_unfinished = 0,
103848 -       BIT_DStream_endOfBuffer = 1,
103849 -       BIT_DStream_completed = 2,
103850 -       BIT_DStream_overflow = 3
103851 -} BIT_DStream_status; /* result of BIT_reloadDStream() */
103852 -/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
103854 -ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize);
103855 -ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, unsigned nbBits);
103856 -ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD);
103857 -ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *bitD);
103859 -/* Start by invoking BIT_initDStream().
103860 -*  A chunk of the bitStream is then stored into a local register.
103861 -*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
103862 -*  You can then retrieve bitFields stored into the local register, **in reverse order**.
103863 -*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
103864 -*  A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
103865 -*  Otherwise, it can be less than that, so proceed accordingly.
103866 -*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
103869 -/*-****************************************
103870 -*  unsafe API
103871 -******************************************/
103872 -ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
103873 -/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
103875 -ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC);
103876 -/* unsafe version; does not check buffer overflow */
103878 -ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, unsigned nbBits);
103879 -/* faster, but works only if nbBits >= 1 */
103881 -/*-**************************************************************
103882 -*  Internal functions
103883 -****************************************************************/
103884 -ZSTD_STATIC unsigned BIT_highbit32(register U32 val) { return 31 - __builtin_clz(val); }
103886 -/*=====    Local Constants   =====*/
103887 -static const unsigned BIT_mask[] = {0,       1,       3,       7,      0xF,      0x1F,     0x3F,     0x7F,      0xFF,
103888 -                                   0x1FF,   0x3FF,   0x7FF,   0xFFF,    0x1FFF,   0x3FFF,   0x7FFF,   0xFFFF,    0x1FFFF,
103889 -                                   0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF}; /* up to 26 bits */
103891 -/*-**************************************************************
103892 -*  bitStream encoding
103893 -****************************************************************/
103894 -/*! BIT_initCStream() :
103895 - *  `dstCapacity` must be > sizeof(void*)
103896 - *  @return : 0 if success,
103897 -                         otherwise an error code (can be tested using ERR_isError() ) */
103898 -ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *startPtr, size_t dstCapacity)
103900 -       bitC->bitContainer = 0;
103901 -       bitC->bitPos = 0;
103902 -       bitC->startPtr = (char *)startPtr;
103903 -       bitC->ptr = bitC->startPtr;
103904 -       bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr);
103905 -       if (dstCapacity <= sizeof(bitC->ptr))
103906 -               return ERROR(dstSize_tooSmall);
103907 -       return 0;
103910 -/*! BIT_addBits() :
103911 -       can add up to 26 bits into `bitC`.
103912 -       Does not check for register overflow ! */
103913 -ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
103915 -       bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
103916 -       bitC->bitPos += nbBits;
103919 -/*! BIT_addBitsFast() :
103920 - *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
103921 -ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
103923 -       bitC->bitContainer |= value << bitC->bitPos;
103924 -       bitC->bitPos += nbBits;
103927 -/*! BIT_flushBitsFast() :
103928 - *  unsafe version; does not check buffer overflow */
103929 -ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC)
103931 -       size_t const nbBytes = bitC->bitPos >> 3;
103932 -       ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
103933 -       bitC->ptr += nbBytes;
103934 -       bitC->bitPos &= 7;
103935 -       bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
103938 -/*! BIT_flushBits() :
103939 - *  safe version; check for buffer overflow, and prevents it.
103940 - *  note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */
103941 -ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC)
103943 -       size_t const nbBytes = bitC->bitPos >> 3;
103944 -       ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
103945 -       bitC->ptr += nbBytes;
103946 -       if (bitC->ptr > bitC->endPtr)
103947 -               bitC->ptr = bitC->endPtr;
103948 -       bitC->bitPos &= 7;
103949 -       bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
103952 -/*! BIT_closeCStream() :
103953 - *  @return : size of CStream, in bytes,
103954 -                         or 0 if it could not fit into dstBuffer */
103955 -ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC)
103957 -       BIT_addBitsFast(bitC, 1, 1); /* endMark */
103958 -       BIT_flushBits(bitC);
103960 -       if (bitC->ptr >= bitC->endPtr)
103961 -               return 0; /* doesn't fit within authorized budget : cancel */
103963 -       return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
103966 -/*-********************************************************
103967 -* bitStream decoding
103968 -**********************************************************/
103969 -/*! BIT_initDStream() :
103970 -*   Initialize a BIT_DStream_t.
103971 -*   `bitD` : a pointer to an already allocated BIT_DStream_t structure.
103972 -*   `srcSize` must be the *exact* size of the bitStream, in bytes.
103973 -*   @return : size of stream (== srcSize) or an errorCode if a problem is detected
103975 -ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize)
103977 -       if (srcSize < 1) {
103978 -               memset(bitD, 0, sizeof(*bitD));
103979 -               return ERROR(srcSize_wrong);
103980 -       }
103982 -       if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
103983 -               bitD->start = (const char *)srcBuffer;
103984 -               bitD->ptr = (const char *)srcBuffer + srcSize - sizeof(bitD->bitContainer);
103985 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
103986 -               {
103987 -                       BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
103988 -                       bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
103989 -                       if (lastByte == 0)
103990 -                               return ERROR(GENERIC); /* endMark not present */
103991 -               }
103992 -       } else {
103993 -               bitD->start = (const char *)srcBuffer;
103994 -               bitD->ptr = bitD->start;
103995 -               bitD->bitContainer = *(const BYTE *)(bitD->start);
103996 -               switch (srcSize) {
103997 -               case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16);
103998 -                       fallthrough;
103999 -               case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24);
104000 -                       fallthrough;
104001 -               case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32);
104002 -                       fallthrough;
104003 -               case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24;
104004 -                       fallthrough;
104005 -               case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16;
104006 -                       fallthrough;
104007 -               case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8;
104008 -                       fallthrough;
104009 -               default:;
104010 -               }
104011 -               {
104012 -                       BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
104013 -                       bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
104014 -                       if (lastByte == 0)
104015 -                               return ERROR(GENERIC); /* endMark not present */
104016 -               }
104017 -               bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize) * 8;
104018 -       }
104020 -       return srcSize;
104023 -ZSTD_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; }
104025 -ZSTD_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { return (bitContainer >> start) & BIT_mask[nbBits]; }
104027 -ZSTD_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { return bitContainer & BIT_mask[nbBits]; }
104029 -/*! BIT_lookBits() :
104030 - *  Provides next n bits from local register.
104031 - *  local register is not modified.
104032 - *  On 32-bits, maxNbBits==24.
104033 - *  On 64-bits, maxNbBits==56.
104034 - *  @return : value extracted
104035 - */
104036 -ZSTD_STATIC size_t BIT_lookBits(const BIT_DStream_t *bitD, U32 nbBits)
104038 -       U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
104039 -       return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask - nbBits) & bitMask);
104042 -/*! BIT_lookBitsFast() :
104043 -*   unsafe version; only works only if nbBits >= 1 */
104044 -ZSTD_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t *bitD, U32 nbBits)
104046 -       U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
104047 -       return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask + 1) - nbBits) & bitMask);
104050 -ZSTD_STATIC void BIT_skipBits(BIT_DStream_t *bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; }
104052 -/*! BIT_readBits() :
104053 - *  Read (consume) next n bits from local register and update.
104054 - *  Pay attention to not read more than nbBits contained into local register.
104055 - *  @return : extracted value.
104056 - */
104057 -ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, U32 nbBits)
104059 -       size_t const value = BIT_lookBits(bitD, nbBits);
104060 -       BIT_skipBits(bitD, nbBits);
104061 -       return value;
104064 -/*! BIT_readBitsFast() :
104065 -*   unsafe version; only works only if nbBits >= 1 */
104066 -ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, U32 nbBits)
104068 -       size_t const value = BIT_lookBitsFast(bitD, nbBits);
104069 -       BIT_skipBits(bitD, nbBits);
104070 -       return value;
104073 -/*! BIT_reloadDStream() :
104074 -*   Refill `bitD` from buffer previously set in BIT_initDStream() .
104075 -*   This function is safe, it guarantees it will not read beyond src buffer.
104076 -*   @return : status of `BIT_DStream_t` internal register.
104077 -                         if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
104078 -ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD)
104080 -       if (bitD->bitsConsumed > (sizeof(bitD->bitContainer) * 8)) /* should not happen => corruption detected */
104081 -               return BIT_DStream_overflow;
104083 -       if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
104084 -               bitD->ptr -= bitD->bitsConsumed >> 3;
104085 -               bitD->bitsConsumed &= 7;
104086 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
104087 -               return BIT_DStream_unfinished;
104088 -       }
104089 -       if (bitD->ptr == bitD->start) {
104090 -               if (bitD->bitsConsumed < sizeof(bitD->bitContainer) * 8)
104091 -                       return BIT_DStream_endOfBuffer;
104092 -               return BIT_DStream_completed;
104093 -       }
104094 -       {
104095 -               U32 nbBytes = bitD->bitsConsumed >> 3;
104096 -               BIT_DStream_status result = BIT_DStream_unfinished;
104097 -               if (bitD->ptr - nbBytes < bitD->start) {
104098 -                       nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
104099 -                       result = BIT_DStream_endOfBuffer;
104100 -               }
104101 -               bitD->ptr -= nbBytes;
104102 -               bitD->bitsConsumed -= nbBytes * 8;
104103 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
104104 -               return result;
104105 -       }
104108 -/*! BIT_endOfDStream() :
104109 -*   @return Tells if DStream has exactly reached its end (all bits consumed).
104111 -ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *DStream)
104113 -       return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer) * 8));
104116 -#endif /* BITSTREAM_H_MODULE */
104117 diff --git a/lib/zstd/common/bitstream.h b/lib/zstd/common/bitstream.h
104118 new file mode 100644
104119 index 000000000000..2d6c95b4f40c
104120 --- /dev/null
104121 +++ b/lib/zstd/common/bitstream.h
104122 @@ -0,0 +1,437 @@
104123 +/* ******************************************************************
104124 + * bitstream
104125 + * Part of FSE library
104126 + * Copyright (c) Yann Collet, Facebook, Inc.
104128 + * You can contact the author at :
104129 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
104131 + * This source code is licensed under both the BSD-style license (found in the
104132 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
104133 + * in the COPYING file in the root directory of this source tree).
104134 + * You may select, at your option, one of the above-listed licenses.
104135 +****************************************************************** */
104136 +#ifndef BITSTREAM_H_MODULE
104137 +#define BITSTREAM_H_MODULE
104140 +*  This API consists of small unitary functions, which must be inlined for best performance.
104141 +*  Since link-time-optimization is not available for all compilers,
104142 +*  these functions are defined into a .h to be included.
104145 +/*-****************************************
104146 +*  Dependencies
104147 +******************************************/
104148 +#include "mem.h"            /* unaligned access routines */
104149 +#include "compiler.h"       /* UNLIKELY() */
104150 +#include "debug.h"          /* assert(), DEBUGLOG(), RAWLOG() */
104151 +#include "error_private.h"  /* error codes and messages */
104154 +/*=========================================
104155 +*  Target specific
104156 +=========================================*/
104158 +#define STREAM_ACCUMULATOR_MIN_32  25
104159 +#define STREAM_ACCUMULATOR_MIN_64  57
104160 +#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
104163 +/*-******************************************
104164 +*  bitStream encoding API (write forward)
104165 +********************************************/
104166 +/* bitStream can mix input from multiple sources.
104167 + * A critical property of these streams is that they encode and decode in **reverse** direction.
104168 + * So the first bit sequence you add will be the last to be read, like a LIFO stack.
104169 + */
104170 +typedef struct {
104171 +    size_t bitContainer;
104172 +    unsigned bitPos;
104173 +    char*  startPtr;
104174 +    char*  ptr;
104175 +    char*  endPtr;
104176 +} BIT_CStream_t;
104178 +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
104179 +MEM_STATIC void   BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
104180 +MEM_STATIC void   BIT_flushBits(BIT_CStream_t* bitC);
104181 +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
104183 +/* Start with initCStream, providing the size of buffer to write into.
104184 +*  bitStream will never write outside of this buffer.
104185 +*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
104187 +*  bits are first added to a local register.
104188 +*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
104189 +*  Writing data into memory is an explicit operation, performed by the flushBits function.
104190 +*  Hence keep track how many bits are potentially stored into local register to avoid register overflow.
104191 +*  After a flushBits, a maximum of 7 bits might still be stored into local register.
104193 +*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
104195 +*  Last operation is to close the bitStream.
104196 +*  The function returns the final size of CStream in bytes.
104197 +*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
104201 +/*-********************************************
104202 +*  bitStream decoding API (read backward)
104203 +**********************************************/
104204 +typedef struct {
104205 +    size_t   bitContainer;
104206 +    unsigned bitsConsumed;
104207 +    const char* ptr;
104208 +    const char* start;
104209 +    const char* limitPtr;
104210 +} BIT_DStream_t;
104212 +typedef enum { BIT_DStream_unfinished = 0,
104213 +               BIT_DStream_endOfBuffer = 1,
104214 +               BIT_DStream_completed = 2,
104215 +               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
104216 +               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
104218 +MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
104219 +MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
104220 +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
104221 +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
104224 +/* Start by invoking BIT_initDStream().
104225 +*  A chunk of the bitStream is then stored into a local register.
104226 +*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
104227 +*  You can then retrieve bitFields stored into the local register, **in reverse order**.
104228 +*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
104229 +*  A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
104230 +*  Otherwise, it can be less than that, so proceed accordingly.
104231 +*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
104235 +/*-****************************************
104236 +*  unsafe API
104237 +******************************************/
104238 +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
104239 +/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
104241 +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
104242 +/* unsafe version; does not check buffer overflow */
104244 +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
104245 +/* faster, but works only if nbBits >= 1 */
104249 +/*-**************************************************************
104250 +*  Internal functions
104251 +****************************************************************/
104252 +MEM_STATIC unsigned BIT_highbit32 (U32 val)
104254 +    assert(val != 0);
104255 +    {
104256 +#   if (__GNUC__ >= 3)   /* Use GCC Intrinsic */
104257 +        return __builtin_clz (val) ^ 31;
104258 +#   else   /* Software version */
104259 +        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
104260 +                                                 11, 14, 16, 18, 22, 25,  3, 30,
104261 +                                                  8, 12, 20, 28, 15, 17, 24,  7,
104262 +                                                 19, 27, 23,  6, 26,  5,  4, 31 };
104263 +        U32 v = val;
104264 +        v |= v >> 1;
104265 +        v |= v >> 2;
104266 +        v |= v >> 4;
104267 +        v |= v >> 8;
104268 +        v |= v >> 16;
104269 +        return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
104270 +#   endif
104271 +    }
104274 +/*=====    Local Constants   =====*/
104275 +static const unsigned BIT_mask[] = {
104276 +    0,          1,         3,         7,         0xF,       0x1F,
104277 +    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF,
104278 +    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF,
104279 +    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF,
104280 +    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
104281 +    0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
104282 +#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
104284 +/*-**************************************************************
104285 +*  bitStream encoding
104286 +****************************************************************/
104287 +/*! BIT_initCStream() :
104288 + *  `dstCapacity` must be > sizeof(size_t)
104289 + *  @return : 0 if success,
104290 + *            otherwise an error code (can be tested using ERR_isError()) */
104291 +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
104292 +                                  void* startPtr, size_t dstCapacity)
104294 +    bitC->bitContainer = 0;
104295 +    bitC->bitPos = 0;
104296 +    bitC->startPtr = (char*)startPtr;
104297 +    bitC->ptr = bitC->startPtr;
104298 +    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
104299 +    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
104300 +    return 0;
104303 +/*! BIT_addBits() :
104304 + *  can add up to 31 bits into `bitC`.
104305 + *  Note : does not check for register overflow ! */
104306 +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
104307 +                            size_t value, unsigned nbBits)
104309 +    DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
104310 +    assert(nbBits < BIT_MASK_SIZE);
104311 +    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
104312 +    bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
104313 +    bitC->bitPos += nbBits;
104316 +/*! BIT_addBitsFast() :
104317 + *  works only if `value` is _clean_,
104318 + *  meaning all high bits above nbBits are 0 */
104319 +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
104320 +                                size_t value, unsigned nbBits)
104322 +    assert((value>>nbBits) == 0);
104323 +    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
104324 +    bitC->bitContainer |= value << bitC->bitPos;
104325 +    bitC->bitPos += nbBits;
104328 +/*! BIT_flushBitsFast() :
104329 + *  assumption : bitContainer has not overflowed
104330 + *  unsafe version; does not check buffer overflow */
104331 +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
104333 +    size_t const nbBytes = bitC->bitPos >> 3;
104334 +    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
104335 +    assert(bitC->ptr <= bitC->endPtr);
104336 +    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
104337 +    bitC->ptr += nbBytes;
104338 +    bitC->bitPos &= 7;
104339 +    bitC->bitContainer >>= nbBytes*8;
104342 +/*! BIT_flushBits() :
104343 + *  assumption : bitContainer has not overflowed
104344 + *  safe version; check for buffer overflow, and prevents it.
104345 + *  note : does not signal buffer overflow.
104346 + *  overflow will be revealed later on using BIT_closeCStream() */
104347 +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
104349 +    size_t const nbBytes = bitC->bitPos >> 3;
104350 +    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
104351 +    assert(bitC->ptr <= bitC->endPtr);
104352 +    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
104353 +    bitC->ptr += nbBytes;
104354 +    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
104355 +    bitC->bitPos &= 7;
104356 +    bitC->bitContainer >>= nbBytes*8;
104359 +/*! BIT_closeCStream() :
104360 + *  @return : size of CStream, in bytes,
104361 + *            or 0 if it could not fit into dstBuffer */
104362 +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
104364 +    BIT_addBitsFast(bitC, 1, 1);   /* endMark */
104365 +    BIT_flushBits(bitC);
104366 +    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
104367 +    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
104371 +/*-********************************************************
104372 +*  bitStream decoding
104373 +**********************************************************/
104374 +/*! BIT_initDStream() :
104375 + *  Initialize a BIT_DStream_t.
104376 + * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
104377 + * `srcSize` must be the *exact* size of the bitStream, in bytes.
104378 + * @return : size of stream (== srcSize), or an errorCode if a problem is detected
104379 + */
104380 +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
104382 +    if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
104384 +    bitD->start = (const char*)srcBuffer;
104385 +    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
104387 +    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
104388 +        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
104389 +        bitD->bitContainer = MEM_readLEST(bitD->ptr);
104390 +        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
104391 +          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
104392 +          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
104393 +    } else {
104394 +        bitD->ptr   = bitD->start;
104395 +        bitD->bitContainer = *(const BYTE*)(bitD->start);
104396 +        switch(srcSize)
104397 +        {
104398 +        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
104399 +                /* fall-through */
104401 +        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
104402 +                /* fall-through */
104404 +        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
104405 +                /* fall-through */
104407 +        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
104408 +                /* fall-through */
104410 +        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
104411 +                /* fall-through */
104413 +        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
104414 +                /* fall-through */
104416 +        default: break;
104417 +        }
104418 +        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
104419 +            bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
104420 +            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */
104421 +        }
104422 +        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
104423 +    }
104425 +    return srcSize;
104428 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
104430 +    return bitContainer >> start;
104433 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
104435 +    U32 const regMask = sizeof(bitContainer)*8 - 1;
104436 +    /* if start > regMask, bitstream is corrupted, and result is undefined */
104437 +    assert(nbBits < BIT_MASK_SIZE);
104438 +    return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
104441 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
104443 +    assert(nbBits < BIT_MASK_SIZE);
104444 +    return bitContainer & BIT_mask[nbBits];
104447 +/*! BIT_lookBits() :
104448 + *  Provides next n bits from local register.
104449 + *  local register is not modified.
104450 + *  On 32-bits, maxNbBits==24.
104451 + *  On 64-bits, maxNbBits==56.
104452 + * @return : value extracted */
104453 +MEM_STATIC  FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t*  bitD, U32 nbBits)
104455 +    /* arbitrate between double-shift and shift+mask */
104456 +#if 1
104457 +    /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
104458 +     * bitstream is likely corrupted, and result is undefined */
104459 +    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
104460 +#else
104461 +    /* this code path is slower on my os-x laptop */
104462 +    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
104463 +    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
104464 +#endif
104467 +/*! BIT_lookBitsFast() :
104468 + *  unsafe version; only works if nbBits >= 1 */
104469 +MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
104471 +    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
104472 +    assert(nbBits >= 1);
104473 +    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
104476 +MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
104478 +    bitD->bitsConsumed += nbBits;
104481 +/*! BIT_readBits() :
104482 + *  Read (consume) next n bits from local register and update.
104483 + *  Pay attention to not read more than nbBits contained into local register.
104484 + * @return : extracted value. */
104485 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
104487 +    size_t const value = BIT_lookBits(bitD, nbBits);
104488 +    BIT_skipBits(bitD, nbBits);
104489 +    return value;
104492 +/*! BIT_readBitsFast() :
104493 + *  unsafe version; only works only if nbBits >= 1 */
104494 +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
104496 +    size_t const value = BIT_lookBitsFast(bitD, nbBits);
104497 +    assert(nbBits >= 1);
104498 +    BIT_skipBits(bitD, nbBits);
104499 +    return value;
104502 +/*! BIT_reloadDStreamFast() :
104503 + *  Similar to BIT_reloadDStream(), but with two differences:
104504 + *  1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
104505 + *  2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
104506 + *     point you must use BIT_reloadDStream() to reload.
104507 + */
104508 +MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
104510 +    if (UNLIKELY(bitD->ptr < bitD->limitPtr))
104511 +        return BIT_DStream_overflow;
104512 +    assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
104513 +    bitD->ptr -= bitD->bitsConsumed >> 3;
104514 +    bitD->bitsConsumed &= 7;
104515 +    bitD->bitContainer = MEM_readLEST(bitD->ptr);
104516 +    return BIT_DStream_unfinished;
104519 +/*! BIT_reloadDStream() :
104520 + *  Refill `bitD` from buffer previously set in BIT_initDStream() .
104521 + *  This function is safe, it guarantees it will not read beyond src buffer.
104522 + * @return : status of `BIT_DStream_t` internal register.
104523 + *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
104524 +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
104526 +    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
104527 +        return BIT_DStream_overflow;
104529 +    if (bitD->ptr >= bitD->limitPtr) {
104530 +        return BIT_reloadDStreamFast(bitD);
104531 +    }
104532 +    if (bitD->ptr == bitD->start) {
104533 +        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
104534 +        return BIT_DStream_completed;
104535 +    }
104536 +    /* start < ptr < limitPtr */
104537 +    {   U32 nbBytes = bitD->bitsConsumed >> 3;
104538 +        BIT_DStream_status result = BIT_DStream_unfinished;
104539 +        if (bitD->ptr - nbBytes < bitD->start) {
104540 +            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
104541 +            result = BIT_DStream_endOfBuffer;
104542 +        }
104543 +        bitD->ptr -= nbBytes;
104544 +        bitD->bitsConsumed -= nbBytes*8;
104545 +        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
104546 +        return result;
104547 +    }
104550 +/*! BIT_endOfDStream() :
104551 + * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
104552 + */
104553 +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
104555 +    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
104559 +#endif /* BITSTREAM_H_MODULE */
104560 diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h
104561 new file mode 100644
104562 index 000000000000..9269b58a93e2
104563 --- /dev/null
104564 +++ b/lib/zstd/common/compiler.h
104565 @@ -0,0 +1,151 @@
104567 + * Copyright (c) Yann Collet, Facebook, Inc.
104568 + * All rights reserved.
104570 + * This source code is licensed under both the BSD-style license (found in the
104571 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
104572 + * in the COPYING file in the root directory of this source tree).
104573 + * You may select, at your option, one of the above-listed licenses.
104574 + */
104576 +#ifndef ZSTD_COMPILER_H
104577 +#define ZSTD_COMPILER_H
104579 +/*-*******************************************************
104580 +*  Compiler specifics
104581 +*********************************************************/
104582 +/* force inlining */
104584 +#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
104585 +#  define INLINE_KEYWORD inline
104586 +#else
104587 +#  define INLINE_KEYWORD
104588 +#endif
104590 +#define FORCE_INLINE_ATTR __attribute__((always_inline))
104594 +  On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
104595 +  This explictly marks such functions as __cdecl so that the code will still compile
104596 +  if a CC other than __cdecl has been made the default.
104598 +#define WIN_CDECL
104601 + * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
104602 + * parameters. They must be inlined for the compiler to eliminate the constant
104603 + * branches.
104604 + */
104605 +#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
104607 + * HINT_INLINE is used to help the compiler generate better code. It is *not*
104608 + * used for "templates", so it can be tweaked based on the compilers
104609 + * performance.
104611 + * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
104612 + * always_inline attribute.
104614 + * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
104615 + * attribute.
104616 + */
104617 +#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
104618 +#  define HINT_INLINE static INLINE_KEYWORD
104619 +#else
104620 +#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
104621 +#endif
104623 +/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
104624 +#define UNUSED_ATTR __attribute__((unused))
104626 +/* force no inlining */
104627 +#define FORCE_NOINLINE static __attribute__((__noinline__))
104630 +/* target attribute */
104631 +#ifndef __has_attribute
104632 +  #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
104633 +#endif
104634 +#define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
104636 +/* Enable runtime BMI2 dispatch based on the CPU.
104637 + * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
104638 + */
104639 +#ifndef DYNAMIC_BMI2
104640 +  #if ((defined(__clang__) && __has_attribute(__target__)) \
104641 +      || (defined(__GNUC__) \
104642 +          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
104643 +      && (defined(__x86_64__) || defined(_M_X86)) \
104644 +      && !defined(__BMI2__)
104645 +  #  define DYNAMIC_BMI2 1
104646 +  #else
104647 +  #  define DYNAMIC_BMI2 0
104648 +  #endif
104649 +#endif
104651 +/* prefetch
104652 + * can be disabled, by declaring NO_PREFETCH build macro */
104653 +#if ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
104654 +#  define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
104655 +#  define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
104656 +#elif defined(__aarch64__)
104657 +#  define PREFETCH_L1(ptr)  __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
104658 +#  define PREFETCH_L2(ptr)  __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
104659 +#else
104660 +#  define PREFETCH_L1(ptr) (void)(ptr)  /* disabled */
104661 +#  define PREFETCH_L2(ptr) (void)(ptr)  /* disabled */
104662 +#endif  /* NO_PREFETCH */
104664 +#define CACHELINE_SIZE 64
104666 +#define PREFETCH_AREA(p, s)  {            \
104667 +    const char* const _ptr = (const char*)(p);  \
104668 +    size_t const _size = (size_t)(s);     \
104669 +    size_t _pos;                          \
104670 +    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {  \
104671 +        PREFETCH_L2(_ptr + _pos);         \
104672 +    }                                     \
104675 +/* vectorization
104676 + * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
104677 +#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__)
104678 +#  if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
104679 +#    define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
104680 +#  else
104681 +#    define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
104682 +#  endif
104683 +#else
104684 +#  define DONT_VECTORIZE
104685 +#endif
104687 +/* Tell the compiler that a branch is likely or unlikely.
104688 + * Only use these macros if it causes the compiler to generate better code.
104689 + * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
104690 + * and clang, please do.
104691 + */
104692 +#define LIKELY(x) (__builtin_expect((x), 1))
104693 +#define UNLIKELY(x) (__builtin_expect((x), 0))
104695 +/* disable warnings */
104697 +/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
104700 +/* compat. with non-clang compilers */
104701 +#ifndef __has_builtin
104702 +#  define __has_builtin(x) 0
104703 +#endif
104705 +/* compat. with non-clang compilers */
104706 +#ifndef __has_feature
104707 +#  define __has_feature(x) 0
104708 +#endif
104710 +/* detects whether we are being compiled under msan */
104713 +/* detects whether we are being compiled under asan */
104716 +#endif /* ZSTD_COMPILER_H */
104717 diff --git a/lib/zstd/common/cpu.h b/lib/zstd/common/cpu.h
104718 new file mode 100644
104719 index 000000000000..0202d94076a3
104720 --- /dev/null
104721 +++ b/lib/zstd/common/cpu.h
104722 @@ -0,0 +1,194 @@
104724 + * Copyright (c) Facebook, Inc.
104725 + * All rights reserved.
104727 + * This source code is licensed under both the BSD-style license (found in the
104728 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
104729 + * in the COPYING file in the root directory of this source tree).
104730 + * You may select, at your option, one of the above-listed licenses.
104731 + */
104733 +#ifndef ZSTD_COMMON_CPU_H
104734 +#define ZSTD_COMMON_CPU_H
104737 + * Implementation taken from folly/CpuId.h
104738 + * https://github.com/facebook/folly/blob/master/folly/CpuId.h
104739 + */
104741 +#include "mem.h"
104744 +typedef struct {
104745 +    U32 f1c;
104746 +    U32 f1d;
104747 +    U32 f7b;
104748 +    U32 f7c;
104749 +} ZSTD_cpuid_t;
104751 +MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
104752 +    U32 f1c = 0;
104753 +    U32 f1d = 0;
104754 +    U32 f7b = 0;
104755 +    U32 f7c = 0;
104756 +#if defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
104757 +    /* The following block like the normal cpuid branch below, but gcc
104758 +     * reserves ebx for use of its pic register so we must specially
104759 +     * handle the save and restore to avoid clobbering the register
104760 +     */
104761 +    U32 n;
104762 +    __asm__(
104763 +        "pushl %%ebx\n\t"
104764 +        "cpuid\n\t"
104765 +        "popl %%ebx\n\t"
104766 +        : "=a"(n)
104767 +        : "a"(0)
104768 +        : "ecx", "edx");
104769 +    if (n >= 1) {
104770 +      U32 f1a;
104771 +      __asm__(
104772 +          "pushl %%ebx\n\t"
104773 +          "cpuid\n\t"
104774 +          "popl %%ebx\n\t"
104775 +          : "=a"(f1a), "=c"(f1c), "=d"(f1d)
104776 +          : "a"(1));
104777 +    }
104778 +    if (n >= 7) {
104779 +      __asm__(
104780 +          "pushl %%ebx\n\t"
104781 +          "cpuid\n\t"
104782 +          "movl %%ebx, %%eax\n\t"
104783 +          "popl %%ebx"
104784 +          : "=a"(f7b), "=c"(f7c)
104785 +          : "a"(7), "c"(0)
104786 +          : "edx");
104787 +    }
104788 +#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
104789 +    U32 n;
104790 +    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
104791 +    if (n >= 1) {
104792 +      U32 f1a;
104793 +      __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
104794 +    }
104795 +    if (n >= 7) {
104796 +      U32 f7a;
104797 +      __asm__("cpuid"
104798 +              : "=a"(f7a), "=b"(f7b), "=c"(f7c)
104799 +              : "a"(7), "c"(0)
104800 +              : "edx");
104801 +    }
104802 +#endif
104803 +    {
104804 +        ZSTD_cpuid_t cpuid;
104805 +        cpuid.f1c = f1c;
104806 +        cpuid.f1d = f1d;
104807 +        cpuid.f7b = f7b;
104808 +        cpuid.f7c = f7c;
104809 +        return cpuid;
104810 +    }
104813 +#define X(name, r, bit)                                                        \
104814 +  MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {                 \
104815 +    return ((cpuid.r) & (1U << bit)) != 0;                                     \
104816 +  }
104818 +/* cpuid(1): Processor Info and Feature Bits. */
104819 +#define C(name, bit) X(name, f1c, bit)
104820 +  C(sse3, 0)
104821 +  C(pclmuldq, 1)
104822 +  C(dtes64, 2)
104823 +  C(monitor, 3)
104824 +  C(dscpl, 4)
104825 +  C(vmx, 5)
104826 +  C(smx, 6)
104827 +  C(eist, 7)
104828 +  C(tm2, 8)
104829 +  C(ssse3, 9)
104830 +  C(cnxtid, 10)
104831 +  C(fma, 12)
104832 +  C(cx16, 13)
104833 +  C(xtpr, 14)
104834 +  C(pdcm, 15)
104835 +  C(pcid, 17)
104836 +  C(dca, 18)
104837 +  C(sse41, 19)
104838 +  C(sse42, 20)
104839 +  C(x2apic, 21)
104840 +  C(movbe, 22)
104841 +  C(popcnt, 23)
104842 +  C(tscdeadline, 24)
104843 +  C(aes, 25)
104844 +  C(xsave, 26)
104845 +  C(osxsave, 27)
104846 +  C(avx, 28)
104847 +  C(f16c, 29)
104848 +  C(rdrand, 30)
104849 +#undef C
104850 +#define D(name, bit) X(name, f1d, bit)
104851 +  D(fpu, 0)
104852 +  D(vme, 1)
104853 +  D(de, 2)
104854 +  D(pse, 3)
104855 +  D(tsc, 4)
104856 +  D(msr, 5)
104857 +  D(pae, 6)
104858 +  D(mce, 7)
104859 +  D(cx8, 8)
104860 +  D(apic, 9)
104861 +  D(sep, 11)
104862 +  D(mtrr, 12)
104863 +  D(pge, 13)
104864 +  D(mca, 14)
104865 +  D(cmov, 15)
104866 +  D(pat, 16)
104867 +  D(pse36, 17)
104868 +  D(psn, 18)
104869 +  D(clfsh, 19)
104870 +  D(ds, 21)
104871 +  D(acpi, 22)
104872 +  D(mmx, 23)
104873 +  D(fxsr, 24)
104874 +  D(sse, 25)
104875 +  D(sse2, 26)
104876 +  D(ss, 27)
104877 +  D(htt, 28)
104878 +  D(tm, 29)
104879 +  D(pbe, 31)
104880 +#undef D
104882 +/* cpuid(7): Extended Features. */
104883 +#define B(name, bit) X(name, f7b, bit)
104884 +  B(bmi1, 3)
104885 +  B(hle, 4)
104886 +  B(avx2, 5)
104887 +  B(smep, 7)
104888 +  B(bmi2, 8)
104889 +  B(erms, 9)
104890 +  B(invpcid, 10)
104891 +  B(rtm, 11)
104892 +  B(mpx, 14)
104893 +  B(avx512f, 16)
104894 +  B(avx512dq, 17)
104895 +  B(rdseed, 18)
104896 +  B(adx, 19)
104897 +  B(smap, 20)
104898 +  B(avx512ifma, 21)
104899 +  B(pcommit, 22)
104900 +  B(clflushopt, 23)
104901 +  B(clwb, 24)
104902 +  B(avx512pf, 26)
104903 +  B(avx512er, 27)
104904 +  B(avx512cd, 28)
104905 +  B(sha, 29)
104906 +  B(avx512bw, 30)
104907 +  B(avx512vl, 31)
104908 +#undef B
104909 +#define C(name, bit) X(name, f7c, bit)
104910 +  C(prefetchwt1, 0)
104911 +  C(avx512vbmi, 1)
104912 +#undef C
104914 +#undef X
104916 +#endif /* ZSTD_COMMON_CPU_H */
104917 diff --git a/lib/zstd/common/debug.c b/lib/zstd/common/debug.c
104918 new file mode 100644
104919 index 000000000000..bb863c9ea616
104920 --- /dev/null
104921 +++ b/lib/zstd/common/debug.c
104922 @@ -0,0 +1,24 @@
104923 +/* ******************************************************************
104924 + * debug
104925 + * Part of FSE library
104926 + * Copyright (c) Yann Collet, Facebook, Inc.
104928 + * You can contact the author at :
104929 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
104931 + * This source code is licensed under both the BSD-style license (found in the
104932 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
104933 + * in the COPYING file in the root directory of this source tree).
104934 + * You may select, at your option, one of the above-listed licenses.
104935 +****************************************************************** */
104939 + * This module only hosts one global variable
104940 + * which can be used to dynamically influence the verbosity of traces,
104941 + * such as DEBUGLOG and RAWLOG
104942 + */
104944 +#include "debug.h"
104946 +int g_debuglevel = DEBUGLEVEL;
104947 diff --git a/lib/zstd/common/debug.h b/lib/zstd/common/debug.h
104948 new file mode 100644
104949 index 000000000000..6dd88d1fbd02
104950 --- /dev/null
104951 +++ b/lib/zstd/common/debug.h
104952 @@ -0,0 +1,101 @@
104953 +/* ******************************************************************
104954 + * debug
104955 + * Part of FSE library
104956 + * Copyright (c) Yann Collet, Facebook, Inc.
104958 + * You can contact the author at :
104959 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
104961 + * This source code is licensed under both the BSD-style license (found in the
104962 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
104963 + * in the COPYING file in the root directory of this source tree).
104964 + * You may select, at your option, one of the above-listed licenses.
104965 +****************************************************************** */
104969 + * The purpose of this header is to enable debug functions.
104970 + * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
104971 + * and DEBUG_STATIC_ASSERT() for compile-time.
104973 + * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
104975 + * Level 1 enables assert() only.
104976 + * Starting level 2, traces can be generated and pushed to stderr.
104977 + * The higher the level, the more verbose the traces.
104979 + * It's possible to dynamically adjust level using variable g_debug_level,
104980 + * which is only declared if DEBUGLEVEL>=2,
104981 + * and is a global variable, not multi-thread protected (use with care)
104982 + */
104984 +#ifndef DEBUG_H_12987983217
104985 +#define DEBUG_H_12987983217
104989 +/* static assert is triggered at compile time, leaving no runtime artefact.
104990 + * static assert only works with compile-time constants.
104991 + * Also, this variant can only be used inside a function. */
104992 +#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
104995 +/* DEBUGLEVEL is expected to be defined externally,
104996 + * typically through compiler command line.
104997 + * Value must be a number. */
104998 +#ifndef DEBUGLEVEL
104999 +#  define DEBUGLEVEL 0
105000 +#endif
105003 +/* recommended values for DEBUGLEVEL :
105004 + * 0 : release mode, no debug, all run-time checks disabled
105005 + * 1 : enables assert() only, no display
105006 + * 2 : reserved, for currently active debug path
105007 + * 3 : events once per object lifetime (CCtx, CDict, etc.)
105008 + * 4 : events once per frame
105009 + * 5 : events once per block
105010 + * 6 : events once per sequence (verbose)
105011 + * 7+: events at every position (*very* verbose)
105013 + * It's generally inconvenient to output traces > 5.
105014 + * In which case, it's possible to selectively trigger high verbosity levels
105015 + * by modifying g_debug_level.
105016 + */
105018 +#if (DEBUGLEVEL>=1)
105019 +#  define ZSTD_DEPS_NEED_ASSERT
105020 +#  include "zstd_deps.h"
105021 +#else
105022 +#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
105023 +#    define assert(condition) ((void)0)   /* disable assert (default) */
105024 +#  endif
105025 +#endif
105027 +#if (DEBUGLEVEL>=2)
105028 +#  define ZSTD_DEPS_NEED_IO
105029 +#  include "zstd_deps.h"
105030 +extern int g_debuglevel; /* the variable is only declared,
105031 +                            it actually lives in debug.c,
105032 +                            and is shared by the whole process.
105033 +                            It's not thread-safe.
105034 +                            It's useful when enabling very verbose levels
105035 +                            on selective conditions (such as position in src) */
105037 +#  define RAWLOG(l, ...) {                                       \
105038 +                if (l<=g_debuglevel) {                           \
105039 +                    ZSTD_DEBUG_PRINT(__VA_ARGS__);               \
105040 +            }   }
105041 +#  define DEBUGLOG(l, ...) {                                     \
105042 +                if (l<=g_debuglevel) {                           \
105043 +                    ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
105044 +                    ZSTD_DEBUG_PRINT(" \n");                     \
105045 +            }   }
105046 +#else
105047 +#  define RAWLOG(l, ...)      {}    /* disabled */
105048 +#  define DEBUGLOG(l, ...)    {}    /* disabled */
105049 +#endif
105053 +#endif /* DEBUG_H_12987983217 */
105054 diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c
105055 new file mode 100644
105056 index 000000000000..53b47a2b52ff
105057 --- /dev/null
105058 +++ b/lib/zstd/common/entropy_common.c
105059 @@ -0,0 +1,357 @@
105060 +/* ******************************************************************
105061 + * Common functions of New Generation Entropy library
105062 + * Copyright (c) Yann Collet, Facebook, Inc.
105064 + *  You can contact the author at :
105065 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
105066 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
105068 + * This source code is licensed under both the BSD-style license (found in the
105069 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
105070 + * in the COPYING file in the root directory of this source tree).
105071 + * You may select, at your option, one of the above-listed licenses.
105072 +****************************************************************** */
105074 +/* *************************************
105075 +*  Dependencies
105076 +***************************************/
105077 +#include "mem.h"
105078 +#include "error_private.h"       /* ERR_*, ERROR */
105079 +#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
105080 +#include "fse.h"
105081 +#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
105082 +#include "huf.h"
105085 +/*===   Version   ===*/
105086 +unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
105089 +/*===   Error Management   ===*/
105090 +unsigned FSE_isError(size_t code) { return ERR_isError(code); }
105091 +const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
105093 +unsigned HUF_isError(size_t code) { return ERR_isError(code); }
105094 +const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
105097 +/*-**************************************************************
105098 +*  FSE NCount encoding-decoding
105099 +****************************************************************/
105100 +static U32 FSE_ctz(U32 val)
105102 +    assert(val != 0);
105103 +    {
105104 +#   if (__GNUC__ >= 3)   /* GCC Intrinsic */
105105 +        return __builtin_ctz(val);
105106 +#   else   /* Software version */
105107 +        U32 count = 0;
105108 +        while ((val & 1) == 0) {
105109 +            val >>= 1;
105110 +            ++count;
105111 +        }
105112 +        return count;
105113 +#   endif
105114 +    }
105117 +FORCE_INLINE_TEMPLATE
105118 +size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
105119 +                           const void* headerBuffer, size_t hbSize)
105121 +    const BYTE* const istart = (const BYTE*) headerBuffer;
105122 +    const BYTE* const iend = istart + hbSize;
105123 +    const BYTE* ip = istart;
105124 +    int nbBits;
105125 +    int remaining;
105126 +    int threshold;
105127 +    U32 bitStream;
105128 +    int bitCount;
105129 +    unsigned charnum = 0;
105130 +    unsigned const maxSV1 = *maxSVPtr + 1;
105131 +    int previous0 = 0;
105133 +    if (hbSize < 8) {
105134 +        /* This function only works when hbSize >= 8 */
105135 +        char buffer[8] = {0};
105136 +        ZSTD_memcpy(buffer, headerBuffer, hbSize);
105137 +        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
105138 +                                                    buffer, sizeof(buffer));
105139 +            if (FSE_isError(countSize)) return countSize;
105140 +            if (countSize > hbSize) return ERROR(corruption_detected);
105141 +            return countSize;
105142 +    }   }
105143 +    assert(hbSize >= 8);
105145 +    /* init */
105146 +    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
105147 +    bitStream = MEM_readLE32(ip);
105148 +    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
105149 +    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
105150 +    bitStream >>= 4;
105151 +    bitCount = 4;
105152 +    *tableLogPtr = nbBits;
105153 +    remaining = (1<<nbBits)+1;
105154 +    threshold = 1<<nbBits;
105155 +    nbBits++;
105157 +    for (;;) {
105158 +        if (previous0) {
105159 +            /* Count the number of repeats. Each time the
105160 +             * 2-bit repeat code is 0b11 there is another
105161 +             * repeat.
105162 +             * Avoid UB by setting the high bit to 1.
105163 +             */
105164 +            int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
105165 +            while (repeats >= 12) {
105166 +                charnum += 3 * 12;
105167 +                if (LIKELY(ip <= iend-7)) {
105168 +                    ip += 3;
105169 +                } else {
105170 +                    bitCount -= (int)(8 * (iend - 7 - ip));
105171 +                    bitCount &= 31;
105172 +                    ip = iend - 4;
105173 +                }
105174 +                bitStream = MEM_readLE32(ip) >> bitCount;
105175 +                repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
105176 +            }
105177 +            charnum += 3 * repeats;
105178 +            bitStream >>= 2 * repeats;
105179 +            bitCount += 2 * repeats;
105181 +            /* Add the final repeat which isn't 0b11. */
105182 +            assert((bitStream & 3) < 3);
105183 +            charnum += bitStream & 3;
105184 +            bitCount += 2;
105186 +            /* This is an error, but break and return an error
105187 +             * at the end, because returning out of a loop makes
105188 +             * it harder for the compiler to optimize.
105189 +             */
105190 +            if (charnum >= maxSV1) break;
105192 +            /* We don't need to set the normalized count to 0
105193 +             * because we already memset the whole buffer to 0.
105194 +             */
105196 +            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
105197 +                assert((bitCount >> 3) <= 3); /* For first condition to work */
105198 +                ip += bitCount>>3;
105199 +                bitCount &= 7;
105200 +            } else {
105201 +                bitCount -= (int)(8 * (iend - 4 - ip));
105202 +                bitCount &= 31;
105203 +                ip = iend - 4;
105204 +            }
105205 +            bitStream = MEM_readLE32(ip) >> bitCount;
105206 +        }
105207 +        {
105208 +            int const max = (2*threshold-1) - remaining;
105209 +            int count;
105211 +            if ((bitStream & (threshold-1)) < (U32)max) {
105212 +                count = bitStream & (threshold-1);
105213 +                bitCount += nbBits-1;
105214 +            } else {
105215 +                count = bitStream & (2*threshold-1);
105216 +                if (count >= threshold) count -= max;
105217 +                bitCount += nbBits;
105218 +            }
105220 +            count--;   /* extra accuracy */
105221 +            /* When it matters (small blocks), this is a
105222 +             * predictable branch, because we don't use -1.
105223 +             */
105224 +            if (count >= 0) {
105225 +                remaining -= count;
105226 +            } else {
105227 +                assert(count == -1);
105228 +                remaining += count;
105229 +            }
105230 +            normalizedCounter[charnum++] = (short)count;
105231 +            previous0 = !count;
105233 +            assert(threshold > 1);
105234 +            if (remaining < threshold) {
105235 +                /* This branch can be folded into the
105236 +                 * threshold update condition because we
105237 +                 * know that threshold > 1.
105238 +                 */
105239 +                if (remaining <= 1) break;
105240 +                nbBits = BIT_highbit32(remaining) + 1;
105241 +                threshold = 1 << (nbBits - 1);
105242 +            }
105243 +            if (charnum >= maxSV1) break;
105245 +            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
105246 +                ip += bitCount>>3;
105247 +                bitCount &= 7;
105248 +            } else {
105249 +                bitCount -= (int)(8 * (iend - 4 - ip));
105250 +                bitCount &= 31;
105251 +                ip = iend - 4;
105252 +            }
105253 +            bitStream = MEM_readLE32(ip) >> bitCount;
105254 +    }   }
105255 +    if (remaining != 1) return ERROR(corruption_detected);
105256 +    /* Only possible when there are too many zeros. */
105257 +    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
105258 +    if (bitCount > 32) return ERROR(corruption_detected);
105259 +    *maxSVPtr = charnum-1;
105261 +    ip += (bitCount+7)>>3;
105262 +    return ip-istart;
105265 +/* Avoids the FORCE_INLINE of the _body() function. */
105266 +static size_t FSE_readNCount_body_default(
105267 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
105268 +        const void* headerBuffer, size_t hbSize)
105270 +    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
105273 +#if DYNAMIC_BMI2
105274 +TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2(
105275 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
105276 +        const void* headerBuffer, size_t hbSize)
105278 +    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
105280 +#endif
105282 +size_t FSE_readNCount_bmi2(
105283 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
105284 +        const void* headerBuffer, size_t hbSize, int bmi2)
105286 +#if DYNAMIC_BMI2
105287 +    if (bmi2) {
105288 +        return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
105289 +    }
105290 +#endif
105291 +    (void)bmi2;
105292 +    return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
105295 +size_t FSE_readNCount(
105296 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
105297 +        const void* headerBuffer, size_t hbSize)
105299 +    return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
105303 +/*! HUF_readStats() :
105304 +    Read compact Huffman tree, saved by HUF_writeCTable().
105305 +    `huffWeight` is destination buffer.
105306 +    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
105307 +    @return : size read from `src` , or an error Code .
105308 +    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
105310 +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
105311 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
105312 +                     const void* src, size_t srcSize)
105314 +    U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
105315 +    return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
105318 +FORCE_INLINE_TEMPLATE size_t
105319 +HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
105320 +                   U32* nbSymbolsPtr, U32* tableLogPtr,
105321 +                   const void* src, size_t srcSize,
105322 +                   void* workSpace, size_t wkspSize,
105323 +                   int bmi2)
105325 +    U32 weightTotal;
105326 +    const BYTE* ip = (const BYTE*) src;
105327 +    size_t iSize;
105328 +    size_t oSize;
105330 +    if (!srcSize) return ERROR(srcSize_wrong);
105331 +    iSize = ip[0];
105332 +    /* ZSTD_memset(huffWeight, 0, hwSize);   *//* is not necessary, even though some analyzer complain ... */
105334 +    if (iSize >= 128) {  /* special header */
105335 +        oSize = iSize - 127;
105336 +        iSize = ((oSize+1)/2);
105337 +        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
105338 +        if (oSize >= hwSize) return ERROR(corruption_detected);
105339 +        ip += 1;
105340 +        {   U32 n;
105341 +            for (n=0; n<oSize; n+=2) {
105342 +                huffWeight[n]   = ip[n/2] >> 4;
105343 +                huffWeight[n+1] = ip[n/2] & 15;
105344 +    }   }   }
105345 +    else  {   /* header compressed with FSE (normal case) */
105346 +        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
105347 +        /* max (hwSize-1) values decoded, as last one is implied */
105348 +        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
105349 +        if (FSE_isError(oSize)) return oSize;
105350 +    }
105352 +    /* collect weight stats */
105353 +    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
105354 +    weightTotal = 0;
105355 +    {   U32 n; for (n=0; n<oSize; n++) {
105356 +            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
105357 +            rankStats[huffWeight[n]]++;
105358 +            weightTotal += (1 << huffWeight[n]) >> 1;
105359 +    }   }
105360 +    if (weightTotal == 0) return ERROR(corruption_detected);
105362 +    /* get last non-null symbol weight (implied, total must be 2^n) */
105363 +    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
105364 +        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
105365 +        *tableLogPtr = tableLog;
105366 +        /* determine last weight */
105367 +        {   U32 const total = 1 << tableLog;
105368 +            U32 const rest = total - weightTotal;
105369 +            U32 const verif = 1 << BIT_highbit32(rest);
105370 +            U32 const lastWeight = BIT_highbit32(rest) + 1;
105371 +            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
105372 +            huffWeight[oSize] = (BYTE)lastWeight;
105373 +            rankStats[lastWeight]++;
105374 +    }   }
105376 +    /* check tree construction validity */
105377 +    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
105379 +    /* results */
105380 +    *nbSymbolsPtr = (U32)(oSize+1);
105381 +    return iSize+1;
105384 +/* Avoids the FORCE_INLINE of the _body() function. */
105385 +static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
105386 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
105387 +                     const void* src, size_t srcSize,
105388 +                     void* workSpace, size_t wkspSize)
105390 +    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
105393 +#if DYNAMIC_BMI2
105394 +static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
105395 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
105396 +                     const void* src, size_t srcSize,
105397 +                     void* workSpace, size_t wkspSize)
105399 +    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
105401 +#endif
105403 +size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
105404 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
105405 +                     const void* src, size_t srcSize,
105406 +                     void* workSpace, size_t wkspSize,
105407 +                     int bmi2)
105409 +#if DYNAMIC_BMI2
105410 +    if (bmi2) {
105411 +        return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
105412 +    }
105413 +#endif
105414 +    (void)bmi2;
105415 +    return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
105417 diff --git a/lib/zstd/common/error_private.c b/lib/zstd/common/error_private.c
105418 new file mode 100644
105419 index 000000000000..6d1135f8c373
105420 --- /dev/null
105421 +++ b/lib/zstd/common/error_private.c
105422 @@ -0,0 +1,56 @@
105424 + * Copyright (c) Yann Collet, Facebook, Inc.
105425 + * All rights reserved.
105427 + * This source code is licensed under both the BSD-style license (found in the
105428 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
105429 + * in the COPYING file in the root directory of this source tree).
105430 + * You may select, at your option, one of the above-listed licenses.
105431 + */
105433 +/* The purpose of this file is to have a single list of error strings embedded in binary */
105435 +#include "error_private.h"
105437 +const char* ERR_getErrorString(ERR_enum code)
105439 +#ifdef ZSTD_STRIP_ERROR_STRINGS
105440 +    (void)code;
105441 +    return "Error strings stripped";
105442 +#else
105443 +    static const char* const notErrorCode = "Unspecified error code";
105444 +    switch( code )
105445 +    {
105446 +    case PREFIX(no_error): return "No error detected";
105447 +    case PREFIX(GENERIC):  return "Error (generic)";
105448 +    case PREFIX(prefix_unknown): return "Unknown frame descriptor";
105449 +    case PREFIX(version_unsupported): return "Version not supported";
105450 +    case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
105451 +    case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
105452 +    case PREFIX(corruption_detected): return "Corrupted block detected";
105453 +    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
105454 +    case PREFIX(parameter_unsupported): return "Unsupported parameter";
105455 +    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
105456 +    case PREFIX(init_missing): return "Context should be init first";
105457 +    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
105458 +    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
105459 +    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
105460 +    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
105461 +    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
105462 +    case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
105463 +    case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
105464 +    case PREFIX(dictionary_wrong): return "Dictionary mismatch";
105465 +    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
105466 +    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
105467 +    case PREFIX(srcSize_wrong): return "Src size is incorrect";
105468 +    case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
105469 +        /* following error codes are not stable and may be removed or changed in a future version */
105470 +    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
105471 +    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
105472 +    case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
105473 +    case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
105474 +    case PREFIX(maxCode):
105475 +    default: return notErrorCode;
105476 +    }
105477 +#endif
105479 diff --git a/lib/zstd/common/error_private.h b/lib/zstd/common/error_private.h
105480 new file mode 100644
105481 index 000000000000..d14e686adf95
105482 --- /dev/null
105483 +++ b/lib/zstd/common/error_private.h
105484 @@ -0,0 +1,66 @@
105486 + * Copyright (c) Yann Collet, Facebook, Inc.
105487 + * All rights reserved.
105489 + * This source code is licensed under both the BSD-style license (found in the
105490 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
105491 + * in the COPYING file in the root directory of this source tree).
105492 + * You may select, at your option, one of the above-listed licenses.
105493 + */
105495 +/* Note : this module is expected to remain private, do not expose it */
105497 +#ifndef ERROR_H_MODULE
105498 +#define ERROR_H_MODULE
105502 +/* ****************************************
105503 +*  Dependencies
105504 +******************************************/
105505 +#include "zstd_deps.h"    /* size_t */
105506 +#include <linux/zstd_errors.h>  /* enum list */
105509 +/* ****************************************
105510 +*  Compiler-specific
105511 +******************************************/
105512 +#define ERR_STATIC static __attribute__((unused))
105515 +/*-****************************************
105516 +*  Customization (error_public.h)
105517 +******************************************/
105518 +typedef ZSTD_ErrorCode ERR_enum;
105519 +#define PREFIX(name) ZSTD_error_##name
105522 +/*-****************************************
105523 +*  Error codes handling
105524 +******************************************/
105525 +#undef ERROR   /* already defined on Visual Studio */
105526 +#define ERROR(name) ZSTD_ERROR(name)
105527 +#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
105529 +ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
105531 +ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
105533 +/* check and forward error code */
105534 +#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
105535 +#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
105538 +/*-****************************************
105539 +*  Error Strings
105540 +******************************************/
105542 +const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */
105544 +ERR_STATIC const char* ERR_getErrorName(size_t code)
105546 +    return ERR_getErrorString(ERR_getErrorCode(code));
105550 +#endif /* ERROR_H_MODULE */
105551 diff --git a/lib/zstd/common/fse.h b/lib/zstd/common/fse.h
105552 new file mode 100644
105553 index 000000000000..477e642ffb41
105554 --- /dev/null
105555 +++ b/lib/zstd/common/fse.h
105556 @@ -0,0 +1,708 @@
105557 +/* ******************************************************************
105558 + * FSE : Finite State Entropy codec
105559 + * Public Prototypes declaration
105560 + * Copyright (c) Yann Collet, Facebook, Inc.
105562 + * You can contact the author at :
105563 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
105565 + * This source code is licensed under both the BSD-style license (found in the
105566 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
105567 + * in the COPYING file in the root directory of this source tree).
105568 + * You may select, at your option, one of the above-listed licenses.
105569 +****************************************************************** */
105572 +#ifndef FSE_H
105573 +#define FSE_H
105576 +/*-*****************************************
105577 +*  Dependencies
105578 +******************************************/
105579 +#include "zstd_deps.h"    /* size_t, ptrdiff_t */
105582 +/*-*****************************************
105583 +*  FSE_PUBLIC_API : control library symbols visibility
105584 +******************************************/
105585 +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
105586 +#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
105587 +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
105588 +#  define FSE_PUBLIC_API __declspec(dllexport)
105589 +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
105590 +#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
105591 +#else
105592 +#  define FSE_PUBLIC_API
105593 +#endif
105595 +/*------   Version   ------*/
105596 +#define FSE_VERSION_MAJOR    0
105597 +#define FSE_VERSION_MINOR    9
105598 +#define FSE_VERSION_RELEASE  0
105600 +#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
105601 +#define FSE_QUOTE(str) #str
105602 +#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
105603 +#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
105605 +#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
105606 +FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */
105609 +/*-****************************************
105610 +*  FSE simple functions
105611 +******************************************/
105612 +/*! FSE_compress() :
105613 +    Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
105614 +    'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize).
105615 +    @return : size of compressed data (<= dstCapacity).
105616 +    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
105617 +                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
105618 +                     if FSE_isError(return), compression failed (more details using FSE_getErrorName())
105620 +FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
105621 +                             const void* src, size_t srcSize);
105623 +/*! FSE_decompress():
105624 +    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
105625 +    into already allocated destination buffer 'dst', of size 'dstCapacity'.
105626 +    @return : size of regenerated data (<= maxDstSize),
105627 +              or an error code, which can be tested using FSE_isError() .
105629 +    ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
105630 +    Why ? : making this distinction requires a header.
105631 +    Header management is intentionally delegated to the user layer, which can better manage special cases.
105633 +FSE_PUBLIC_API size_t FSE_decompress(void* dst,  size_t dstCapacity,
105634 +                               const void* cSrc, size_t cSrcSize);
105637 +/*-*****************************************
105638 +*  Tool functions
105639 +******************************************/
105640 +FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */
105642 +/* Error Management */
105643 +FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
105644 +FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
105647 +/*-*****************************************
105648 +*  FSE advanced functions
105649 +******************************************/
105650 +/*! FSE_compress2() :
105651 +    Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
105652 +    Both parameters can be defined as '0' to mean : use default value
105653 +    @return : size of compressed data
105654 +    Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!!
105655 +                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
105656 +                     if FSE_isError(return), it's an error code.
105658 +FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
105661 +/*-*****************************************
105662 +*  FSE detailed API
105663 +******************************************/
105665 +FSE_compress() does the following:
105666 +1. count symbol occurrence from source[] into table count[] (see hist.h)
105667 +2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
105668 +3. save normalized counters to memory buffer using writeNCount()
105669 +4. build encoding table 'CTable' from normalized counters
105670 +5. encode the data stream using encoding table 'CTable'
105672 +FSE_decompress() does the following:
105673 +1. read normalized counters with readNCount()
105674 +2. build decoding table 'DTable' from normalized counters
105675 +3. decode the data stream using decoding table 'DTable'
105677 +The following API allows targeting specific sub-functions for advanced tasks.
105678 +For example, it's possible to compress several blocks using the same 'CTable',
105679 +or to save and provide normalized distribution using external method.
105682 +/* *** COMPRESSION *** */
105684 +/*! FSE_optimalTableLog():
105685 +    dynamically downsize 'tableLog' when conditions are met.
105686 +    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
105687 +    @return : recommended tableLog (necessarily <= 'maxTableLog') */
105688 +FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
105690 +/*! FSE_normalizeCount():
105691 +    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
105692 +    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
105693 +    useLowProbCount is a boolean parameter which trades off compressed size for
105694 +    faster header decoding. When it is set to 1, the compressed data will be slightly
105695 +    smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
105696 +    faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
105697 +    is a good default, since header deserialization makes a big speed difference.
105698 +    Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
105699 +    @return : tableLog,
105700 +              or an errorCode, which can be tested using FSE_isError() */
105701 +FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
105702 +                    const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);
105704 +/*! FSE_NCountWriteBound():
105705 +    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
105706 +    Typically useful for allocation purpose. */
105707 +FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
105709 +/*! FSE_writeNCount():
105710 +    Compactly save 'normalizedCounter' into 'buffer'.
105711 +    @return : size of the compressed table,
105712 +              or an errorCode, which can be tested using FSE_isError(). */
105713 +FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
105714 +                                 const short* normalizedCounter,
105715 +                                 unsigned maxSymbolValue, unsigned tableLog);
105717 +/*! Constructor and Destructor of FSE_CTable.
105718 +    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
105719 +typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
105720 +FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
105721 +FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);
105723 +/*! FSE_buildCTable():
105724 +    Builds `ct`, which must be already allocated, using FSE_createCTable().
105725 +    @return : 0, or an errorCode, which can be tested using FSE_isError() */
105726 +FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
105728 +/*! FSE_compress_usingCTable():
105729 +    Compress `src` using `ct` into `dst` which must be already allocated.
105730 +    @return : size of compressed data (<= `dstCapacity`),
105731 +              or 0 if compressed data could not fit into `dst`,
105732 +              or an errorCode, which can be tested using FSE_isError() */
105733 +FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
105736 +Tutorial :
105737 +----------
105738 +The first step is to count all symbols. FSE_count() does this job very fast.
105739 +Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
105740 +'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
105741 +maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
105742 +FSE_count() will return the number of occurrence of the most frequent symbol.
105743 +This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
105744 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
105746 +The next step is to normalize the frequencies.
105747 +FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
105748 +It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
105749 +You can use 'tableLog'==0 to mean "use default tableLog value".
105750 +If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
105751 +which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
105753 +The result of FSE_normalizeCount() will be saved into a table,
105754 +called 'normalizedCounter', which is a table of signed short.
105755 +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
105756 +The return value is tableLog if everything proceeded as expected.
105757 +It is 0 if there is a single symbol within distribution.
105758 +If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
105760 +'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
105761 +'buffer' must be already allocated.
105762 +For guaranteed success, buffer size must be at least FSE_headerBound().
105763 +The result of the function is the number of bytes written into 'buffer'.
105764 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
105766 +'normalizedCounter' can then be used to create the compression table 'CTable'.
105767 +The space required by 'CTable' must be already allocated, using FSE_createCTable().
105768 +You can then use FSE_buildCTable() to fill 'CTable'.
105769 +If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
105771 +'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
105772 +Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
105773 +The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
105774 +If it returns '0', compressed data could not fit into 'dst'.
105775 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
105779 +/* *** DECOMPRESSION *** */
105781 +/*! FSE_readNCount():
105782 +    Read compactly saved 'normalizedCounter' from 'rBuffer'.
105783 +    @return : size read from 'rBuffer',
105784 +              or an errorCode, which can be tested using FSE_isError().
105785 +              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
105786 +FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
105787 +                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
105788 +                           const void* rBuffer, size_t rBuffSize);
105790 +/*! FSE_readNCount_bmi2():
105791 + * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
105792 + */
105793 +FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
105794 +                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
105795 +                           const void* rBuffer, size_t rBuffSize, int bmi2);
105797 +/*! Constructor and Destructor of FSE_DTable.
105798 +    Note that its size depends on 'tableLog' */
105799 +typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
105800 +FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
105801 +FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);
105803 +/*! FSE_buildDTable():
105804 +    Builds 'dt', which must be already allocated, using FSE_createDTable().
105805 +    return : 0, or an errorCode, which can be tested using FSE_isError() */
105806 +FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
105808 +/*! FSE_decompress_usingDTable():
105809 +    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
105810 +    into `dst` which must be already allocated.
105811 +    @return : size of regenerated data (necessarily <= `dstCapacity`),
105812 +              or an errorCode, which can be tested using FSE_isError() */
105813 +FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
105816 +Tutorial :
105817 +----------
105818 +(Note : these functions only decompress FSE-compressed blocks.
105819 + If block is uncompressed, use memcpy() instead
105820 + If block is a single repeated byte, use memset() instead )
105822 +The first step is to obtain the normalized frequencies of symbols.
105823 +This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
105824 +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
105825 +In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
105826 +or size the table to handle worst case situations (typically 256).
105827 +FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
105828 +The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
105829 +Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
105830 +If there is an error, the function will return an error code, which can be tested using FSE_isError().
105832 +The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
105833 +This is performed by the function FSE_buildDTable().
105834 +The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
105835 +If there is an error, the function will return an error code, which can be tested using FSE_isError().
105837 +`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
105838 +`cSrcSize` must be strictly correct, otherwise decompression will fail.
105839 +FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
105840 +If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
105843 +#endif  /* FSE_H */
105845 +#if !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
105846 +#define FSE_H_FSE_STATIC_LINKING_ONLY
105848 +/* *** Dependency *** */
105849 +#include "bitstream.h"
105852 +/* *****************************************
105853 +*  Static allocation
105854 +*******************************************/
105855 +/* FSE buffer bounds */
105856 +#define FSE_NCOUNTBOUND 512
105857 +#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
105858 +#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
105860 +/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
105861 +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
105862 +#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<(maxTableLog)))
105864 +/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
105865 +#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
105866 +#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
105869 +/* *****************************************
105870 + *  FSE advanced API
105871 + ***************************************** */
105873 +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
105874 +/**< same as FSE_optimalTableLog(), which used `minus==2` */
105876 +/* FSE_compress_wksp() :
105877 + * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
105878 + * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
105879 + */
105880 +#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
105881 +size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
105883 +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
105884 +/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
105886 +size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
105887 +/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
105889 +/* FSE_buildCTable_wksp() :
105890 + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
105891 + * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
105892 + */
105893 +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2)))
105894 +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
105895 +size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
105897 +#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
105898 +#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
105899 +FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
105900 +/**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */
105902 +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
105903 +/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
105905 +size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
105906 +/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
105908 +#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
105909 +#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
105910 +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
105911 +/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
105913 +size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
105914 +/**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
105916 +typedef enum {
105917 +   FSE_repeat_none,  /**< Cannot use the previous table */
105918 +   FSE_repeat_check, /**< Can use the previous table but it must be checked */
105919 +   FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
105920 + } FSE_repeat;
105922 +/* *****************************************
105923 +*  FSE symbol compression API
105924 +*******************************************/
105926 +   This API consists of small unitary functions, which highly benefit from being inlined.
105927 +   Hence their body are included in next section.
105929 +typedef struct {
105930 +    ptrdiff_t   value;
105931 +    const void* stateTable;
105932 +    const void* symbolTT;
105933 +    unsigned    stateLog;
105934 +} FSE_CState_t;
105936 +static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
105938 +static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
105940 +static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
105942 +/**<
105943 +These functions are inner components of FSE_compress_usingCTable().
105944 +They allow the creation of custom streams, mixing multiple tables and bit sources.
105946 +A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
105947 +So the first symbol you will encode is the last you will decode, like a LIFO stack.
105949 +You will need a few variables to track your CStream. They are :
105951 +FSE_CTable    ct;         // Provided by FSE_buildCTable()
105952 +BIT_CStream_t bitStream;  // bitStream tracking structure
105953 +FSE_CState_t  state;      // State tracking structure (can have several)
105956 +The first thing to do is to init bitStream and state.
105957 +    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
105958 +    FSE_initCState(&state, ct);
105960 +Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
105961 +You can then encode your input data, byte after byte.
105962 +FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
105963 +Remember decoding will be done in reverse direction.
105964 +    FSE_encodeByte(&bitStream, &state, symbol);
105966 +At any time, you can also add any bit sequence.
105967 +Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
105968 +    BIT_addBits(&bitStream, bitField, nbBits);
105970 +The above methods don't commit data to memory, they just store it into local register, for speed.
105971 +Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
105972 +Writing data to memory is a manual operation, performed by the flushBits function.
105973 +    BIT_flushBits(&bitStream);
105975 +Your last FSE encoding operation shall be to flush your last state value(s).
105976 +    FSE_flushState(&bitStream, &state);
105978 +Finally, you must close the bitStream.
105979 +The function returns the size of CStream in bytes.
105980 +If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
105981 +If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
105982 +    size_t size = BIT_closeCStream(&bitStream);
105986 +/* *****************************************
105987 +*  FSE symbol decompression API
105988 +*******************************************/
105989 +typedef struct {
105990 +    size_t      state;
105991 +    const void* table;   /* precise table may vary, depending on U16 */
105992 +} FSE_DState_t;
105995 +static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
105997 +static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
105999 +static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
106001 +/**<
106002 +Let's now decompose FSE_decompress_usingDTable() into its unitary components.
106003 +You will decode FSE-encoded symbols from the bitStream,
106004 +and also any other bitFields you put in, **in reverse order**.
106006 +You will need a few variables to track your bitStream. They are :
106008 +BIT_DStream_t DStream;    // Stream context
106009 +FSE_DState_t  DState;     // State context. Multiple ones are possible
106010 +FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
106012 +The first thing to do is to init the bitStream.
106013 +    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
106015 +You should then retrieve your initial state(s)
106016 +(in reverse flushing order if you have several ones) :
106017 +    errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
106019 +You can then decode your data, symbol after symbol.
106020 +For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
106021 +Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
106022 +    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
106024 +You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
106025 +Note : maximum allowed nbBits is 25, for 32-bits compatibility
106026 +    size_t bitField = BIT_readBits(&DStream, nbBits);
106028 +All above operations only read from local register (which size depends on size_t).
106029 +Refueling the register from memory is manually performed by the reload method.
106030 +    endSignal = FSE_reloadDStream(&DStream);
106032 +BIT_reloadDStream() result tells if there is still some more data to read from DStream.
106033 +BIT_DStream_unfinished : there is still some data left into the DStream.
106034 +BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
106035 +BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
106036 +BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.
106038 +When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
106039 +to properly detect the exact end of stream.
106040 +After each decoded symbol, check if DStream is fully consumed using this simple test :
106041 +    BIT_reloadDStream(&DStream) >= BIT_DStream_completed
106043 +When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
106044 +Checking if DStream has reached its end is performed by :
106045 +    BIT_endOfDStream(&DStream);
106046 +Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
106047 +    FSE_endOfDState(&DState);
106051 +/* *****************************************
106052 +*  FSE unsafe API
106053 +*******************************************/
106054 +static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
106055 +/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
106058 +/* *****************************************
106059 +*  Implementation of inlined functions
106060 +*******************************************/
106061 +typedef struct {
106062 +    int deltaFindState;
106063 +    U32 deltaNbBits;
106064 +} FSE_symbolCompressionTransform; /* total 8 bytes */
106066 +MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
106068 +    const void* ptr = ct;
106069 +    const U16* u16ptr = (const U16*) ptr;
106070 +    const U32 tableLog = MEM_read16(ptr);
106071 +    statePtr->value = (ptrdiff_t)1<<tableLog;
106072 +    statePtr->stateTable = u16ptr+2;
106073 +    statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
106074 +    statePtr->stateLog = tableLog;
106078 +/*! FSE_initCState2() :
106079 +*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
106080 +*   uses the smallest state value possible, saving the cost of this symbol */
106081 +MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
106083 +    FSE_initCState(statePtr, ct);
106084 +    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
106085 +        const U16* stateTable = (const U16*)(statePtr->stateTable);
106086 +        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
106087 +        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
106088 +        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
106089 +    }
106092 +MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
106094 +    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
106095 +    const U16* const stateTable = (const U16*)(statePtr->stateTable);
106096 +    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
106097 +    BIT_addBits(bitC, statePtr->value, nbBitsOut);
106098 +    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
106101 +MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
106103 +    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
106104 +    BIT_flushBits(bitC);
106108 +/* FSE_getMaxNbBits() :
106109 + * Approximate maximum cost of a symbol, in bits.
106110 + * Fractional get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
106111 + * note 1 : assume symbolValue is valid (<= maxSymbolValue)
106112 + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
106113 +MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
106115 +    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
106116 +    return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
106119 +/* FSE_bitCost() :
106120 + * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
106121 + * note 1 : assume symbolValue is valid (<= maxSymbolValue)
106122 + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
106123 +MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
106125 +    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
106126 +    U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
106127 +    U32 const threshold = (minNbBits+1) << 16;
106128 +    assert(tableLog < 16);
106129 +    assert(accuracyLog < 31-tableLog);  /* ensure enough room for renormalization double shift */
106130 +    {   U32 const tableSize = 1 << tableLog;
106131 +        U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
106132 +        U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog;   /* linear interpolation (very approximate) */
106133 +        U32 const bitMultiplier = 1 << accuracyLog;
106134 +        assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
106135 +        assert(normalizedDeltaFromThreshold <= bitMultiplier);
106136 +        return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
106137 +    }
106141 +/* ======    Decompression    ====== */
106143 +typedef struct {
106144 +    U16 tableLog;
106145 +    U16 fastMode;
106146 +} FSE_DTableHeader;   /* sizeof U32 */
106148 +typedef struct
106150 +    unsigned short newState;
106151 +    unsigned char  symbol;
106152 +    unsigned char  nbBits;
106153 +} FSE_decode_t;   /* size == U32 */
106155 +MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
106157 +    const void* ptr = dt;
106158 +    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
106159 +    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
106160 +    BIT_reloadDStream(bitD);
106161 +    DStatePtr->table = dt + 1;
106164 +MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
106166 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
106167 +    return DInfo.symbol;
106170 +MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
106172 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
106173 +    U32 const nbBits = DInfo.nbBits;
106174 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
106175 +    DStatePtr->state = DInfo.newState + lowBits;
106178 +MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
106180 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
106181 +    U32 const nbBits = DInfo.nbBits;
106182 +    BYTE const symbol = DInfo.symbol;
106183 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
106185 +    DStatePtr->state = DInfo.newState + lowBits;
106186 +    return symbol;
106189 +/*! FSE_decodeSymbolFast() :
106190 +    unsafe, only works if no symbol has a probability > 50% */
106191 +MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
106193 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
106194 +    U32 const nbBits = DInfo.nbBits;
106195 +    BYTE const symbol = DInfo.symbol;
106196 +    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
106198 +    DStatePtr->state = DInfo.newState + lowBits;
106199 +    return symbol;
106202 +MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
106204 +    return DStatePtr->state == 0;
106209 +#ifndef FSE_COMMONDEFS_ONLY
106211 +/* **************************************************************
106212 +*  Tuning parameters
106213 +****************************************************************/
106214 +/*!MEMORY_USAGE :
106215 +*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
106216 +*  Increasing memory usage improves compression ratio
106217 +*  Reduced memory usage can improve speed, due to cache effect
106218 +*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
106219 +#ifndef FSE_MAX_MEMORY_USAGE
106220 +#  define FSE_MAX_MEMORY_USAGE 14
106221 +#endif
106222 +#ifndef FSE_DEFAULT_MEMORY_USAGE
106223 +#  define FSE_DEFAULT_MEMORY_USAGE 13
106224 +#endif
106225 +#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE)
106226 +#  error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE"
106227 +#endif
106229 +/*!FSE_MAX_SYMBOL_VALUE :
106230 +*  Maximum symbol value authorized.
106231 +*  Required for proper stack allocation */
106232 +#ifndef FSE_MAX_SYMBOL_VALUE
106233 +#  define FSE_MAX_SYMBOL_VALUE 255
106234 +#endif
106236 +/* **************************************************************
106237 +*  template functions type & suffix
106238 +****************************************************************/
106239 +#define FSE_FUNCTION_TYPE BYTE
106240 +#define FSE_FUNCTION_EXTENSION
106241 +#define FSE_DECODE_TYPE FSE_decode_t
106244 +#endif   /* !FSE_COMMONDEFS_ONLY */
106247 +/* ***************************************************************
106248 +*  Constants
106249 +*****************************************************************/
106250 +#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
106251 +#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
106252 +#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
106253 +#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
106254 +#define FSE_MIN_TABLELOG 5
106256 +#define FSE_TABLELOG_ABSOLUTE_MAX 15
106257 +#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
106258 +#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
106259 +#endif
106261 +#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)
106264 +#endif /* FSE_STATIC_LINKING_ONLY */
106265 diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
106266 new file mode 100644
106267 index 000000000000..2c8bbe3e4c14
106268 --- /dev/null
106269 +++ b/lib/zstd/common/fse_decompress.c
106270 @@ -0,0 +1,390 @@
106271 +/* ******************************************************************
106272 + * FSE : Finite State Entropy decoder
106273 + * Copyright (c) Yann Collet, Facebook, Inc.
106275 + *  You can contact the author at :
106276 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
106277 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
106279 + * This source code is licensed under both the BSD-style license (found in the
106280 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
106281 + * in the COPYING file in the root directory of this source tree).
106282 + * You may select, at your option, one of the above-listed licenses.
106283 +****************************************************************** */
106286 +/* **************************************************************
106287 +*  Includes
106288 +****************************************************************/
106289 +#include "debug.h"      /* assert */
106290 +#include "bitstream.h"
106291 +#include "compiler.h"
106292 +#define FSE_STATIC_LINKING_ONLY
106293 +#include "fse.h"
106294 +#include "error_private.h"
106295 +#define ZSTD_DEPS_NEED_MALLOC
106296 +#include "zstd_deps.h"
106299 +/* **************************************************************
106300 +*  Error Management
106301 +****************************************************************/
106302 +#define FSE_isError ERR_isError
106303 +#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
106306 +/* **************************************************************
106307 +*  Templates
106308 +****************************************************************/
106310 +  designed to be included
106311 +  for type-specific functions (template emulation in C)
106312 +  Objective is to write these functions only once, for improved maintenance
106315 +/* safety checks */
106316 +#ifndef FSE_FUNCTION_EXTENSION
106317 +#  error "FSE_FUNCTION_EXTENSION must be defined"
106318 +#endif
106319 +#ifndef FSE_FUNCTION_TYPE
106320 +#  error "FSE_FUNCTION_TYPE must be defined"
106321 +#endif
106323 +/* Function names */
106324 +#define FSE_CAT(X,Y) X##Y
106325 +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
106326 +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
106329 +/* Function templates */
106330 +FSE_DTable* FSE_createDTable (unsigned tableLog)
106332 +    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
106333 +    return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
106336 +void FSE_freeDTable (FSE_DTable* dt)
106338 +    ZSTD_free(dt);
106341 +static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
106343 +    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
106344 +    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
106345 +    U16* symbolNext = (U16*)workSpace;
106346 +    BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);
106348 +    U32 const maxSV1 = maxSymbolValue + 1;
106349 +    U32 const tableSize = 1 << tableLog;
106350 +    U32 highThreshold = tableSize-1;
106352 +    /* Sanity Checks */
106353 +    if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
106354 +    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
106355 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
106357 +    /* Init, lay down lowprob symbols */
106358 +    {   FSE_DTableHeader DTableH;
106359 +        DTableH.tableLog = (U16)tableLog;
106360 +        DTableH.fastMode = 1;
106361 +        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
106362 +            U32 s;
106363 +            for (s=0; s<maxSV1; s++) {
106364 +                if (normalizedCounter[s]==-1) {
106365 +                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
106366 +                    symbolNext[s] = 1;
106367 +                } else {
106368 +                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
106369 +                    symbolNext[s] = normalizedCounter[s];
106370 +        }   }   }
106371 +        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
106372 +    }
106374 +    /* Spread symbols */
106375 +    if (highThreshold == tableSize - 1) {
106376 +        size_t const tableMask = tableSize-1;
106377 +        size_t const step = FSE_TABLESTEP(tableSize);
106378 +        /* First lay down the symbols in order.
106379 +         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
106380 +         * misses since small blocks generally have small table logs, so nearly
106381 +         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
106382 +         * our buffer to handle the over-write.
106383 +         */
106384 +        {
106385 +            U64 const add = 0x0101010101010101ull;
106386 +            size_t pos = 0;
106387 +            U64 sv = 0;
106388 +            U32 s;
106389 +            for (s=0; s<maxSV1; ++s, sv += add) {
106390 +                int i;
106391 +                int const n = normalizedCounter[s];
106392 +                MEM_write64(spread + pos, sv);
106393 +                for (i = 8; i < n; i += 8) {
106394 +                    MEM_write64(spread + pos + i, sv);
106395 +                }
106396 +                pos += n;
106397 +            }
106398 +        }
106399 +        /* Now we spread those positions across the table.
106400 +         * The benefit of doing it in two stages is that we avoid the the
106401 +         * variable size inner loop, which caused lots of branch misses.
106402 +         * Now we can run through all the positions without any branch misses.
106403 +         * We unroll the loop twice, since that is what emperically worked best.
106404 +         */
106405 +        {
106406 +            size_t position = 0;
106407 +            size_t s;
106408 +            size_t const unroll = 2;
106409 +            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
106410 +            for (s = 0; s < (size_t)tableSize; s += unroll) {
106411 +                size_t u;
106412 +                for (u = 0; u < unroll; ++u) {
106413 +                    size_t const uPosition = (position + (u * step)) & tableMask;
106414 +                    tableDecode[uPosition].symbol = spread[s + u];
106415 +                }
106416 +                position = (position + (unroll * step)) & tableMask;
106417 +            }
106418 +            assert(position == 0);
106419 +        }
106420 +    } else {
106421 +        U32 const tableMask = tableSize-1;
106422 +        U32 const step = FSE_TABLESTEP(tableSize);
106423 +        U32 s, position = 0;
106424 +        for (s=0; s<maxSV1; s++) {
106425 +            int i;
106426 +            for (i=0; i<normalizedCounter[s]; i++) {
106427 +                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
106428 +                position = (position + step) & tableMask;
106429 +                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
106430 +        }   }
106431 +        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
106432 +    }
106434 +    /* Build Decoding table */
106435 +    {   U32 u;
106436 +        for (u=0; u<tableSize; u++) {
106437 +            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
106438 +            U32 const nextState = symbolNext[symbol]++;
106439 +            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
106440 +            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
106441 +    }   }
106443 +    return 0;
106446 +size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
106448 +    return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
106452 +#ifndef FSE_COMMONDEFS_ONLY
106454 +/*-*******************************************************
106455 +*  Decompression (Byte symbols)
106456 +*********************************************************/
106457 +size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
106459 +    void* ptr = dt;
106460 +    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
106461 +    void* dPtr = dt + 1;
106462 +    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
106464 +    DTableH->tableLog = 0;
106465 +    DTableH->fastMode = 0;
106467 +    cell->newState = 0;
106468 +    cell->symbol = symbolValue;
106469 +    cell->nbBits = 0;
106471 +    return 0;
106475 +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
106477 +    void* ptr = dt;
106478 +    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
106479 +    void* dPtr = dt + 1;
106480 +    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
106481 +    const unsigned tableSize = 1 << nbBits;
106482 +    const unsigned tableMask = tableSize - 1;
106483 +    const unsigned maxSV1 = tableMask+1;
106484 +    unsigned s;
106486 +    /* Sanity checks */
106487 +    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
106489 +    /* Build Decoding Table */
106490 +    DTableH->tableLog = (U16)nbBits;
106491 +    DTableH->fastMode = 1;
106492 +    for (s=0; s<maxSV1; s++) {
106493 +        dinfo[s].newState = 0;
106494 +        dinfo[s].symbol = (BYTE)s;
106495 +        dinfo[s].nbBits = (BYTE)nbBits;
106496 +    }
106498 +    return 0;
106501 +FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
106502 +          void* dst, size_t maxDstSize,
106503 +    const void* cSrc, size_t cSrcSize,
106504 +    const FSE_DTable* dt, const unsigned fast)
106506 +    BYTE* const ostart = (BYTE*) dst;
106507 +    BYTE* op = ostart;
106508 +    BYTE* const omax = op + maxDstSize;
106509 +    BYTE* const olimit = omax-3;
106511 +    BIT_DStream_t bitD;
106512 +    FSE_DState_t state1;
106513 +    FSE_DState_t state2;
106515 +    /* Init */
106516 +    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
106518 +    FSE_initDState(&state1, &bitD, dt);
106519 +    FSE_initDState(&state2, &bitD, dt);
106521 +#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
106523 +    /* 4 symbols per loop */
106524 +    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
106525 +        op[0] = FSE_GETSYMBOL(&state1);
106527 +        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
106528 +            BIT_reloadDStream(&bitD);
106530 +        op[1] = FSE_GETSYMBOL(&state2);
106532 +        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
106533 +            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
106535 +        op[2] = FSE_GETSYMBOL(&state1);
106537 +        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
106538 +            BIT_reloadDStream(&bitD);
106540 +        op[3] = FSE_GETSYMBOL(&state2);
106541 +    }
106543 +    /* tail */
106544 +    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
106545 +    while (1) {
106546 +        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
106547 +        *op++ = FSE_GETSYMBOL(&state1);
106548 +        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
106549 +            *op++ = FSE_GETSYMBOL(&state2);
106550 +            break;
106551 +        }
106553 +        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
106554 +        *op++ = FSE_GETSYMBOL(&state2);
106555 +        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
106556 +            *op++ = FSE_GETSYMBOL(&state1);
106557 +            break;
106558 +    }   }
106560 +    return op-ostart;
106564 +size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
106565 +                            const void* cSrc, size_t cSrcSize,
106566 +                            const FSE_DTable* dt)
106568 +    const void* ptr = dt;
106569 +    const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
106570 +    const U32 fastMode = DTableH->fastMode;
106572 +    /* select fast mode (static) */
106573 +    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
106574 +    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
106578 +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
106580 +    return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
106583 +typedef struct {
106584 +    short ncount[FSE_MAX_SYMBOL_VALUE + 1];
106585 +    FSE_DTable dtable[1]; /* Dynamically sized */
106586 +} FSE_DecompressWksp;
106589 +FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
106590 +        void* dst, size_t dstCapacity,
106591 +        const void* cSrc, size_t cSrcSize,
106592 +        unsigned maxLog, void* workSpace, size_t wkspSize,
106593 +        int bmi2)
106595 +    const BYTE* const istart = (const BYTE*)cSrc;
106596 +    const BYTE* ip = istart;
106597 +    unsigned tableLog;
106598 +    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
106599 +    FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
106601 +    DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
106602 +    if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
106604 +    /* normal FSE decoding mode */
106605 +    {
106606 +        size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
106607 +        if (FSE_isError(NCountLength)) return NCountLength;
106608 +        if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
106609 +        assert(NCountLength <= cSrcSize);
106610 +        ip += NCountLength;
106611 +        cSrcSize -= NCountLength;
106612 +    }
106614 +    if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
106615 +    workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
106616 +    wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
106618 +    CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
106620 +    {
106621 +        const void* ptr = wksp->dtable;
106622 +        const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
106623 +        const U32 fastMode = DTableH->fastMode;
106625 +        /* select fast mode (static) */
106626 +        if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
106627 +        return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
106628 +    }
106631 +/* Avoids the FORCE_INLINE of the _body() function. */
106632 +static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
106634 +    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
106637 +#if DYNAMIC_BMI2
106638 +TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
106640 +    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
106642 +#endif
106644 +size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
106646 +#if DYNAMIC_BMI2
106647 +    if (bmi2) {
106648 +        return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
106649 +    }
106650 +#endif
106651 +    (void)bmi2;
106652 +    return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
106656 +typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
106660 +#endif   /* FSE_COMMONDEFS_ONLY */
106661 diff --git a/lib/zstd/common/huf.h b/lib/zstd/common/huf.h
106662 new file mode 100644
106663 index 000000000000..b5dbd386c5e6
106664 --- /dev/null
106665 +++ b/lib/zstd/common/huf.h
106666 @@ -0,0 +1,355 @@
106667 +/* ******************************************************************
106668 + * huff0 huffman codec,
106669 + * part of Finite State Entropy library
106670 + * Copyright (c) Yann Collet, Facebook, Inc.
106672 + * You can contact the author at :
106673 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
106675 + * This source code is licensed under both the BSD-style license (found in the
106676 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
106677 + * in the COPYING file in the root directory of this source tree).
106678 + * You may select, at your option, one of the above-listed licenses.
106679 +****************************************************************** */
106682 +#ifndef HUF_H_298734234
106683 +#define HUF_H_298734234
106685 +/* *** Dependencies *** */
106686 +#include "zstd_deps.h"    /* size_t */
106689 +/* *** library symbols visibility *** */
106690 +/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
106691 + *        HUF symbols remain "private" (internal symbols for library only).
106692 + *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
106693 +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
106694 +#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
106695 +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
106696 +#  define HUF_PUBLIC_API __declspec(dllexport)
106697 +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
106698 +#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
106699 +#else
106700 +#  define HUF_PUBLIC_API
106701 +#endif
106704 +/* ========================== */
106705 +/* ***  simple functions  *** */
106706 +/* ========================== */
106708 +/** HUF_compress() :
106709 + *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
106710 + * 'dst' buffer must be already allocated.
106711 + *  Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
106712 + * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
106713 + * @return : size of compressed data (<= `dstCapacity`).
106714 + *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
106715 + *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName())
106716 + */
106717 +HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
106718 +                             const void* src, size_t srcSize);
106720 +/** HUF_decompress() :
106721 + *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
106722 + *  into already allocated buffer 'dst', of minimum size 'dstSize'.
106723 + * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
106724 + *  Note : in contrast with FSE, HUF_decompress can regenerate
106725 + *         RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
106726 + *         because it knows size to regenerate (originalSize).
106727 + * @return : size of regenerated data (== originalSize),
106728 + *           or an error code, which can be tested using HUF_isError()
106729 + */
106730 +HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,
106731 +                               const void* cSrc, size_t cSrcSize);
106734 +/* ***   Tool functions *** */
106735 +#define HUF_BLOCKSIZE_MAX (128 * 1024)                  /**< maximum input size for a single block compressed with HUF_compress */
106736 +HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */
106738 +/* Error Management */
106739 +HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */
106740 +HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */
106743 +/* ***   Advanced function   *** */
106745 +/** HUF_compress2() :
106746 + *  Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
106747 + * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
106748 + * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
106749 +HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
106750 +                               const void* src, size_t srcSize,
106751 +                               unsigned maxSymbolValue, unsigned tableLog);
106753 +/** HUF_compress4X_wksp() :
106754 + *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
106755 + * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
106756 +#define HUF_WORKSPACE_SIZE ((6 << 10) + 256)
106757 +#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
106758 +HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
106759 +                                     const void* src, size_t srcSize,
106760 +                                     unsigned maxSymbolValue, unsigned tableLog,
106761 +                                     void* workSpace, size_t wkspSize);
106763 +#endif   /* HUF_H_298734234 */
106765 +/* ******************************************************************
106766 + *  WARNING !!
106767 + *  The following section contains advanced and experimental definitions
106768 + *  which shall never be used in the context of a dynamic library,
106769 + *  because they are not guaranteed to remain stable in the future.
106770 + *  Only consider them in association with static linking.
106771 + * *****************************************************************/
106772 +#if !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
106773 +#define HUF_H_HUF_STATIC_LINKING_ONLY
106775 +/* *** Dependencies *** */
106776 +#include "mem.h"   /* U32 */
106777 +#define FSE_STATIC_LINKING_ONLY
106778 +#include "fse.h"
106781 +/* *** Constants *** */
106782 +#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
106783 +#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
106784 +#define HUF_SYMBOLVALUE_MAX  255
106786 +#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
106787 +#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
106788 +#  error "HUF_TABLELOG_MAX is too large !"
106789 +#endif
106792 +/* ****************************************
106793 +*  Static allocation
106794 +******************************************/
106795 +/* HUF buffer bounds */
106796 +#define HUF_CTABLEBOUND 129
106797 +#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
106798 +#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
106800 +/* static allocation of HUF's Compression Table */
106801 +/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
106802 +struct HUF_CElt_s {
106803 +  U16  val;
106804 +  BYTE nbBits;
106805 +};   /* typedef'd to HUF_CElt */
106806 +typedef struct HUF_CElt_s HUF_CElt;   /* consider it an incomplete type */
106807 +#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */
106808 +#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
106809 +#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
106810 +    HUF_CElt name[HUF_CTABLE_SIZE_U32(maxSymbolValue)] /* no final ; */
106812 +/* static allocation of HUF's DTable */
106813 +typedef U32 HUF_DTable;
106814 +#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
106815 +#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
106816 +        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
106817 +#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
106818 +        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
106821 +/* ****************************************
106822 +*  Advanced decompression functions
106823 +******************************************/
106824 +size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
106825 +#ifndef HUF_FORCE_DECOMPRESS_X1
106826 +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
106827 +#endif
106829 +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
106830 +size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
106831 +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
106832 +size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
106833 +size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
106834 +#ifndef HUF_FORCE_DECOMPRESS_X1
106835 +size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
106836 +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
106837 +#endif
106840 +/* ****************************************
106841 + *  HUF detailed API
106842 + * ****************************************/
106844 +/*! HUF_compress() does the following:
106845 + *  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
106846 + *  2. (optional) refine tableLog using HUF_optimalTableLog()
106847 + *  3. build Huffman table from count using HUF_buildCTable()
106848 + *  4. save Huffman table to memory buffer using HUF_writeCTable()
106849 + *  5. encode the data stream using HUF_compress4X_usingCTable()
106851 + *  The following API allows targeting specific sub-functions for advanced tasks.
106852 + *  For example, it's possible to compress several blocks using the same 'CTable',
106853 + *  or to save and regenerate 'CTable' using external methods.
106854 + */
106855 +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
106856 +size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
106857 +size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
106858 +size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
106859 +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
106860 +size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
106861 +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
106863 +typedef enum {
106864 +   HUF_repeat_none,  /**< Cannot use the previous table */
106865 +   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
106866 +   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
106867 + } HUF_repeat;
106868 +/** HUF_compress4X_repeat() :
106869 + *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
106870 + *  If it uses hufTable it does not modify hufTable or repeat.
106871 + *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
106872 + *  If preferRepeat then the old table will always be used if valid. */
106873 +size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
106874 +                       const void* src, size_t srcSize,
106875 +                       unsigned maxSymbolValue, unsigned tableLog,
106876 +                       void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
106877 +                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
106879 +/** HUF_buildCTable_wksp() :
106880 + *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
106881 + * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
106882 + */
106883 +#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
106884 +#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
106885 +size_t HUF_buildCTable_wksp (HUF_CElt* tree,
106886 +                       const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
106887 +                             void* workSpace, size_t wkspSize);
106889 +/*! HUF_readStats() :
106890 + *  Read compact Huffman tree, saved by HUF_writeCTable().
106891 + * `huffWeight` is destination buffer.
106892 + * @return : size read from `src` , or an error Code .
106893 + *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
106894 +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
106895 +                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
106896 +                     const void* src, size_t srcSize);
106898 +/*! HUF_readStats_wksp() :
106899 + * Same as HUF_readStats() but takes an external workspace which must be
106900 + * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
106901 + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
106902 + */
106903 +#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
106904 +#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
106905 +size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
106906 +                          U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
106907 +                          const void* src, size_t srcSize,
106908 +                          void* workspace, size_t wkspSize,
106909 +                          int bmi2);
106911 +/** HUF_readCTable() :
106912 + *  Loading a CTable saved with HUF_writeCTable() */
106913 +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
106915 +/** HUF_getNbBits() :
106916 + *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
106917 + *  Note 1 : is not inlined, as HUF_CElt definition is private
106918 + *  Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
106919 +U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
106922 + * HUF_decompress() does the following:
106923 + * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
106924 + * 2. build Huffman table from save, using HUF_readDTableX?()
106925 + * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
106926 + */
106928 +/** HUF_selectDecoder() :
106929 + *  Tells which decoder is likely to decode faster,
106930 + *  based on a set of pre-computed metrics.
106931 + * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
106932 + *  Assumption : 0 < dstSize <= 128 KB */
106933 +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
106936 + *  The minimum workspace size for the `workSpace` used in
106937 + *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
106939 + *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
106940 + *  HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15.
106941 + *  Buffer overflow errors may potentially occur if code modifications result in
106942 + *  a required workspace size greater than that specified in the following
106943 + *  macro.
106944 + */
106945 +#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
106946 +#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
106948 +#ifndef HUF_FORCE_DECOMPRESS_X2
106949 +size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
106950 +size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
106951 +#endif
106952 +#ifndef HUF_FORCE_DECOMPRESS_X1
106953 +size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
106954 +size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
106955 +#endif
106957 +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
106958 +#ifndef HUF_FORCE_DECOMPRESS_X2
106959 +size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
106960 +#endif
106961 +#ifndef HUF_FORCE_DECOMPRESS_X1
106962 +size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
106963 +#endif
106966 +/* ====================== */
106967 +/* single stream variants */
106968 +/* ====================== */
106970 +size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
106971 +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
106972 +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
106973 +/** HUF_compress1X_repeat() :
106974 + *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
106975 + *  If it uses hufTable it does not modify hufTable or repeat.
106976 + *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
106977 + *  If preferRepeat then the old table will always be used if valid. */
106978 +size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
106979 +                       const void* src, size_t srcSize,
106980 +                       unsigned maxSymbolValue, unsigned tableLog,
106981 +                       void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
106982 +                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
106984 +size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
106985 +#ifndef HUF_FORCE_DECOMPRESS_X1
106986 +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
106987 +#endif
106989 +size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
106990 +size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
106991 +#ifndef HUF_FORCE_DECOMPRESS_X2
106992 +size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
106993 +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
106994 +#endif
106995 +#ifndef HUF_FORCE_DECOMPRESS_X1
106996 +size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
106997 +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
106998 +#endif
107000 +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of sing or double symbol decoder, based on DTable */
107001 +#ifndef HUF_FORCE_DECOMPRESS_X2
107002 +size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
107003 +#endif
107004 +#ifndef HUF_FORCE_DECOMPRESS_X1
107005 +size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
107006 +#endif
107008 +/* BMI2 variants.
107009 + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
107010 + */
107011 +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
107012 +#ifndef HUF_FORCE_DECOMPRESS_X2
107013 +size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
107014 +#endif
107015 +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
107016 +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
107017 +#ifndef HUF_FORCE_DECOMPRESS_X2
107018 +size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
107019 +#endif
107021 +#endif /* HUF_STATIC_LINKING_ONLY */
107022 diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h
107023 new file mode 100644
107024 index 000000000000..4b5db5756a6f
107025 --- /dev/null
107026 +++ b/lib/zstd/common/mem.h
107027 @@ -0,0 +1,259 @@
107028 +/* SPDX-License-Identifier: GPL-2.0-only */
107030 + * Copyright (c) Yann Collet, Facebook, Inc.
107031 + * All rights reserved.
107033 + * This source code is licensed under both the BSD-style license (found in the
107034 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
107035 + * in the COPYING file in the root directory of this source tree).
107036 + * You may select, at your option, one of the above-listed licenses.
107037 + */
107039 +#ifndef MEM_H_MODULE
107040 +#define MEM_H_MODULE
107042 +/*-****************************************
107043 +*  Dependencies
107044 +******************************************/
107045 +#include <asm/unaligned.h>  /* get_unaligned, put_unaligned* */
107046 +#include <linux/compiler.h>  /* inline */
107047 +#include <linux/swab.h>  /* swab32, swab64 */
107048 +#include <linux/types.h>  /* size_t, ptrdiff_t */
107049 +#include "debug.h"  /* DEBUG_STATIC_ASSERT */
107051 +/*-****************************************
107052 +*  Compiler specifics
107053 +******************************************/
107054 +#define MEM_STATIC static inline
107056 +/*-**************************************************************
107057 +*  Basic Types
107058 +*****************************************************************/
107059 +typedef uint8_t  BYTE;
107060 +typedef uint16_t U16;
107061 +typedef int16_t  S16;
107062 +typedef uint32_t U32;
107063 +typedef int32_t  S32;
107064 +typedef uint64_t U64;
107065 +typedef int64_t  S64;
107067 +/*-**************************************************************
107068 +*  Memory I/O API
107069 +*****************************************************************/
107070 +/*=== Static platform detection ===*/
107071 +MEM_STATIC unsigned MEM_32bits(void);
107072 +MEM_STATIC unsigned MEM_64bits(void);
107073 +MEM_STATIC unsigned MEM_isLittleEndian(void);
107075 +/*=== Native unaligned read/write ===*/
107076 +MEM_STATIC U16 MEM_read16(const void* memPtr);
107077 +MEM_STATIC U32 MEM_read32(const void* memPtr);
107078 +MEM_STATIC U64 MEM_read64(const void* memPtr);
107079 +MEM_STATIC size_t MEM_readST(const void* memPtr);
107081 +MEM_STATIC void MEM_write16(void* memPtr, U16 value);
107082 +MEM_STATIC void MEM_write32(void* memPtr, U32 value);
107083 +MEM_STATIC void MEM_write64(void* memPtr, U64 value);
107085 +/*=== Little endian unaligned read/write ===*/
107086 +MEM_STATIC U16 MEM_readLE16(const void* memPtr);
107087 +MEM_STATIC U32 MEM_readLE24(const void* memPtr);
107088 +MEM_STATIC U32 MEM_readLE32(const void* memPtr);
107089 +MEM_STATIC U64 MEM_readLE64(const void* memPtr);
107090 +MEM_STATIC size_t MEM_readLEST(const void* memPtr);
107092 +MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
107093 +MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
107094 +MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
107095 +MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
107096 +MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
107098 +/*=== Big endian unaligned read/write ===*/
107099 +MEM_STATIC U32 MEM_readBE32(const void* memPtr);
107100 +MEM_STATIC U64 MEM_readBE64(const void* memPtr);
107101 +MEM_STATIC size_t MEM_readBEST(const void* memPtr);
107103 +MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
107104 +MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
107105 +MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
107107 +/*=== Byteswap ===*/
107108 +MEM_STATIC U32 MEM_swap32(U32 in);
107109 +MEM_STATIC U64 MEM_swap64(U64 in);
107110 +MEM_STATIC size_t MEM_swapST(size_t in);
107112 +/*-**************************************************************
107113 +*  Memory I/O Implementation
107114 +*****************************************************************/
107115 +MEM_STATIC unsigned MEM_32bits(void)
107117 +    return sizeof(size_t) == 4;
107120 +MEM_STATIC unsigned MEM_64bits(void)
107122 +    return sizeof(size_t) == 8;
107125 +#if defined(__LITTLE_ENDIAN)
107126 +#define MEM_LITTLE_ENDIAN 1
107127 +#else
107128 +#define MEM_LITTLE_ENDIAN 0
107129 +#endif
107131 +MEM_STATIC unsigned MEM_isLittleEndian(void)
107133 +    return MEM_LITTLE_ENDIAN;
107136 +MEM_STATIC U16 MEM_read16(const void *memPtr)
107138 +    return get_unaligned((const U16 *)memPtr);
107141 +MEM_STATIC U32 MEM_read32(const void *memPtr)
107143 +    return get_unaligned((const U32 *)memPtr);
107146 +MEM_STATIC U64 MEM_read64(const void *memPtr)
107148 +    return get_unaligned((const U64 *)memPtr);
107151 +MEM_STATIC size_t MEM_readST(const void *memPtr)
107153 +    return get_unaligned((const size_t *)memPtr);
107156 +MEM_STATIC void MEM_write16(void *memPtr, U16 value)
107158 +    put_unaligned(value, (U16 *)memPtr);
107161 +MEM_STATIC void MEM_write32(void *memPtr, U32 value)
107163 +    put_unaligned(value, (U32 *)memPtr);
107166 +MEM_STATIC void MEM_write64(void *memPtr, U64 value)
107168 +    put_unaligned(value, (U64 *)memPtr);
107171 +/*=== Little endian r/w ===*/
107173 +MEM_STATIC U16 MEM_readLE16(const void *memPtr)
107175 +    return get_unaligned_le16(memPtr);
107178 +MEM_STATIC void MEM_writeLE16(void *memPtr, U16 val)
107180 +    put_unaligned_le16(val, memPtr);
107183 +MEM_STATIC U32 MEM_readLE24(const void *memPtr)
107185 +    return MEM_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16);
107188 +MEM_STATIC void MEM_writeLE24(void *memPtr, U32 val)
107190 +       MEM_writeLE16(memPtr, (U16)val);
107191 +       ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
107194 +MEM_STATIC U32 MEM_readLE32(const void *memPtr)
107196 +    return get_unaligned_le32(memPtr);
107199 +MEM_STATIC void MEM_writeLE32(void *memPtr, U32 val32)
107201 +    put_unaligned_le32(val32, memPtr);
107204 +MEM_STATIC U64 MEM_readLE64(const void *memPtr)
107206 +    return get_unaligned_le64(memPtr);
107209 +MEM_STATIC void MEM_writeLE64(void *memPtr, U64 val64)
107211 +    put_unaligned_le64(val64, memPtr);
107214 +MEM_STATIC size_t MEM_readLEST(const void *memPtr)
107216 +       if (MEM_32bits())
107217 +               return (size_t)MEM_readLE32(memPtr);
107218 +       else
107219 +               return (size_t)MEM_readLE64(memPtr);
107222 +MEM_STATIC void MEM_writeLEST(void *memPtr, size_t val)
107224 +       if (MEM_32bits())
107225 +               MEM_writeLE32(memPtr, (U32)val);
107226 +       else
107227 +               MEM_writeLE64(memPtr, (U64)val);
107230 +/*=== Big endian r/w ===*/
107232 +MEM_STATIC U32 MEM_readBE32(const void *memPtr)
107234 +    return get_unaligned_be32(memPtr);
107237 +MEM_STATIC void MEM_writeBE32(void *memPtr, U32 val32)
107239 +    put_unaligned_be32(val32, memPtr);
107242 +MEM_STATIC U64 MEM_readBE64(const void *memPtr)
107244 +    return get_unaligned_be64(memPtr);
107247 +MEM_STATIC void MEM_writeBE64(void *memPtr, U64 val64)
107249 +    put_unaligned_be64(val64, memPtr);
107252 +MEM_STATIC size_t MEM_readBEST(const void *memPtr)
107254 +       if (MEM_32bits())
107255 +               return (size_t)MEM_readBE32(memPtr);
107256 +       else
107257 +               return (size_t)MEM_readBE64(memPtr);
107260 +MEM_STATIC void MEM_writeBEST(void *memPtr, size_t val)
107262 +       if (MEM_32bits())
107263 +               MEM_writeBE32(memPtr, (U32)val);
107264 +       else
107265 +               MEM_writeBE64(memPtr, (U64)val);
107268 +MEM_STATIC U32 MEM_swap32(U32 in)
107270 +    return swab32(in);
107273 +MEM_STATIC U64 MEM_swap64(U64 in)
107275 +    return swab64(in);
107278 +MEM_STATIC size_t MEM_swapST(size_t in)
107280 +    if (MEM_32bits())
107281 +        return (size_t)MEM_swap32((U32)in);
107282 +    else
107283 +        return (size_t)MEM_swap64((U64)in);
107286 +#endif /* MEM_H_MODULE */
107287 diff --git a/lib/zstd/common/zstd_common.c b/lib/zstd/common/zstd_common.c
107288 new file mode 100644
107289 index 000000000000..3d7e35b309b5
107290 --- /dev/null
107291 +++ b/lib/zstd/common/zstd_common.c
107292 @@ -0,0 +1,83 @@
107294 + * Copyright (c) Yann Collet, Facebook, Inc.
107295 + * All rights reserved.
107297 + * This source code is licensed under both the BSD-style license (found in the
107298 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
107299 + * in the COPYING file in the root directory of this source tree).
107300 + * You may select, at your option, one of the above-listed licenses.
107301 + */
107305 +/*-*************************************
107306 +*  Dependencies
107307 +***************************************/
107308 +#define ZSTD_DEPS_NEED_MALLOC
107309 +#include "zstd_deps.h"   /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
107310 +#include "error_private.h"
107311 +#include "zstd_internal.h"
107314 +/*-****************************************
107315 +*  Version
107316 +******************************************/
107317 +unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
107319 +const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
107322 +/*-****************************************
107323 +*  ZSTD Error Management
107324 +******************************************/
107325 +#undef ZSTD_isError   /* defined within zstd_internal.h */
107326 +/*! ZSTD_isError() :
107327 + *  tells if a return value is an error code
107328 + *  symbol is required for external callers */
107329 +unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
107331 +/*! ZSTD_getErrorName() :
107332 + *  provides error code string from function result (useful for debugging) */
107333 +const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
107335 +/*! ZSTD_getError() :
107336 + *  convert a `size_t` function result into a proper ZSTD_errorCode enum */
107337 +ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
107339 +/*! ZSTD_getErrorString() :
107340 + *  provides error code string from enum */
107341 +const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
107345 +/*=**************************************************************
107346 +*  Custom allocator
107347 +****************************************************************/
107348 +void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
107350 +    if (customMem.customAlloc)
107351 +        return customMem.customAlloc(customMem.opaque, size);
107352 +    return ZSTD_malloc(size);
107355 +void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
107357 +    if (customMem.customAlloc) {
107358 +        /* calloc implemented as malloc+memset;
107359 +         * not as efficient as calloc, but next best guess for custom malloc */
107360 +        void* const ptr = customMem.customAlloc(customMem.opaque, size);
107361 +        ZSTD_memset(ptr, 0, size);
107362 +        return ptr;
107363 +    }
107364 +    return ZSTD_calloc(1, size);
107367 +void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
107369 +    if (ptr!=NULL) {
107370 +        if (customMem.customFree)
107371 +            customMem.customFree(customMem.opaque, ptr);
107372 +        else
107373 +            ZSTD_free(ptr);
107374 +    }
107376 diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h
107377 new file mode 100644
107378 index 000000000000..853b72426215
107379 --- /dev/null
107380 +++ b/lib/zstd/common/zstd_deps.h
107381 @@ -0,0 +1,125 @@
107382 +/* SPDX-License-Identifier: GPL-2.0-only */
107384 + * Copyright (c) Facebook, Inc.
107385 + * All rights reserved.
107387 + * This source code is licensed under both the BSD-style license (found in the
107388 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
107389 + * in the COPYING file in the root directory of this source tree).
107390 + * You may select, at your option, one of the above-listed licenses.
107391 + */
107394 + * This file provides common libc dependencies that zstd requires.
107395 + * The purpose is to allow replacing this file with a custom implementation
107396 + * to compile zstd without libc support.
107397 + */
107399 +/* Need:
107400 + * NULL
107401 + * INT_MAX
107402 + * UINT_MAX
107403 + * ZSTD_memcpy()
107404 + * ZSTD_memset()
107405 + * ZSTD_memmove()
107406 + */
107407 +#ifndef ZSTD_DEPS_COMMON
107408 +#define ZSTD_DEPS_COMMON
107410 +#include <linux/limits.h>
107411 +#include <linux/stddef.h>
107413 +#define ZSTD_memcpy(d,s,n) __builtin_memcpy((d),(s),(n))
107414 +#define ZSTD_memmove(d,s,n) __builtin_memmove((d),(s),(n))
107415 +#define ZSTD_memset(d,s,n) __builtin_memset((d),(s),(n))
107417 +#endif /* ZSTD_DEPS_COMMON */
107420 + * Define malloc as always failing. That means the user must
107421 + * either use ZSTD_customMem or statically allocate memory.
107422 + * Need:
107423 + * ZSTD_malloc()
107424 + * ZSTD_free()
107425 + * ZSTD_calloc()
107426 + */
107427 +#ifdef ZSTD_DEPS_NEED_MALLOC
107428 +#ifndef ZSTD_DEPS_MALLOC
107429 +#define ZSTD_DEPS_MALLOC
107431 +#define ZSTD_malloc(s) ({ (void)(s); NULL; })
107432 +#define ZSTD_free(p) ((void)(p))
107433 +#define ZSTD_calloc(n,s) ({ (void)(n); (void)(s); NULL; })
107435 +#endif /* ZSTD_DEPS_MALLOC */
107436 +#endif /* ZSTD_DEPS_NEED_MALLOC */
107439 + * Provides 64-bit math support.
107440 + * Need:
107441 + * U64 ZSTD_div64(U64 dividend, U32 divisor)
107442 + */
107443 +#ifdef ZSTD_DEPS_NEED_MATH64
107444 +#ifndef ZSTD_DEPS_MATH64
107445 +#define ZSTD_DEPS_MATH64
107447 +#include <linux/math64.h>
107449 +static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
107450 +  return div_u64(dividend, divisor);
107453 +#endif /* ZSTD_DEPS_MATH64 */
107454 +#endif /* ZSTD_DEPS_NEED_MATH64 */
107457 + * This is only requested when DEBUGLEVEL >= 1, meaning
107458 + * it is disabled in production.
107459 + * Need:
107460 + * assert()
107461 + */
107462 +#ifdef ZSTD_DEPS_NEED_ASSERT
107463 +#ifndef ZSTD_DEPS_ASSERT
107464 +#define ZSTD_DEPS_ASSERT
107466 +#include <linux/kernel.h>
107468 +#define assert(x) WARN_ON((x))
107470 +#endif /* ZSTD_DEPS_ASSERT */
107471 +#endif /* ZSTD_DEPS_NEED_ASSERT */
107474 + * This is only requested when DEBUGLEVEL >= 2, meaning
107475 + * it is disabled in production.
107476 + * Need:
107477 + * ZSTD_DEBUG_PRINT()
107478 + */
107479 +#ifdef ZSTD_DEPS_NEED_IO
107480 +#ifndef ZSTD_DEPS_IO
107481 +#define ZSTD_DEPS_IO
107483 +#include <linux/printk.h>
107485 +#define ZSTD_DEBUG_PRINT(...) pr_debug(__VA_ARGS__)
107487 +#endif /* ZSTD_DEPS_IO */
107488 +#endif /* ZSTD_DEPS_NEED_IO */
107491 + * Only requested when MSAN is enabled.
107492 + * Need:
107493 + * intptr_t
107494 + */
107495 +#ifdef ZSTD_DEPS_NEED_STDINT
107496 +#ifndef ZSTD_DEPS_STDINT
107497 +#define ZSTD_DEPS_STDINT
107500 + * The Linux Kernel doesn't provide intptr_t, only uintptr_t, which
107501 + * is an unsigned long.
107502 + */
107503 +typedef long intptr_t;
107505 +#endif /* ZSTD_DEPS_STDINT */
107506 +#endif /* ZSTD_DEPS_NEED_STDINT */
107507 diff --git a/lib/zstd/common/zstd_internal.h b/lib/zstd/common/zstd_internal.h
107508 new file mode 100644
107509 index 000000000000..1f939cbe05ed
107510 --- /dev/null
107511 +++ b/lib/zstd/common/zstd_internal.h
107512 @@ -0,0 +1,450 @@
107514 + * Copyright (c) Yann Collet, Facebook, Inc.
107515 + * All rights reserved.
107517 + * This source code is licensed under both the BSD-style license (found in the
107518 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
107519 + * in the COPYING file in the root directory of this source tree).
107520 + * You may select, at your option, one of the above-listed licenses.
107521 + */
107523 +#ifndef ZSTD_CCOMMON_H_MODULE
107524 +#define ZSTD_CCOMMON_H_MODULE
107526 +/* this module contains definitions which must be identical
107527 + * across compression, decompression and dictBuilder.
107528 + * It also contains a few functions useful to at least 2 of them
107529 + * and which benefit from being inlined */
107531 +/*-*************************************
107532 +*  Dependencies
107533 +***************************************/
107534 +#include "compiler.h"
107535 +#include "mem.h"
107536 +#include "debug.h"                 /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
107537 +#include "error_private.h"
107538 +#define ZSTD_STATIC_LINKING_ONLY
107539 +#include <linux/zstd.h>
107540 +#define FSE_STATIC_LINKING_ONLY
107541 +#include "fse.h"
107542 +#define HUF_STATIC_LINKING_ONLY
107543 +#include "huf.h"
107544 +#include <linux/xxhash.h>                /* XXH_reset, update, digest */
107545 +#define ZSTD_TRACE 0
107548 +/* ---- static assert (debug) --- */
107549 +#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
107550 +#define ZSTD_isError ERR_isError   /* for inlining */
107551 +#define FSE_isError  ERR_isError
107552 +#define HUF_isError  ERR_isError
107555 +/*-*************************************
107556 +*  shared macros
107557 +***************************************/
107558 +#undef MIN
107559 +#undef MAX
107560 +#define MIN(a,b) ((a)<(b) ? (a) : (b))
107561 +#define MAX(a,b) ((a)>(b) ? (a) : (b))
107564 + * Ignore: this is an internal helper.
107566 + * This is a helper function to help force C99-correctness during compilation.
107567 + * Under strict compilation modes, variadic macro arguments can't be empty.
107568 + * However, variadic function arguments can be. Using a function therefore lets
107569 + * us statically check that at least one (string) argument was passed,
107570 + * independent of the compilation flags.
107571 + */
107572 +static INLINE_KEYWORD UNUSED_ATTR
107573 +void _force_has_format_string(const char *format, ...) {
107574 +  (void)format;
107578 + * Ignore: this is an internal helper.
107580 + * We want to force this function invocation to be syntactically correct, but
107581 + * we don't want to force runtime evaluation of its arguments.
107582 + */
107583 +#define _FORCE_HAS_FORMAT_STRING(...) \
107584 +  if (0) { \
107585 +    _force_has_format_string(__VA_ARGS__); \
107586 +  }
107589 + * Return the specified error if the condition evaluates to true.
107591 + * In debug modes, prints additional information.
107592 + * In order to do that (particularly, printing the conditional that failed),
107593 + * this can't just wrap RETURN_ERROR().
107594 + */
107595 +#define RETURN_ERROR_IF(cond, err, ...) \
107596 +  if (cond) { \
107597 +    RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
107598 +           __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
107599 +    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
107600 +    RAWLOG(3, ": " __VA_ARGS__); \
107601 +    RAWLOG(3, "\n"); \
107602 +    return ERROR(err); \
107603 +  }
107606 + * Unconditionally return the specified error.
107608 + * In debug modes, prints additional information.
107609 + */
107610 +#define RETURN_ERROR(err, ...) \
107611 +  do { \
107612 +    RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
107613 +           __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
107614 +    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
107615 +    RAWLOG(3, ": " __VA_ARGS__); \
107616 +    RAWLOG(3, "\n"); \
107617 +    return ERROR(err); \
107618 +  } while(0);
107621 + * If the provided expression evaluates to an error code, returns that error code.
107623 + * In debug modes, prints additional information.
107624 + */
107625 +#define FORWARD_IF_ERROR(err, ...) \
107626 +  do { \
107627 +    size_t const err_code = (err); \
107628 +    if (ERR_isError(err_code)) { \
107629 +      RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
107630 +             __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
107631 +      _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
107632 +      RAWLOG(3, ": " __VA_ARGS__); \
107633 +      RAWLOG(3, "\n"); \
107634 +      return err_code; \
107635 +    } \
107636 +  } while(0);
107639 +/*-*************************************
107640 +*  Common constants
107641 +***************************************/
107642 +#define ZSTD_OPT_NUM    (1<<12)
107644 +#define ZSTD_REP_NUM      3                 /* number of repcodes */
107645 +#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
107646 +static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
107648 +#define KB *(1 <<10)
107649 +#define MB *(1 <<20)
107650 +#define GB *(1U<<30)
107652 +#define BIT7 128
107653 +#define BIT6  64
107654 +#define BIT5  32
107655 +#define BIT4  16
107656 +#define BIT1   2
107657 +#define BIT0   1
107659 +#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
107660 +static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
107661 +static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
107663 +#define ZSTD_FRAMEIDSIZE 4   /* magic number size */
107665 +#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
107666 +static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
107667 +typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
107669 +#define ZSTD_FRAMECHECKSUMSIZE 4
107671 +#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
107672 +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
107674 +#define HufLog 12
107675 +typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
107677 +#define LONGNBSEQ 0x7F00
107679 +#define MINMATCH 3
107681 +#define Litbits  8
107682 +#define MaxLit ((1<<Litbits) - 1)
107683 +#define MaxML   52
107684 +#define MaxLL   35
107685 +#define DefaultMaxOff 28
107686 +#define MaxOff  31
107687 +#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
107688 +#define MLFSELog    9
107689 +#define LLFSELog    9
107690 +#define OffFSELog   8
107691 +#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
107693 +#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
107694 +/* Each table cannot take more than #symbols * FSELog bits */
107695 +#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
107697 +static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = {
107698 +     0, 0, 0, 0, 0, 0, 0, 0,
107699 +     0, 0, 0, 0, 0, 0, 0, 0,
107700 +     1, 1, 1, 1, 2, 2, 3, 3,
107701 +     4, 6, 7, 8, 9,10,11,12,
107702 +    13,14,15,16
107704 +static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
107705 +     4, 3, 2, 2, 2, 2, 2, 2,
107706 +     2, 2, 2, 2, 2, 1, 1, 1,
107707 +     2, 2, 2, 2, 2, 2, 2, 2,
107708 +     2, 3, 2, 1, 1, 1, 1, 1,
107709 +    -1,-1,-1,-1
107711 +#define LL_DEFAULTNORMLOG 6  /* for static allocation */
107712 +static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
107714 +static UNUSED_ATTR const U32 ML_bits[MaxML+1] = {
107715 +     0, 0, 0, 0, 0, 0, 0, 0,
107716 +     0, 0, 0, 0, 0, 0, 0, 0,
107717 +     0, 0, 0, 0, 0, 0, 0, 0,
107718 +     0, 0, 0, 0, 0, 0, 0, 0,
107719 +     1, 1, 1, 1, 2, 2, 3, 3,
107720 +     4, 4, 5, 7, 8, 9,10,11,
107721 +    12,13,14,15,16
107723 +static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
107724 +     1, 4, 3, 2, 2, 2, 2, 2,
107725 +     2, 1, 1, 1, 1, 1, 1, 1,
107726 +     1, 1, 1, 1, 1, 1, 1, 1,
107727 +     1, 1, 1, 1, 1, 1, 1, 1,
107728 +     1, 1, 1, 1, 1, 1, 1, 1,
107729 +     1, 1, 1, 1, 1, 1,-1,-1,
107730 +    -1,-1,-1,-1,-1
107732 +#define ML_DEFAULTNORMLOG 6  /* for static allocation */
107733 +static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
107735 +static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
107736 +     1, 1, 1, 1, 1, 1, 2, 2,
107737 +     2, 1, 1, 1, 1, 1, 1, 1,
107738 +     1, 1, 1, 1, 1, 1, 1, 1,
107739 +    -1,-1,-1,-1,-1
107741 +#define OF_DEFAULTNORMLOG 5  /* for static allocation */
107742 +static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
107745 +/*-*******************************************
107746 +*  Shared functions to include for inlining
107747 +*********************************************/
107748 +static void ZSTD_copy8(void* dst, const void* src) {
107749 +    ZSTD_memcpy(dst, src, 8);
107752 +#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
107753 +static void ZSTD_copy16(void* dst, const void* src) {
107754 +    ZSTD_memcpy(dst, src, 16);
107756 +#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
107758 +#define WILDCOPY_OVERLENGTH 32
107759 +#define WILDCOPY_VECLEN 16
107761 +typedef enum {
107762 +    ZSTD_no_overlap,
107763 +    ZSTD_overlap_src_before_dst
107764 +    /*  ZSTD_overlap_dst_before_src, */
107765 +} ZSTD_overlap_e;
107767 +/*! ZSTD_wildcopy() :
107768 + *  Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
107769 + *  @param ovtype controls the overlap detection
107770 + *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
107771 + *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
107772 + *           The src buffer must be before the dst buffer.
107773 + */
107774 +MEM_STATIC FORCE_INLINE_ATTR
107775 +void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
107777 +    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
107778 +    const BYTE* ip = (const BYTE*)src;
107779 +    BYTE* op = (BYTE*)dst;
107780 +    BYTE* const oend = op + length;
107782 +    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));
107784 +    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
107785 +        /* Handle short offset copies. */
107786 +        do {
107787 +            COPY8(op, ip)
107788 +        } while (op < oend);
107789 +    } else {
107790 +        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
107791 +        /* Separate out the first COPY16() call because the copy length is
107792 +         * almost certain to be short, so the branches have different
107793 +         * probabilities. Since it is almost certain to be short, only do
107794 +         * one COPY16() in the first call. Then, do two calls per loop since
107795 +         * at that point it is more likely to have a high trip count.
107796 +         */
107797 +#ifdef __aarch64__
107798 +        do {
107799 +            COPY16(op, ip);
107800 +        }
107801 +        while (op < oend);
107802 +#else
107803 +        ZSTD_copy16(op, ip);
107804 +        if (16 >= length) return;
107805 +        op += 16;
107806 +        ip += 16;
107807 +        do {
107808 +            COPY16(op, ip);
107809 +            COPY16(op, ip);
107810 +        }
107811 +        while (op < oend);
107812 +#endif
107813 +    }
107816 +MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
107818 +    size_t const length = MIN(dstCapacity, srcSize);
107819 +    if (length > 0) {
107820 +        ZSTD_memcpy(dst, src, length);
107821 +    }
107822 +    return length;
107825 +/* define "workspace is too large" as this number of times larger than needed */
107826 +#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
107828 +/* when workspace is continuously too large
107829 + * during at least this number of times,
107830 + * context's memory usage is considered wasteful,
107831 + * because it's sized to handle a worst case scenario which rarely happens.
107832 + * In which case, resize it down to free some memory */
107833 +#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
107835 +/* Controls whether the input/output buffer is buffered or stable. */
107836 +typedef enum {
107837 +    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
107838 +    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
107839 +} ZSTD_bufferMode_e;
107842 +/*-*******************************************
107843 +*  Private declarations
107844 +*********************************************/
107845 +typedef struct seqDef_s {
107846 +    U32 offset;         /* Offset code of the sequence */
107847 +    U16 litLength;
107848 +    U16 matchLength;
107849 +} seqDef;
107851 +typedef struct {
107852 +    seqDef* sequencesStart;
107853 +    seqDef* sequences;      /* ptr to end of sequences */
107854 +    BYTE* litStart;
107855 +    BYTE* lit;              /* ptr to end of literals */
107856 +    BYTE* llCode;
107857 +    BYTE* mlCode;
107858 +    BYTE* ofCode;
107859 +    size_t maxNbSeq;
107860 +    size_t maxNbLit;
107862 +    /* longLengthPos and longLengthID to allow us to represent either a single litLength or matchLength
107863 +     * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
107864 +     * the existing value of the litLength or matchLength by 0x10000.
107865 +     */
107866 +    U32   longLengthID;   /* 0 == no longLength; 1 == Represent the long literal; 2 == Represent the long match; */
107867 +    U32   longLengthPos;  /* Index of the sequence to apply long length modification to */
107868 +} seqStore_t;
107870 +typedef struct {
107871 +    U32 litLength;
107872 +    U32 matchLength;
107873 +} ZSTD_sequenceLength;
107876 + * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences
107877 + * indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength.
107878 + */
107879 +MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
107881 +    ZSTD_sequenceLength seqLen;
107882 +    seqLen.litLength = seq->litLength;
107883 +    seqLen.matchLength = seq->matchLength + MINMATCH;
107884 +    if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
107885 +        if (seqStore->longLengthID == 1) {
107886 +            seqLen.litLength += 0xFFFF;
107887 +        }
107888 +        if (seqStore->longLengthID == 2) {
107889 +            seqLen.matchLength += 0xFFFF;
107890 +        }
107891 +    }
107892 +    return seqLen;
107896 + * Contains the compressed frame size and an upper-bound for the decompressed frame size.
107897 + * Note: before using `compressedSize`, check for errors using ZSTD_isError().
107898 + *       similarly, before using `decompressedBound`, check for errors using:
107899 + *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
107900 + */
107901 +typedef struct {
107902 +    size_t compressedSize;
107903 +    unsigned long long decompressedBound;
107904 +} ZSTD_frameSizeInfo;   /* decompress & legacy */
107906 +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
107907 +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
107909 +/* custom memory allocation functions */
107910 +void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
107911 +void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
107912 +void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
107915 +MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */
107917 +    assert(val != 0);
107918 +    {
107919 +#   if (__GNUC__ >= 3)   /* GCC Intrinsic */
107920 +        return __builtin_clz (val) ^ 31;
107921 +#   else   /* Software version */
107922 +        static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
107923 +        U32 v = val;
107924 +        v |= v >> 1;
107925 +        v |= v >> 2;
107926 +        v |= v >> 4;
107927 +        v |= v >> 8;
107928 +        v |= v >> 16;
107929 +        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
107930 +#   endif
107931 +    }
107935 +/* ZSTD_invalidateRepCodes() :
107936 + * ensures next compression will not use repcodes from previous block.
107937 + * Note : only works with regular variant;
107938 + *        do not use with extDict variant ! */
107939 +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
107942 +typedef struct {
107943 +    blockType_e blockType;
107944 +    U32 lastBlock;
107945 +    U32 origSize;
107946 +} blockProperties_t;   /* declared here for decompress and fullbench */
107948 +/*! ZSTD_getcBlockSize() :
107949 + *  Provides the size of compressed block from block header `src` */
107950 +/* Used by: decompress, fullbench (does not get its definition from here) */
107951 +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
107952 +                          blockProperties_t* bpPtr);
107954 +/*! ZSTD_decodeSeqHeaders() :
107955 + *  decode sequence header from src */
107956 +/* Used by: decompress, fullbench (does not get its definition from here) */
107957 +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
107958 +                       const void* src, size_t srcSize);
107962 +#endif   /* ZSTD_CCOMMON_H_MODULE */
107963 diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c
107964 deleted file mode 100644
107965 index b080264ed3ad..000000000000
107966 --- a/lib/zstd/compress.c
107967 +++ /dev/null
107968 @@ -1,3485 +0,0 @@
107970 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
107971 - * All rights reserved.
107973 - * This source code is licensed under the BSD-style license found in the
107974 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
107975 - * An additional grant of patent rights can be found in the PATENTS file in the
107976 - * same directory.
107978 - * This program is free software; you can redistribute it and/or modify it under
107979 - * the terms of the GNU General Public License version 2 as published by the
107980 - * Free Software Foundation. This program is dual-licensed; you may select
107981 - * either version 2 of the GNU General Public License ("GPL") or BSD license
107982 - * ("BSD").
107983 - */
107985 -/*-*************************************
107986 -*  Dependencies
107987 -***************************************/
107988 -#include "fse.h"
107989 -#include "huf.h"
107990 -#include "mem.h"
107991 -#include "zstd_internal.h" /* includes zstd.h */
107992 -#include <linux/kernel.h>
107993 -#include <linux/module.h>
107994 -#include <linux/string.h> /* memset */
107996 -/*-*************************************
107997 -*  Constants
107998 -***************************************/
107999 -static const U32 g_searchStrength = 8; /* control skip over incompressible data */
108000 -#define HASH_READ_SIZE 8
108001 -typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
108003 -/*-*************************************
108004 -*  Helper functions
108005 -***************************************/
108006 -size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
108008 -/*-*************************************
108009 -*  Sequence storage
108010 -***************************************/
108011 -static void ZSTD_resetSeqStore(seqStore_t *ssPtr)
108013 -       ssPtr->lit = ssPtr->litStart;
108014 -       ssPtr->sequences = ssPtr->sequencesStart;
108015 -       ssPtr->longLengthID = 0;
108018 -/*-*************************************
108019 -*  Context memory management
108020 -***************************************/
108021 -struct ZSTD_CCtx_s {
108022 -       const BYTE *nextSrc;  /* next block here to continue on curr prefix */
108023 -       const BYTE *base;     /* All regular indexes relative to this position */
108024 -       const BYTE *dictBase; /* extDict indexes relative to this position */
108025 -       U32 dictLimit;  /* below that point, need extDict */
108026 -       U32 lowLimit;    /* below that point, no more data */
108027 -       U32 nextToUpdate;     /* index from which to continue dictionary update */
108028 -       U32 nextToUpdate3;    /* index from which to continue dictionary update */
108029 -       U32 hashLog3;    /* dispatch table : larger == faster, more memory */
108030 -       U32 loadedDictEnd;    /* index of end of dictionary */
108031 -       U32 forceWindow;      /* force back-references to respect limit of 1<<wLog, even for dictionary */
108032 -       U32 forceRawDict;     /* Force loading dictionary in "content-only" mode (no header analysis) */
108033 -       ZSTD_compressionStage_e stage;
108034 -       U32 rep[ZSTD_REP_NUM];
108035 -       U32 repToConfirm[ZSTD_REP_NUM];
108036 -       U32 dictID;
108037 -       ZSTD_parameters params;
108038 -       void *workSpace;
108039 -       size_t workSpaceSize;
108040 -       size_t blockSize;
108041 -       U64 frameContentSize;
108042 -       struct xxh64_state xxhState;
108043 -       ZSTD_customMem customMem;
108045 -       seqStore_t seqStore; /* sequences storage ptrs */
108046 -       U32 *hashTable;
108047 -       U32 *hashTable3;
108048 -       U32 *chainTable;
108049 -       HUF_CElt *hufTable;
108050 -       U32 flagStaticTables;
108051 -       HUF_repeat flagStaticHufTable;
108052 -       FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
108053 -       FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
108054 -       FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
108055 -       unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32];
108058 -size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams)
108060 -       size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
108061 -       U32 const divider = (cParams.searchLength == 3) ? 3 : 4;
108062 -       size_t const maxNbSeq = blockSize / divider;
108063 -       size_t const tokenSpace = blockSize + 11 * maxNbSeq;
108064 -       size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
108065 -       size_t const hSize = ((size_t)1) << cParams.hashLog;
108066 -       U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
108067 -       size_t const h3Size = ((size_t)1) << hashLog3;
108068 -       size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
108069 -       size_t const optSpace =
108070 -           ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
108071 -       size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
108072 -                                    (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
108074 -       return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize);
108077 -static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
108079 -       ZSTD_CCtx *cctx;
108080 -       if (!customMem.customAlloc || !customMem.customFree)
108081 -               return NULL;
108082 -       cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
108083 -       if (!cctx)
108084 -               return NULL;
108085 -       memset(cctx, 0, sizeof(ZSTD_CCtx));
108086 -       cctx->customMem = customMem;
108087 -       return cctx;
108090 -ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize)
108092 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
108093 -       ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem);
108094 -       if (cctx) {
108095 -               cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize);
108096 -       }
108097 -       return cctx;
108100 -size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx)
108102 -       if (cctx == NULL)
108103 -               return 0; /* support free on NULL */
108104 -       ZSTD_free(cctx->workSpace, cctx->customMem);
108105 -       ZSTD_free(cctx, cctx->customMem);
108106 -       return 0; /* reserved as a potential error code in the future */
108109 -const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); }
108111 -static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; }
108113 -/** ZSTD_checkParams() :
108114 -       ensure param values remain within authorized range.
108115 -       @return : 0, or an error code if one value is beyond authorized range */
108116 -size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
108118 -#define CLAMPCHECK(val, min, max)                                       \
108119 -       {                                                               \
108120 -               if ((val < min) | (val > max))                          \
108121 -                       return ERROR(compressionParameter_unsupported); \
108122 -       }
108123 -       CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
108124 -       CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
108125 -       CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
108126 -       CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
108127 -       CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
108128 -       CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
108129 -       if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2)
108130 -               return ERROR(compressionParameter_unsupported);
108131 -       return 0;
108134 -/** ZSTD_cycleLog() :
108135 - *  condition for correct operation : hashLog > 1 */
108136 -static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
108138 -       U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
108139 -       return hashLog - btScale;
108142 -/** ZSTD_adjustCParams() :
108143 -       optimize `cPar` for a given input (`srcSize` and `dictSize`).
108144 -       mostly downsizing to reduce memory consumption and initialization.
108145 -       Both `srcSize` and `dictSize` are optional (use 0 if unknown),
108146 -       but if both are 0, no optimization can be done.
108147 -       Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */
108148 -ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
108150 -       if (srcSize + dictSize == 0)
108151 -               return cPar; /* no size information available : no adjustment */
108153 -       /* resize params, to use less memory when necessary */
108154 -       {
108155 -               U32 const minSrcSize = (srcSize == 0) ? 500 : 0;
108156 -               U64 const rSize = srcSize + dictSize + minSrcSize;
108157 -               if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) {
108158 -                       U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
108159 -                       if (cPar.windowLog > srcLog)
108160 -                               cPar.windowLog = srcLog;
108161 -               }
108162 -       }
108163 -       if (cPar.hashLog > cPar.windowLog)
108164 -               cPar.hashLog = cPar.windowLog;
108165 -       {
108166 -               U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
108167 -               if (cycleLog > cPar.windowLog)
108168 -                       cPar.chainLog -= (cycleLog - cPar.windowLog);
108169 -       }
108171 -       if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
108172 -               cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
108174 -       return cPar;
108177 -static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
108179 -       return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) &
108180 -              (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3));
108183 -/*! ZSTD_continueCCtx() :
108184 -       reuse CCtx without reset (note : requires no dictionary) */
108185 -static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize)
108187 -       U32 const end = (U32)(cctx->nextSrc - cctx->base);
108188 -       cctx->params = params;
108189 -       cctx->frameContentSize = frameContentSize;
108190 -       cctx->lowLimit = end;
108191 -       cctx->dictLimit = end;
108192 -       cctx->nextToUpdate = end + 1;
108193 -       cctx->stage = ZSTDcs_init;
108194 -       cctx->dictID = 0;
108195 -       cctx->loadedDictEnd = 0;
108196 -       {
108197 -               int i;
108198 -               for (i = 0; i < ZSTD_REP_NUM; i++)
108199 -                       cctx->rep[i] = repStartValue[i];
108200 -       }
108201 -       cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
108202 -       xxh64_reset(&cctx->xxhState, 0);
108203 -       return 0;
108206 -typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
108208 -/*! ZSTD_resetCCtx_advanced() :
108209 -       note : `params` must be validated */
108210 -static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp)
108212 -       if (crp == ZSTDcrp_continue)
108213 -               if (ZSTD_equivalentParams(params, zc->params)) {
108214 -                       zc->flagStaticTables = 0;
108215 -                       zc->flagStaticHufTable = HUF_repeat_none;
108216 -                       return ZSTD_continueCCtx(zc, params, frameContentSize);
108217 -               }
108219 -       {
108220 -               size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
108221 -               U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4;
108222 -               size_t const maxNbSeq = blockSize / divider;
108223 -               size_t const tokenSpace = blockSize + 11 * maxNbSeq;
108224 -               size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
108225 -               size_t const hSize = ((size_t)1) << params.cParams.hashLog;
108226 -               U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
108227 -               size_t const h3Size = ((size_t)1) << hashLog3;
108228 -               size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
108229 -               void *ptr;
108231 -               /* Check if workSpace is large enough, alloc a new one if needed */
108232 -               {
108233 -                       size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) +
108234 -                                               (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
108235 -                       size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
108236 -                                                  (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
108237 -                       if (zc->workSpaceSize < neededSpace) {
108238 -                               ZSTD_free(zc->workSpace, zc->customMem);
108239 -                               zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
108240 -                               if (zc->workSpace == NULL)
108241 -                                       return ERROR(memory_allocation);
108242 -                               zc->workSpaceSize = neededSpace;
108243 -                       }
108244 -               }
108246 -               if (crp != ZSTDcrp_noMemset)
108247 -                       memset(zc->workSpace, 0, tableSpace); /* reset tables only */
108248 -               xxh64_reset(&zc->xxhState, 0);
108249 -               zc->hashLog3 = hashLog3;
108250 -               zc->hashTable = (U32 *)(zc->workSpace);
108251 -               zc->chainTable = zc->hashTable + hSize;
108252 -               zc->hashTable3 = zc->chainTable + chainSize;
108253 -               ptr = zc->hashTable3 + h3Size;
108254 -               zc->hufTable = (HUF_CElt *)ptr;
108255 -               zc->flagStaticTables = 0;
108256 -               zc->flagStaticHufTable = HUF_repeat_none;
108257 -               ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
108259 -               zc->nextToUpdate = 1;
108260 -               zc->nextSrc = NULL;
108261 -               zc->base = NULL;
108262 -               zc->dictBase = NULL;
108263 -               zc->dictLimit = 0;
108264 -               zc->lowLimit = 0;
108265 -               zc->params = params;
108266 -               zc->blockSize = blockSize;
108267 -               zc->frameContentSize = frameContentSize;
108268 -               {
108269 -                       int i;
108270 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
108271 -                               zc->rep[i] = repStartValue[i];
108272 -               }
108274 -               if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
108275 -                       zc->seqStore.litFreq = (U32 *)ptr;
108276 -                       zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits);
108277 -                       zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1);
108278 -                       zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1);
108279 -                       ptr = zc->seqStore.offCodeFreq + (MaxOff + 1);
108280 -                       zc->seqStore.matchTable = (ZSTD_match_t *)ptr;
108281 -                       ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1;
108282 -                       zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr;
108283 -                       ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1;
108284 -                       zc->seqStore.litLengthSum = 0;
108285 -               }
108286 -               zc->seqStore.sequencesStart = (seqDef *)ptr;
108287 -               ptr = zc->seqStore.sequencesStart + maxNbSeq;
108288 -               zc->seqStore.llCode = (BYTE *)ptr;
108289 -               zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
108290 -               zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
108291 -               zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
108293 -               zc->stage = ZSTDcs_init;
108294 -               zc->dictID = 0;
108295 -               zc->loadedDictEnd = 0;
108297 -               return 0;
108298 -       }
108301 -/* ZSTD_invalidateRepCodes() :
108302 - * ensures next compression will not use repcodes from previous block.
108303 - * Note : only works with regular variant;
108304 - *        do not use with extDict variant ! */
108305 -void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx)
108307 -       int i;
108308 -       for (i = 0; i < ZSTD_REP_NUM; i++)
108309 -               cctx->rep[i] = 0;
108312 -/*! ZSTD_copyCCtx() :
108313 -*   Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
108314 -*   Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
108315 -*   @return : 0, or an error code */
108316 -size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize)
108318 -       if (srcCCtx->stage != ZSTDcs_init)
108319 -               return ERROR(stage_wrong);
108321 -       memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
108322 -       {
108323 -               ZSTD_parameters params = srcCCtx->params;
108324 -               params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
108325 -               ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
108326 -       }
108328 -       /* copy tables */
108329 -       {
108330 -               size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
108331 -               size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
108332 -               size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
108333 -               size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
108334 -               memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
108335 -       }
108337 -       /* copy dictionary offsets */
108338 -       dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
108339 -       dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3;
108340 -       dstCCtx->nextSrc = srcCCtx->nextSrc;
108341 -       dstCCtx->base = srcCCtx->base;
108342 -       dstCCtx->dictBase = srcCCtx->dictBase;
108343 -       dstCCtx->dictLimit = srcCCtx->dictLimit;
108344 -       dstCCtx->lowLimit = srcCCtx->lowLimit;
108345 -       dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd;
108346 -       dstCCtx->dictID = srcCCtx->dictID;
108348 -       /* copy entropy tables */
108349 -       dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
108350 -       dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
108351 -       if (srcCCtx->flagStaticTables) {
108352 -               memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
108353 -               memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
108354 -               memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
108355 -       }
108356 -       if (srcCCtx->flagStaticHufTable) {
108357 -               memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4);
108358 -       }
108360 -       return 0;
108363 -/*! ZSTD_reduceTable() :
108364 -*   reduce table indexes by `reducerValue` */
108365 -static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue)
108367 -       U32 u;
108368 -       for (u = 0; u < size; u++) {
108369 -               if (table[u] < reducerValue)
108370 -                       table[u] = 0;
108371 -               else
108372 -                       table[u] -= reducerValue;
108373 -       }
108376 -/*! ZSTD_reduceIndex() :
108377 -*   rescale all indexes to avoid future overflow (indexes are U32) */
108378 -static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue)
108380 -       {
108381 -               U32 const hSize = 1 << zc->params.cParams.hashLog;
108382 -               ZSTD_reduceTable(zc->hashTable, hSize, reducerValue);
108383 -       }
108385 -       {
108386 -               U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
108387 -               ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue);
108388 -       }
108390 -       {
108391 -               U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
108392 -               ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue);
108393 -       }
108396 -/*-*******************************************************
108397 -*  Block entropic compression
108398 -*********************************************************/
108400 -/* See doc/zstd_compression_format.md for detailed format description */
108402 -size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
108404 -       if (srcSize + ZSTD_blockHeaderSize > dstCapacity)
108405 -               return ERROR(dstSize_tooSmall);
108406 -       memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize);
108407 -       ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
108408 -       return ZSTD_blockHeaderSize + srcSize;
108411 -static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
108413 -       BYTE *const ostart = (BYTE * const)dst;
108414 -       U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
108416 -       if (srcSize + flSize > dstCapacity)
108417 -               return ERROR(dstSize_tooSmall);
108419 -       switch (flSize) {
108420 -       case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break;
108421 -       case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break;
108422 -       default: /*note : should not be necessary : flSize is within {1,2,3} */
108423 -       case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break;
108424 -       }
108426 -       memcpy(ostart + flSize, src, srcSize);
108427 -       return srcSize + flSize;
108430 -static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
108432 -       BYTE *const ostart = (BYTE * const)dst;
108433 -       U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
108435 -       (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
108437 -       switch (flSize) {
108438 -       case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break;
108439 -       case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break;
108440 -       default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */
108441 -       case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break;
108442 -       }
108444 -       ostart[flSize] = *(const BYTE *)src;
108445 -       return flSize + 1;
108448 -static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
108450 -static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
108452 -       size_t const minGain = ZSTD_minGain(srcSize);
108453 -       size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
108454 -       BYTE *const ostart = (BYTE *)dst;
108455 -       U32 singleStream = srcSize < 256;
108456 -       symbolEncodingType_e hType = set_compressed;
108457 -       size_t cLitSize;
108459 -/* small ? don't even attempt compression (speed opt) */
108460 -#define LITERAL_NOENTROPY 63
108461 -       {
108462 -               size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
108463 -               if (srcSize <= minLitSize)
108464 -                       return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
108465 -       }
108467 -       if (dstCapacity < lhSize + 1)
108468 -               return ERROR(dstSize_tooSmall); /* not enough space for compression */
108469 -       {
108470 -               HUF_repeat repeat = zc->flagStaticHufTable;
108471 -               int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
108472 -               if (repeat == HUF_repeat_valid && lhSize == 3)
108473 -                       singleStream = 1;
108474 -               cLitSize = singleStream ? HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
108475 -                                                               sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
108476 -                                       : HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
108477 -                                                               sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
108478 -               if (repeat != HUF_repeat_none) {
108479 -                       hType = set_repeat;
108480 -               } /* reused the existing table */
108481 -               else {
108482 -                       zc->flagStaticHufTable = HUF_repeat_check;
108483 -               } /* now have a table to reuse */
108484 -       }
108486 -       if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) {
108487 -               zc->flagStaticHufTable = HUF_repeat_none;
108488 -               return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
108489 -       }
108490 -       if (cLitSize == 1) {
108491 -               zc->flagStaticHufTable = HUF_repeat_none;
108492 -               return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
108493 -       }
108495 -       /* Build header */
108496 -       switch (lhSize) {
108497 -       case 3: /* 2 - 2 - 10 - 10 */
108498 -       {
108499 -               U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14);
108500 -               ZSTD_writeLE24(ostart, lhc);
108501 -               break;
108502 -       }
108503 -       case 4: /* 2 - 2 - 14 - 14 */
108504 -       {
108505 -               U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18);
108506 -               ZSTD_writeLE32(ostart, lhc);
108507 -               break;
108508 -       }
108509 -       default: /* should not be necessary, lhSize is only {3,4,5} */
108510 -       case 5:  /* 2 - 2 - 18 - 18 */
108511 -       {
108512 -               U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22);
108513 -               ZSTD_writeLE32(ostart, lhc);
108514 -               ostart[4] = (BYTE)(cLitSize >> 10);
108515 -               break;
108516 -       }
108517 -       }
108518 -       return lhSize + cLitSize;
108521 -static const BYTE LL_Code[64] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18,
108522 -                                19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
108523 -                                23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24};
108525 -static const BYTE ML_Code[128] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
108526 -                                 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38,
108527 -                                 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
108528 -                                 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42,
108529 -                                 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42};
108531 -void ZSTD_seqToCodes(const seqStore_t *seqStorePtr)
108533 -       BYTE const LL_deltaCode = 19;
108534 -       BYTE const ML_deltaCode = 36;
108535 -       const seqDef *const sequences = seqStorePtr->sequencesStart;
108536 -       BYTE *const llCodeTable = seqStorePtr->llCode;
108537 -       BYTE *const ofCodeTable = seqStorePtr->ofCode;
108538 -       BYTE *const mlCodeTable = seqStorePtr->mlCode;
108539 -       U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
108540 -       U32 u;
108541 -       for (u = 0; u < nbSeq; u++) {
108542 -               U32 const llv = sequences[u].litLength;
108543 -               U32 const mlv = sequences[u].matchLength;
108544 -               llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
108545 -               ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
108546 -               mlCodeTable[u] = (mlv > 127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
108547 -       }
108548 -       if (seqStorePtr->longLengthID == 1)
108549 -               llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
108550 -       if (seqStorePtr->longLengthID == 2)
108551 -               mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
108554 -ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity)
108556 -       const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
108557 -       const seqStore_t *seqStorePtr = &(zc->seqStore);
108558 -       FSE_CTable *CTable_LitLength = zc->litlengthCTable;
108559 -       FSE_CTable *CTable_OffsetBits = zc->offcodeCTable;
108560 -       FSE_CTable *CTable_MatchLength = zc->matchlengthCTable;
108561 -       U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
108562 -       const seqDef *const sequences = seqStorePtr->sequencesStart;
108563 -       const BYTE *const ofCodeTable = seqStorePtr->ofCode;
108564 -       const BYTE *const llCodeTable = seqStorePtr->llCode;
108565 -       const BYTE *const mlCodeTable = seqStorePtr->mlCode;
108566 -       BYTE *const ostart = (BYTE *)dst;
108567 -       BYTE *const oend = ostart + dstCapacity;
108568 -       BYTE *op = ostart;
108569 -       size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
108570 -       BYTE *seqHead;
108572 -       U32 *count;
108573 -       S16 *norm;
108574 -       U32 *workspace;
108575 -       size_t workspaceSize = sizeof(zc->tmpCounters);
108576 -       {
108577 -               size_t spaceUsed32 = 0;
108578 -               count = (U32 *)zc->tmpCounters + spaceUsed32;
108579 -               spaceUsed32 += MaxSeq + 1;
108580 -               norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32);
108581 -               spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
108583 -               workspace = (U32 *)zc->tmpCounters + spaceUsed32;
108584 -               workspaceSize -= (spaceUsed32 << 2);
108585 -       }
108587 -       /* Compress literals */
108588 -       {
108589 -               const BYTE *const literals = seqStorePtr->litStart;
108590 -               size_t const litSize = seqStorePtr->lit - literals;
108591 -               size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
108592 -               if (ZSTD_isError(cSize))
108593 -                       return cSize;
108594 -               op += cSize;
108595 -       }
108597 -       /* Sequences Header */
108598 -       if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */)
108599 -               return ERROR(dstSize_tooSmall);
108600 -       if (nbSeq < 0x7F)
108601 -               *op++ = (BYTE)nbSeq;
108602 -       else if (nbSeq < LONGNBSEQ)
108603 -               op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2;
108604 -       else
108605 -               op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3;
108606 -       if (nbSeq == 0)
108607 -               return op - ostart;
108609 -       /* seqHead : flags for FSE encoding type */
108610 -       seqHead = op++;
108612 -#define MIN_SEQ_FOR_DYNAMIC_FSE 64
108613 -#define MAX_SEQ_FOR_STATIC_FSE 1000
108615 -       /* convert length/distances into codes */
108616 -       ZSTD_seqToCodes(seqStorePtr);
108618 -       /* CTable for Literal Lengths */
108619 -       {
108620 -               U32 max = MaxLL;
108621 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
108622 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
108623 -                       *op++ = llCodeTable[0];
108624 -                       FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
108625 -                       LLtype = set_rle;
108626 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
108627 -                       LLtype = set_repeat;
108628 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) {
108629 -                       FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize);
108630 -                       LLtype = set_basic;
108631 -               } else {
108632 -                       size_t nbSeq_1 = nbSeq;
108633 -                       const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
108634 -                       if (count[llCodeTable[nbSeq - 1]] > 1) {
108635 -                               count[llCodeTable[nbSeq - 1]]--;
108636 -                               nbSeq_1--;
108637 -                       }
108638 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
108639 -                       {
108640 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
108641 -                               if (FSE_isError(NCountSize))
108642 -                                       return NCountSize;
108643 -                               op += NCountSize;
108644 -                       }
108645 -                       FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize);
108646 -                       LLtype = set_compressed;
108647 -               }
108648 -       }
108650 -       /* CTable for Offsets */
108651 -       {
108652 -               U32 max = MaxOff;
108653 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
108654 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
108655 -                       *op++ = ofCodeTable[0];
108656 -                       FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
108657 -                       Offtype = set_rle;
108658 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
108659 -                       Offtype = set_repeat;
108660 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) {
108661 -                       FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize);
108662 -                       Offtype = set_basic;
108663 -               } else {
108664 -                       size_t nbSeq_1 = nbSeq;
108665 -                       const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
108666 -                       if (count[ofCodeTable[nbSeq - 1]] > 1) {
108667 -                               count[ofCodeTable[nbSeq - 1]]--;
108668 -                               nbSeq_1--;
108669 -                       }
108670 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
108671 -                       {
108672 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
108673 -                               if (FSE_isError(NCountSize))
108674 -                                       return NCountSize;
108675 -                               op += NCountSize;
108676 -                       }
108677 -                       FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize);
108678 -                       Offtype = set_compressed;
108679 -               }
108680 -       }
108682 -       /* CTable for MatchLengths */
108683 -       {
108684 -               U32 max = MaxML;
108685 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
108686 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
108687 -                       *op++ = *mlCodeTable;
108688 -                       FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
108689 -                       MLtype = set_rle;
108690 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
108691 -                       MLtype = set_repeat;
108692 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) {
108693 -                       FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize);
108694 -                       MLtype = set_basic;
108695 -               } else {
108696 -                       size_t nbSeq_1 = nbSeq;
108697 -                       const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
108698 -                       if (count[mlCodeTable[nbSeq - 1]] > 1) {
108699 -                               count[mlCodeTable[nbSeq - 1]]--;
108700 -                               nbSeq_1--;
108701 -                       }
108702 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
108703 -                       {
108704 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
108705 -                               if (FSE_isError(NCountSize))
108706 -                                       return NCountSize;
108707 -                               op += NCountSize;
108708 -                       }
108709 -                       FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize);
108710 -                       MLtype = set_compressed;
108711 -               }
108712 -       }
108714 -       *seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2));
108715 -       zc->flagStaticTables = 0;
108717 -       /* Encoding Sequences */
108718 -       {
108719 -               BIT_CStream_t blockStream;
108720 -               FSE_CState_t stateMatchLength;
108721 -               FSE_CState_t stateOffsetBits;
108722 -               FSE_CState_t stateLitLength;
108724 -               CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */
108726 -               /* first symbols */
108727 -               FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]);
108728 -               FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]);
108729 -               FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]);
108730 -               BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]);
108731 -               if (ZSTD_32bits())
108732 -                       BIT_flushBits(&blockStream);
108733 -               BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]);
108734 -               if (ZSTD_32bits())
108735 -                       BIT_flushBits(&blockStream);
108736 -               if (longOffsets) {
108737 -                       U32 const ofBits = ofCodeTable[nbSeq - 1];
108738 -                       int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
108739 -                       if (extraBits) {
108740 -                               BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits);
108741 -                               BIT_flushBits(&blockStream);
108742 -                       }
108743 -                       BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits);
108744 -               } else {
108745 -                       BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]);
108746 -               }
108747 -               BIT_flushBits(&blockStream);
108749 -               {
108750 -                       size_t n;
108751 -                       for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */
108752 -                               BYTE const llCode = llCodeTable[n];
108753 -                               BYTE const ofCode = ofCodeTable[n];
108754 -                               BYTE const mlCode = mlCodeTable[n];
108755 -                               U32 const llBits = LL_bits[llCode];
108756 -                               U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
108757 -                               U32 const mlBits = ML_bits[mlCode];
108758 -                               /* (7)*/                                                            /* (7)*/
108759 -                               FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */  /* 15 */
108760 -                               FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
108761 -                               if (ZSTD_32bits())
108762 -                                       BIT_flushBits(&blockStream);                              /* (7)*/
108763 -                               FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
108764 -                               if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
108765 -                                       BIT_flushBits(&blockStream); /* (7)*/
108766 -                               BIT_addBits(&blockStream, sequences[n].litLength, llBits);
108767 -                               if (ZSTD_32bits() && ((llBits + mlBits) > 24))
108768 -                                       BIT_flushBits(&blockStream);
108769 -                               BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
108770 -                               if (ZSTD_32bits())
108771 -                                       BIT_flushBits(&blockStream); /* (7)*/
108772 -                               if (longOffsets) {
108773 -                                       int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
108774 -                                       if (extraBits) {
108775 -                                               BIT_addBits(&blockStream, sequences[n].offset, extraBits);
108776 -                                               BIT_flushBits(&blockStream); /* (7)*/
108777 -                                       }
108778 -                                       BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */
108779 -                               } else {
108780 -                                       BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
108781 -                               }
108782 -                               BIT_flushBits(&blockStream); /* (7)*/
108783 -                       }
108784 -               }
108786 -               FSE_flushCState(&blockStream, &stateMatchLength);
108787 -               FSE_flushCState(&blockStream, &stateOffsetBits);
108788 -               FSE_flushCState(&blockStream, &stateLitLength);
108790 -               {
108791 -                       size_t const streamSize = BIT_closeCStream(&blockStream);
108792 -                       if (streamSize == 0)
108793 -                               return ERROR(dstSize_tooSmall); /* not enough space */
108794 -                       op += streamSize;
108795 -               }
108796 -       }
108797 -       return op - ostart;
108800 -ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize)
108802 -       size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity);
108803 -       size_t const minGain = ZSTD_minGain(srcSize);
108804 -       size_t const maxCSize = srcSize - minGain;
108805 -       /* If the srcSize <= dstCapacity, then there is enough space to write a
108806 -        * raw uncompressed block. Since we ran out of space, the block must not
108807 -        * be compressible, so fall back to a raw uncompressed block.
108808 -        */
108809 -       int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
108810 -       int i;
108812 -       if (ZSTD_isError(cSize) && !uncompressibleError)
108813 -               return cSize;
108814 -       if (cSize >= maxCSize || uncompressibleError) {
108815 -               zc->flagStaticHufTable = HUF_repeat_none;
108816 -               return 0;
108817 -       }
108818 -       /* confirm repcodes */
108819 -       for (i = 0; i < ZSTD_REP_NUM; i++)
108820 -               zc->rep[i] = zc->repToConfirm[i];
108821 -       return cSize;
108824 -/*! ZSTD_storeSeq() :
108825 -       Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
108826 -       `offsetCode` : distance to match, or 0 == repCode.
108827 -       `matchCode` : matchLength - MINMATCH
108829 -ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode)
108831 -       /* copy Literals */
108832 -       ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
108833 -       seqStorePtr->lit += litLength;
108835 -       /* literal Length */
108836 -       if (litLength > 0xFFFF) {
108837 -               seqStorePtr->longLengthID = 1;
108838 -               seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
108839 -       }
108840 -       seqStorePtr->sequences[0].litLength = (U16)litLength;
108842 -       /* match offset */
108843 -       seqStorePtr->sequences[0].offset = offsetCode + 1;
108845 -       /* match Length */
108846 -       if (matchCode > 0xFFFF) {
108847 -               seqStorePtr->longLengthID = 2;
108848 -               seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
108849 -       }
108850 -       seqStorePtr->sequences[0].matchLength = (U16)matchCode;
108852 -       seqStorePtr->sequences++;
108855 -/*-*************************************
108856 -*  Match length counter
108857 -***************************************/
108858 -static unsigned ZSTD_NbCommonBytes(register size_t val)
108860 -       if (ZSTD_isLittleEndian()) {
108861 -               if (ZSTD_64bits()) {
108862 -                       return (__builtin_ctzll((U64)val) >> 3);
108863 -               } else { /* 32 bits */
108864 -                       return (__builtin_ctz((U32)val) >> 3);
108865 -               }
108866 -       } else { /* Big Endian CPU */
108867 -               if (ZSTD_64bits()) {
108868 -                       return (__builtin_clzll(val) >> 3);
108869 -               } else { /* 32 bits */
108870 -                       return (__builtin_clz((U32)val) >> 3);
108871 -               }
108872 -       }
108875 -static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit)
108877 -       const BYTE *const pStart = pIn;
108878 -       const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1);
108880 -       while (pIn < pInLoopLimit) {
108881 -               size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn);
108882 -               if (!diff) {
108883 -                       pIn += sizeof(size_t);
108884 -                       pMatch += sizeof(size_t);
108885 -                       continue;
108886 -               }
108887 -               pIn += ZSTD_NbCommonBytes(diff);
108888 -               return (size_t)(pIn - pStart);
108889 -       }
108890 -       if (ZSTD_64bits())
108891 -               if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) {
108892 -                       pIn += 4;
108893 -                       pMatch += 4;
108894 -               }
108895 -       if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) {
108896 -               pIn += 2;
108897 -               pMatch += 2;
108898 -       }
108899 -       if ((pIn < pInLimit) && (*pMatch == *pIn))
108900 -               pIn++;
108901 -       return (size_t)(pIn - pStart);
108904 -/** ZSTD_count_2segments() :
108905 -*   can count match length with `ip` & `match` in 2 different segments.
108906 -*   convention : on reaching mEnd, match count continue starting from iStart
108908 -static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart)
108910 -       const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd);
108911 -       size_t const matchLength = ZSTD_count(ip, match, vEnd);
108912 -       if (match + matchLength != mEnd)
108913 -               return matchLength;
108914 -       return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd);
108917 -/*-*************************************
108918 -*  Hashes
108919 -***************************************/
108920 -static const U32 prime3bytes = 506832829U;
108921 -static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); }
108922 -ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */
108924 -static const U32 prime4bytes = 2654435761U;
108925 -static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); }
108926 -static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); }
108928 -static const U64 prime5bytes = 889523592379ULL;
108929 -static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); }
108930 -static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); }
108932 -static const U64 prime6bytes = 227718039650203ULL;
108933 -static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); }
108934 -static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); }
108936 -static const U64 prime7bytes = 58295818150454627ULL;
108937 -static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); }
108938 -static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); }
108940 -static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
108941 -static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); }
108942 -static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); }
108944 -static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls)
108946 -       switch (mls) {
108947 -       // case 3: return ZSTD_hash3Ptr(p, hBits);
108948 -       default:
108949 -       case 4: return ZSTD_hash4Ptr(p, hBits);
108950 -       case 5: return ZSTD_hash5Ptr(p, hBits);
108951 -       case 6: return ZSTD_hash6Ptr(p, hBits);
108952 -       case 7: return ZSTD_hash7Ptr(p, hBits);
108953 -       case 8: return ZSTD_hash8Ptr(p, hBits);
108954 -       }
108957 -/*-*************************************
108958 -*  Fast Scan
108959 -***************************************/
108960 -static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls)
108962 -       U32 *const hashTable = zc->hashTable;
108963 -       U32 const hBits = zc->params.cParams.hashLog;
108964 -       const BYTE *const base = zc->base;
108965 -       const BYTE *ip = base + zc->nextToUpdate;
108966 -       const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
108967 -       const size_t fastHashFillStep = 3;
108969 -       while (ip <= iend) {
108970 -               hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
108971 -               ip += fastHashFillStep;
108972 -       }
108975 -FORCE_INLINE
108976 -void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
108978 -       U32 *const hashTable = cctx->hashTable;
108979 -       U32 const hBits = cctx->params.cParams.hashLog;
108980 -       seqStore_t *seqStorePtr = &(cctx->seqStore);
108981 -       const BYTE *const base = cctx->base;
108982 -       const BYTE *const istart = (const BYTE *)src;
108983 -       const BYTE *ip = istart;
108984 -       const BYTE *anchor = istart;
108985 -       const U32 lowestIndex = cctx->dictLimit;
108986 -       const BYTE *const lowest = base + lowestIndex;
108987 -       const BYTE *const iend = istart + srcSize;
108988 -       const BYTE *const ilimit = iend - HASH_READ_SIZE;
108989 -       U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
108990 -       U32 offsetSaved = 0;
108992 -       /* init */
108993 -       ip += (ip == lowest);
108994 -       {
108995 -               U32 const maxRep = (U32)(ip - lowest);
108996 -               if (offset_2 > maxRep)
108997 -                       offsetSaved = offset_2, offset_2 = 0;
108998 -               if (offset_1 > maxRep)
108999 -                       offsetSaved = offset_1, offset_1 = 0;
109000 -       }
109002 -       /* Main Search Loop */
109003 -       while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
109004 -               size_t mLength;
109005 -               size_t const h = ZSTD_hashPtr(ip, hBits, mls);
109006 -               U32 const curr = (U32)(ip - base);
109007 -               U32 const matchIndex = hashTable[h];
109008 -               const BYTE *match = base + matchIndex;
109009 -               hashTable[h] = curr; /* update hash table */
109011 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
109012 -                       mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
109013 -                       ip++;
109014 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
109015 -               } else {
109016 -                       U32 offset;
109017 -                       if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
109018 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
109019 -                               continue;
109020 -                       }
109021 -                       mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
109022 -                       offset = (U32)(ip - match);
109023 -                       while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
109024 -                               ip--;
109025 -                               match--;
109026 -                               mLength++;
109027 -                       } /* catch up */
109028 -                       offset_2 = offset_1;
109029 -                       offset_1 = offset;
109031 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
109032 -               }
109034 -               /* match found */
109035 -               ip += mLength;
109036 -               anchor = ip;
109038 -               if (ip <= ilimit) {
109039 -                       /* Fill Table */
109040 -                       hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */
109041 -                       hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
109042 -                       /* check immediate repcode */
109043 -                       while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
109044 -                               /* store sequence */
109045 -                               size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
109046 -                               {
109047 -                                       U32 const tmpOff = offset_2;
109048 -                                       offset_2 = offset_1;
109049 -                                       offset_1 = tmpOff;
109050 -                               } /* swap offset_2 <=> offset_1 */
109051 -                               hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
109052 -                               ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
109053 -                               ip += rLength;
109054 -                               anchor = ip;
109055 -                               continue; /* faster when present ... (?) */
109056 -                       }
109057 -               }
109058 -       }
109060 -       /* save reps for next block */
109061 -       cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
109062 -       cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
109064 -       /* Last Literals */
109065 -       {
109066 -               size_t const lastLLSize = iend - anchor;
109067 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
109068 -               seqStorePtr->lit += lastLLSize;
109069 -       }
109072 -static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
109074 -       const U32 mls = ctx->params.cParams.searchLength;
109075 -       switch (mls) {
109076 -       default: /* includes case 3 */
109077 -       case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
109078 -       case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
109079 -       case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
109080 -       case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
109081 -       }
109084 -static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
109086 -       U32 *hashTable = ctx->hashTable;
109087 -       const U32 hBits = ctx->params.cParams.hashLog;
109088 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
109089 -       const BYTE *const base = ctx->base;
109090 -       const BYTE *const dictBase = ctx->dictBase;
109091 -       const BYTE *const istart = (const BYTE *)src;
109092 -       const BYTE *ip = istart;
109093 -       const BYTE *anchor = istart;
109094 -       const U32 lowestIndex = ctx->lowLimit;
109095 -       const BYTE *const dictStart = dictBase + lowestIndex;
109096 -       const U32 dictLimit = ctx->dictLimit;
109097 -       const BYTE *const lowPrefixPtr = base + dictLimit;
109098 -       const BYTE *const dictEnd = dictBase + dictLimit;
109099 -       const BYTE *const iend = istart + srcSize;
109100 -       const BYTE *const ilimit = iend - 8;
109101 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
109103 -       /* Search Loop */
109104 -       while (ip < ilimit) { /* < instead of <=, because (ip+1) */
109105 -               const size_t h = ZSTD_hashPtr(ip, hBits, mls);
109106 -               const U32 matchIndex = hashTable[h];
109107 -               const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
109108 -               const BYTE *match = matchBase + matchIndex;
109109 -               const U32 curr = (U32)(ip - base);
109110 -               const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
109111 -               const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
109112 -               const BYTE *repMatch = repBase + repIndex;
109113 -               size_t mLength;
109114 -               hashTable[h] = curr; /* update hash table */
109116 -               if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
109117 -                   (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
109118 -                       const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
109119 -                       mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
109120 -                       ip++;
109121 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
109122 -               } else {
109123 -                       if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
109124 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
109125 -                               continue;
109126 -                       }
109127 -                       {
109128 -                               const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
109129 -                               const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
109130 -                               U32 offset;
109131 -                               mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
109132 -                               while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
109133 -                                       ip--;
109134 -                                       match--;
109135 -                                       mLength++;
109136 -                               } /* catch up */
109137 -                               offset = curr - matchIndex;
109138 -                               offset_2 = offset_1;
109139 -                               offset_1 = offset;
109140 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
109141 -                       }
109142 -               }
109144 -               /* found a match : store it */
109145 -               ip += mLength;
109146 -               anchor = ip;
109148 -               if (ip <= ilimit) {
109149 -                       /* Fill Table */
109150 -                       hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
109151 -                       hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
109152 -                       /* check immediate repcode */
109153 -                       while (ip <= ilimit) {
109154 -                               U32 const curr2 = (U32)(ip - base);
109155 -                               U32 const repIndex2 = curr2 - offset_2;
109156 -                               const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
109157 -                               if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
109158 -                                   && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
109159 -                                       const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
109160 -                                       size_t repLength2 =
109161 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
109162 -                                       U32 tmpOffset = offset_2;
109163 -                                       offset_2 = offset_1;
109164 -                                       offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
109165 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
109166 -                                       hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2;
109167 -                                       ip += repLength2;
109168 -                                       anchor = ip;
109169 -                                       continue;
109170 -                               }
109171 -                               break;
109172 -                       }
109173 -               }
109174 -       }
109176 -       /* save reps for next block */
109177 -       ctx->repToConfirm[0] = offset_1;
109178 -       ctx->repToConfirm[1] = offset_2;
109180 -       /* Last Literals */
109181 -       {
109182 -               size_t const lastLLSize = iend - anchor;
109183 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
109184 -               seqStorePtr->lit += lastLLSize;
109185 -       }
109188 -static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
109190 -       U32 const mls = ctx->params.cParams.searchLength;
109191 -       switch (mls) {
109192 -       default: /* includes case 3 */
109193 -       case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
109194 -       case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
109195 -       case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
109196 -       case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
109197 -       }
109200 -/*-*************************************
109201 -*  Double Fast
109202 -***************************************/
109203 -static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls)
109205 -       U32 *const hashLarge = cctx->hashTable;
109206 -       U32 const hBitsL = cctx->params.cParams.hashLog;
109207 -       U32 *const hashSmall = cctx->chainTable;
109208 -       U32 const hBitsS = cctx->params.cParams.chainLog;
109209 -       const BYTE *const base = cctx->base;
109210 -       const BYTE *ip = base + cctx->nextToUpdate;
109211 -       const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
109212 -       const size_t fastHashFillStep = 3;
109214 -       while (ip <= iend) {
109215 -               hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
109216 -               hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
109217 -               ip += fastHashFillStep;
109218 -       }
109221 -FORCE_INLINE
109222 -void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
109224 -       U32 *const hashLong = cctx->hashTable;
109225 -       const U32 hBitsL = cctx->params.cParams.hashLog;
109226 -       U32 *const hashSmall = cctx->chainTable;
109227 -       const U32 hBitsS = cctx->params.cParams.chainLog;
109228 -       seqStore_t *seqStorePtr = &(cctx->seqStore);
109229 -       const BYTE *const base = cctx->base;
109230 -       const BYTE *const istart = (const BYTE *)src;
109231 -       const BYTE *ip = istart;
109232 -       const BYTE *anchor = istart;
109233 -       const U32 lowestIndex = cctx->dictLimit;
109234 -       const BYTE *const lowest = base + lowestIndex;
109235 -       const BYTE *const iend = istart + srcSize;
109236 -       const BYTE *const ilimit = iend - HASH_READ_SIZE;
109237 -       U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
109238 -       U32 offsetSaved = 0;
109240 -       /* init */
109241 -       ip += (ip == lowest);
109242 -       {
109243 -               U32 const maxRep = (U32)(ip - lowest);
109244 -               if (offset_2 > maxRep)
109245 -                       offsetSaved = offset_2, offset_2 = 0;
109246 -               if (offset_1 > maxRep)
109247 -                       offsetSaved = offset_1, offset_1 = 0;
109248 -       }
109250 -       /* Main Search Loop */
109251 -       while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
109252 -               size_t mLength;
109253 -               size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
109254 -               size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
109255 -               U32 const curr = (U32)(ip - base);
109256 -               U32 const matchIndexL = hashLong[h2];
109257 -               U32 const matchIndexS = hashSmall[h];
109258 -               const BYTE *matchLong = base + matchIndexL;
109259 -               const BYTE *match = base + matchIndexS;
109260 -               hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
109262 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */
109263 -                       mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
109264 -                       ip++;
109265 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
109266 -               } else {
109267 -                       U32 offset;
109268 -                       if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
109269 -                               mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8;
109270 -                               offset = (U32)(ip - matchLong);
109271 -                               while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) {
109272 -                                       ip--;
109273 -                                       matchLong--;
109274 -                                       mLength++;
109275 -                               } /* catch up */
109276 -                       } else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
109277 -                               size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
109278 -                               U32 const matchIndex3 = hashLong[h3];
109279 -                               const BYTE *match3 = base + matchIndex3;
109280 -                               hashLong[h3] = curr + 1;
109281 -                               if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
109282 -                                       mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8;
109283 -                                       ip++;
109284 -                                       offset = (U32)(ip - match3);
109285 -                                       while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) {
109286 -                                               ip--;
109287 -                                               match3--;
109288 -                                               mLength++;
109289 -                                       } /* catch up */
109290 -                               } else {
109291 -                                       mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
109292 -                                       offset = (U32)(ip - match);
109293 -                                       while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
109294 -                                               ip--;
109295 -                                               match--;
109296 -                                               mLength++;
109297 -                                       } /* catch up */
109298 -                               }
109299 -                       } else {
109300 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
109301 -                               continue;
109302 -                       }
109304 -                       offset_2 = offset_1;
109305 -                       offset_1 = offset;
109307 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
109308 -               }
109310 -               /* match found */
109311 -               ip += mLength;
109312 -               anchor = ip;
109314 -               if (ip <= ilimit) {
109315 -                       /* Fill Table */
109316 -                       hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] =
109317 -                           curr + 2; /* here because curr+2 could be > iend-8 */
109318 -                       hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
109320 -                       /* check immediate repcode */
109321 -                       while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
109322 -                               /* store sequence */
109323 -                               size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
109324 -                               {
109325 -                                       U32 const tmpOff = offset_2;
109326 -                                       offset_2 = offset_1;
109327 -                                       offset_1 = tmpOff;
109328 -                               } /* swap offset_2 <=> offset_1 */
109329 -                               hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
109330 -                               hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
109331 -                               ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
109332 -                               ip += rLength;
109333 -                               anchor = ip;
109334 -                               continue; /* faster when present ... (?) */
109335 -                       }
109336 -               }
109337 -       }
109339 -       /* save reps for next block */
109340 -       cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
109341 -       cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
109343 -       /* Last Literals */
109344 -       {
109345 -               size_t const lastLLSize = iend - anchor;
109346 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
109347 -               seqStorePtr->lit += lastLLSize;
109348 -       }
109351 -static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
109353 -       const U32 mls = ctx->params.cParams.searchLength;
109354 -       switch (mls) {
109355 -       default: /* includes case 3 */
109356 -       case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
109357 -       case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
109358 -       case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
109359 -       case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
109360 -       }
109363 -static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
109365 -       U32 *const hashLong = ctx->hashTable;
109366 -       U32 const hBitsL = ctx->params.cParams.hashLog;
109367 -       U32 *const hashSmall = ctx->chainTable;
109368 -       U32 const hBitsS = ctx->params.cParams.chainLog;
109369 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
109370 -       const BYTE *const base = ctx->base;
109371 -       const BYTE *const dictBase = ctx->dictBase;
109372 -       const BYTE *const istart = (const BYTE *)src;
109373 -       const BYTE *ip = istart;
109374 -       const BYTE *anchor = istart;
109375 -       const U32 lowestIndex = ctx->lowLimit;
109376 -       const BYTE *const dictStart = dictBase + lowestIndex;
109377 -       const U32 dictLimit = ctx->dictLimit;
109378 -       const BYTE *const lowPrefixPtr = base + dictLimit;
109379 -       const BYTE *const dictEnd = dictBase + dictLimit;
109380 -       const BYTE *const iend = istart + srcSize;
109381 -       const BYTE *const ilimit = iend - 8;
109382 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
109384 -       /* Search Loop */
109385 -       while (ip < ilimit) { /* < instead of <=, because (ip+1) */
109386 -               const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
109387 -               const U32 matchIndex = hashSmall[hSmall];
109388 -               const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
109389 -               const BYTE *match = matchBase + matchIndex;
109391 -               const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
109392 -               const U32 matchLongIndex = hashLong[hLong];
109393 -               const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
109394 -               const BYTE *matchLong = matchLongBase + matchLongIndex;
109396 -               const U32 curr = (U32)(ip - base);
109397 -               const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
109398 -               const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
109399 -               const BYTE *repMatch = repBase + repIndex;
109400 -               size_t mLength;
109401 -               hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
109403 -               if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
109404 -                   (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
109405 -                       const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
109406 -                       mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4;
109407 -                       ip++;
109408 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
109409 -               } else {
109410 -                       if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
109411 -                               const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
109412 -                               const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
109413 -                               U32 offset;
109414 -                               mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8;
109415 -                               offset = curr - matchLongIndex;
109416 -                               while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) {
109417 -                                       ip--;
109418 -                                       matchLong--;
109419 -                                       mLength++;
109420 -                               } /* catch up */
109421 -                               offset_2 = offset_1;
109422 -                               offset_1 = offset;
109423 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
109425 -                       } else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
109426 -                               size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
109427 -                               U32 const matchIndex3 = hashLong[h3];
109428 -                               const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base;
109429 -                               const BYTE *match3 = match3Base + matchIndex3;
109430 -                               U32 offset;
109431 -                               hashLong[h3] = curr + 1;
109432 -                               if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
109433 -                                       const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
109434 -                                       const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
109435 -                                       mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8;
109436 -                                       ip++;
109437 -                                       offset = curr + 1 - matchIndex3;
109438 -                                       while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) {
109439 -                                               ip--;
109440 -                                               match3--;
109441 -                                               mLength++;
109442 -                                       } /* catch up */
109443 -                               } else {
109444 -                                       const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
109445 -                                       const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
109446 -                                       mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4;
109447 -                                       offset = curr - matchIndex;
109448 -                                       while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
109449 -                                               ip--;
109450 -                                               match--;
109451 -                                               mLength++;
109452 -                                       } /* catch up */
109453 -                               }
109454 -                               offset_2 = offset_1;
109455 -                               offset_1 = offset;
109456 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
109458 -                       } else {
109459 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
109460 -                               continue;
109461 -                       }
109462 -               }
109464 -               /* found a match : store it */
109465 -               ip += mLength;
109466 -               anchor = ip;
109468 -               if (ip <= ilimit) {
109469 -                       /* Fill Table */
109470 -                       hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2;
109471 -                       hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2;
109472 -                       hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
109473 -                       hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base);
109474 -                       /* check immediate repcode */
109475 -                       while (ip <= ilimit) {
109476 -                               U32 const curr2 = (U32)(ip - base);
109477 -                               U32 const repIndex2 = curr2 - offset_2;
109478 -                               const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
109479 -                               if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
109480 -                                   && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
109481 -                                       const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
109482 -                                       size_t const repLength2 =
109483 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
109484 -                                       U32 tmpOffset = offset_2;
109485 -                                       offset_2 = offset_1;
109486 -                                       offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
109487 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
109488 -                                       hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2;
109489 -                                       hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2;
109490 -                                       ip += repLength2;
109491 -                                       anchor = ip;
109492 -                                       continue;
109493 -                               }
109494 -                               break;
109495 -                       }
109496 -               }
109497 -       }
109499 -       /* save reps for next block */
109500 -       ctx->repToConfirm[0] = offset_1;
109501 -       ctx->repToConfirm[1] = offset_2;
109503 -       /* Last Literals */
109504 -       {
109505 -               size_t const lastLLSize = iend - anchor;
109506 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
109507 -               seqStorePtr->lit += lastLLSize;
109508 -       }
109511 -static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
109513 -       U32 const mls = ctx->params.cParams.searchLength;
109514 -       switch (mls) {
109515 -       default: /* includes case 3 */
109516 -       case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
109517 -       case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
109518 -       case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
109519 -       case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
109520 -       }
109523 -/*-*************************************
109524 -*  Binary Tree search
109525 -***************************************/
109526 -/** ZSTD_insertBt1() : add one or multiple positions to tree.
109527 -*   ip : assumed <= iend-8 .
109528 -*   @return : nb of positions added */
109529 -static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict)
109531 -       U32 *const hashTable = zc->hashTable;
109532 -       U32 const hashLog = zc->params.cParams.hashLog;
109533 -       size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
109534 -       U32 *const bt = zc->chainTable;
109535 -       U32 const btLog = zc->params.cParams.chainLog - 1;
109536 -       U32 const btMask = (1 << btLog) - 1;
109537 -       U32 matchIndex = hashTable[h];
109538 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
109539 -       const BYTE *const base = zc->base;
109540 -       const BYTE *const dictBase = zc->dictBase;
109541 -       const U32 dictLimit = zc->dictLimit;
109542 -       const BYTE *const dictEnd = dictBase + dictLimit;
109543 -       const BYTE *const prefixStart = base + dictLimit;
109544 -       const BYTE *match;
109545 -       const U32 curr = (U32)(ip - base);
109546 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
109547 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
109548 -       U32 *largerPtr = smallerPtr + 1;
109549 -       U32 dummy32; /* to be nullified at the end */
109550 -       U32 const windowLow = zc->lowLimit;
109551 -       U32 matchEndIdx = curr + 8;
109552 -       size_t bestLength = 8;
109554 -       hashTable[h] = curr; /* Update Hash Table */
109556 -       while (nbCompares-- && (matchIndex > windowLow)) {
109557 -               U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
109558 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
109560 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
109561 -                       match = base + matchIndex;
109562 -                       if (match[matchLength] == ip[matchLength])
109563 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
109564 -               } else {
109565 -                       match = dictBase + matchIndex;
109566 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
109567 -                       if (matchIndex + matchLength >= dictLimit)
109568 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
109569 -               }
109571 -               if (matchLength > bestLength) {
109572 -                       bestLength = matchLength;
109573 -                       if (matchLength > matchEndIdx - matchIndex)
109574 -                               matchEndIdx = matchIndex + (U32)matchLength;
109575 -               }
109577 -               if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
109578 -                       break;                /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */
109580 -               if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
109581 -                       /* match is smaller than curr */
109582 -                       *smallerPtr = matchIndex;         /* update smaller idx */
109583 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
109584 -                       if (matchIndex <= btLow) {
109585 -                               smallerPtr = &dummy32;
109586 -                               break;
109587 -                       }                         /* beyond tree size, stop the search */
109588 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
109589 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
109590 -               } else {
109591 -                       /* match is larger than curr */
109592 -                       *largerPtr = matchIndex;
109593 -                       commonLengthLarger = matchLength;
109594 -                       if (matchIndex <= btLow) {
109595 -                               largerPtr = &dummy32;
109596 -                               break;
109597 -                       } /* beyond tree size, stop the search */
109598 -                       largerPtr = nextPtr;
109599 -                       matchIndex = nextPtr[0];
109600 -               }
109601 -       }
109603 -       *smallerPtr = *largerPtr = 0;
109604 -       if (bestLength > 384)
109605 -               return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
109606 -       if (matchEndIdx > curr + 8)
109607 -               return matchEndIdx - curr - 8;
109608 -       return 1;
109611 -static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls,
109612 -                                           U32 extDict)
109614 -       U32 *const hashTable = zc->hashTable;
109615 -       U32 const hashLog = zc->params.cParams.hashLog;
109616 -       size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
109617 -       U32 *const bt = zc->chainTable;
109618 -       U32 const btLog = zc->params.cParams.chainLog - 1;
109619 -       U32 const btMask = (1 << btLog) - 1;
109620 -       U32 matchIndex = hashTable[h];
109621 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
109622 -       const BYTE *const base = zc->base;
109623 -       const BYTE *const dictBase = zc->dictBase;
109624 -       const U32 dictLimit = zc->dictLimit;
109625 -       const BYTE *const dictEnd = dictBase + dictLimit;
109626 -       const BYTE *const prefixStart = base + dictLimit;
109627 -       const U32 curr = (U32)(ip - base);
109628 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
109629 -       const U32 windowLow = zc->lowLimit;
109630 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
109631 -       U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
109632 -       U32 matchEndIdx = curr + 8;
109633 -       U32 dummy32; /* to be nullified at the end */
109634 -       size_t bestLength = 0;
109636 -       hashTable[h] = curr; /* Update Hash Table */
109638 -       while (nbCompares-- && (matchIndex > windowLow)) {
109639 -               U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
109640 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
109641 -               const BYTE *match;
109643 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
109644 -                       match = base + matchIndex;
109645 -                       if (match[matchLength] == ip[matchLength])
109646 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
109647 -               } else {
109648 -                       match = dictBase + matchIndex;
109649 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
109650 -                       if (matchIndex + matchLength >= dictLimit)
109651 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
109652 -               }
109654 -               if (matchLength > bestLength) {
109655 -                       if (matchLength > matchEndIdx - matchIndex)
109656 -                               matchEndIdx = matchIndex + (U32)matchLength;
109657 -                       if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1)))
109658 -                               bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
109659 -                       if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
109660 -                               break;                /* drop, to guarantee consistency (miss a little bit of compression) */
109661 -               }
109663 -               if (match[matchLength] < ip[matchLength]) {
109664 -                       /* match is smaller than curr */
109665 -                       *smallerPtr = matchIndex;         /* update smaller idx */
109666 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
109667 -                       if (matchIndex <= btLow) {
109668 -                               smallerPtr = &dummy32;
109669 -                               break;
109670 -                       }                         /* beyond tree size, stop the search */
109671 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
109672 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
109673 -               } else {
109674 -                       /* match is larger than curr */
109675 -                       *largerPtr = matchIndex;
109676 -                       commonLengthLarger = matchLength;
109677 -                       if (matchIndex <= btLow) {
109678 -                               largerPtr = &dummy32;
109679 -                               break;
109680 -                       } /* beyond tree size, stop the search */
109681 -                       largerPtr = nextPtr;
109682 -                       matchIndex = nextPtr[0];
109683 -               }
109684 -       }
109686 -       *smallerPtr = *largerPtr = 0;
109688 -       zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
109689 -       return bestLength;
109692 -static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
109694 -       const BYTE *const base = zc->base;
109695 -       const U32 target = (U32)(ip - base);
109696 -       U32 idx = zc->nextToUpdate;
109698 -       while (idx < target)
109699 -               idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0);
109702 -/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
109703 -static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls)
109705 -       if (ip < zc->base + zc->nextToUpdate)
109706 -               return 0; /* skipped area */
109707 -       ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
109708 -       return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
109711 -static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
109712 -                                            const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch)
109714 -       switch (matchLengthSearch) {
109715 -       default: /* includes case 3 */
109716 -       case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
109717 -       case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
109718 -       case 7:
109719 -       case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
109720 -       }
109723 -static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
109725 -       const BYTE *const base = zc->base;
109726 -       const U32 target = (U32)(ip - base);
109727 -       U32 idx = zc->nextToUpdate;
109729 -       while (idx < target)
109730 -               idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1);
109733 -/** Tree updater, providing best match */
109734 -static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
109735 -                                          const U32 mls)
109737 -       if (ip < zc->base + zc->nextToUpdate)
109738 -               return 0; /* skipped area */
109739 -       ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
109740 -       return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
109743 -static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
109744 -                                                    const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
109745 -                                                    const U32 matchLengthSearch)
109747 -       switch (matchLengthSearch) {
109748 -       default: /* includes case 3 */
109749 -       case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
109750 -       case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
109751 -       case 7:
109752 -       case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
109753 -       }
109756 -/* *********************************
109757 -*  Hash Chain
109758 -***********************************/
109759 -#define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask]
109761 -/* Update chains up to ip (excluded)
109762 -   Assumption : always within prefix (i.e. not within extDict) */
109763 -FORCE_INLINE
109764 -U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls)
109766 -       U32 *const hashTable = zc->hashTable;
109767 -       const U32 hashLog = zc->params.cParams.hashLog;
109768 -       U32 *const chainTable = zc->chainTable;
109769 -       const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
109770 -       const BYTE *const base = zc->base;
109771 -       const U32 target = (U32)(ip - base);
109772 -       U32 idx = zc->nextToUpdate;
109774 -       while (idx < target) { /* catch up */
109775 -               size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);
109776 -               NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
109777 -               hashTable[h] = idx;
109778 -               idx++;
109779 -       }
109781 -       zc->nextToUpdate = target;
109782 -       return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
109785 -/* inlining is important to hardwire a hot branch (template emulation) */
109786 -FORCE_INLINE
109787 -size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */
109788 -                                   const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls,
109789 -                                   const U32 extDict)
109791 -       U32 *const chainTable = zc->chainTable;
109792 -       const U32 chainSize = (1 << zc->params.cParams.chainLog);
109793 -       const U32 chainMask = chainSize - 1;
109794 -       const BYTE *const base = zc->base;
109795 -       const BYTE *const dictBase = zc->dictBase;
109796 -       const U32 dictLimit = zc->dictLimit;
109797 -       const BYTE *const prefixStart = base + dictLimit;
109798 -       const BYTE *const dictEnd = dictBase + dictLimit;
109799 -       const U32 lowLimit = zc->lowLimit;
109800 -       const U32 curr = (U32)(ip - base);
109801 -       const U32 minChain = curr > chainSize ? curr - chainSize : 0;
109802 -       int nbAttempts = maxNbAttempts;
109803 -       size_t ml = EQUAL_READ32 - 1;
109805 -       /* HC4 match finder */
109806 -       U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls);
109808 -       for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) {
109809 -               const BYTE *match;
109810 -               size_t currMl = 0;
109811 -               if ((!extDict) || matchIndex >= dictLimit) {
109812 -                       match = base + matchIndex;
109813 -                       if (match[ml] == ip[ml]) /* potentially better */
109814 -                               currMl = ZSTD_count(ip, match, iLimit);
109815 -               } else {
109816 -                       match = dictBase + matchIndex;
109817 -                       if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
109818 -                               currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
109819 -               }
109821 -               /* save best solution */
109822 -               if (currMl > ml) {
109823 -                       ml = currMl;
109824 -                       *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
109825 -                       if (ip + currMl == iLimit)
109826 -                               break; /* best possible, and avoid read overflow*/
109827 -               }
109829 -               if (matchIndex <= minChain)
109830 -                       break;
109831 -               matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
109832 -       }
109834 -       return ml;
109837 -FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
109838 -                                                  const U32 matchLengthSearch)
109840 -       switch (matchLengthSearch) {
109841 -       default: /* includes case 3 */
109842 -       case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
109843 -       case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
109844 -       case 7:
109845 -       case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
109846 -       }
109849 -FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
109850 -                                                          const U32 matchLengthSearch)
109852 -       switch (matchLengthSearch) {
109853 -       default: /* includes case 3 */
109854 -       case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
109855 -       case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
109856 -       case 7:
109857 -       case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
109858 -       }
109861 -/* *******************************
109862 -*  Common parser - lazy strategy
109863 -*********************************/
109864 -FORCE_INLINE
109865 -void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
109867 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
109868 -       const BYTE *const istart = (const BYTE *)src;
109869 -       const BYTE *ip = istart;
109870 -       const BYTE *anchor = istart;
109871 -       const BYTE *const iend = istart + srcSize;
109872 -       const BYTE *const ilimit = iend - 8;
109873 -       const BYTE *const base = ctx->base + ctx->dictLimit;
109875 -       U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
109876 -       U32 const mls = ctx->params.cParams.searchLength;
109878 -       typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
109879 -       searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
109880 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0;
109882 -       /* init */
109883 -       ip += (ip == base);
109884 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
109885 -       {
109886 -               U32 const maxRep = (U32)(ip - base);
109887 -               if (offset_2 > maxRep)
109888 -                       savedOffset = offset_2, offset_2 = 0;
109889 -               if (offset_1 > maxRep)
109890 -                       savedOffset = offset_1, offset_1 = 0;
109891 -       }
109893 -       /* Match Loop */
109894 -       while (ip < ilimit) {
109895 -               size_t matchLength = 0;
109896 -               size_t offset = 0;
109897 -               const BYTE *start = ip + 1;
109899 -               /* check repCode */
109900 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {
109901 -                       /* repcode : we take it */
109902 -                       matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
109903 -                       if (depth == 0)
109904 -                               goto _storeSequence;
109905 -               }
109907 -               /* first search (depth 0) */
109908 -               {
109909 -                       size_t offsetFound = 99999999;
109910 -                       size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
109911 -                       if (ml2 > matchLength)
109912 -                               matchLength = ml2, start = ip, offset = offsetFound;
109913 -               }
109915 -               if (matchLength < EQUAL_READ32) {
109916 -                       ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
109917 -                       continue;
109918 -               }
109920 -               /* let's try to find a better solution */
109921 -               if (depth >= 1)
109922 -                       while (ip < ilimit) {
109923 -                               ip++;
109924 -                               if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
109925 -                                       size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
109926 -                                       int const gain2 = (int)(mlRep * 3);
109927 -                                       int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
109928 -                                       if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
109929 -                                               matchLength = mlRep, offset = 0, start = ip;
109930 -                               }
109931 -                               {
109932 -                                       size_t offset2 = 99999999;
109933 -                                       size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
109934 -                                       int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
109935 -                                       int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
109936 -                                       if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
109937 -                                               matchLength = ml2, offset = offset2, start = ip;
109938 -                                               continue; /* search a better one */
109939 -                                       }
109940 -                               }
109942 -                               /* let's find an even better one */
109943 -                               if ((depth == 2) && (ip < ilimit)) {
109944 -                                       ip++;
109945 -                                       if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
109946 -                                               size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
109947 -                                               int const gain2 = (int)(ml2 * 4);
109948 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
109949 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
109950 -                                                       matchLength = ml2, offset = 0, start = ip;
109951 -                                       }
109952 -                                       {
109953 -                                               size_t offset2 = 99999999;
109954 -                                               size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
109955 -                                               int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
109956 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
109957 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
109958 -                                                       matchLength = ml2, offset = offset2, start = ip;
109959 -                                                       continue;
109960 -                                               }
109961 -                                       }
109962 -                               }
109963 -                               break; /* nothing found : store previous solution */
109964 -                       }
109966 -               /* NOTE:
109967 -                * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
109968 -                * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
109969 -                * overflows the pointer, which is undefined behavior.
109970 -                */
109971 -               /* catch up */
109972 -               if (offset) {
109973 -                       while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) &&
109974 -                              (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */
109975 -                       {
109976 -                               start--;
109977 -                               matchLength++;
109978 -                       }
109979 -                       offset_2 = offset_1;
109980 -                       offset_1 = (U32)(offset - ZSTD_REP_MOVE);
109981 -               }
109983 -       /* store sequence */
109984 -_storeSequence:
109985 -               {
109986 -                       size_t const litLength = start - anchor;
109987 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
109988 -                       anchor = ip = start + matchLength;
109989 -               }
109991 -               /* check immediate repcode */
109992 -               while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
109993 -                       /* store sequence */
109994 -                       matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32;
109995 -                       offset = offset_2;
109996 -                       offset_2 = offset_1;
109997 -                       offset_1 = (U32)offset; /* swap repcodes */
109998 -                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
109999 -                       ip += matchLength;
110000 -                       anchor = ip;
110001 -                       continue; /* faster when present ... (?) */
110002 -               }
110003 -       }
110005 -       /* Save reps for next block */
110006 -       ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
110007 -       ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
110009 -       /* Last Literals */
110010 -       {
110011 -               size_t const lastLLSize = iend - anchor;
110012 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
110013 -               seqStorePtr->lit += lastLLSize;
110014 -       }
110017 -static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); }
110019 -static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); }
110021 -static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); }
110023 -static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); }
110025 -FORCE_INLINE
110026 -void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
110028 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
110029 -       const BYTE *const istart = (const BYTE *)src;
110030 -       const BYTE *ip = istart;
110031 -       const BYTE *anchor = istart;
110032 -       const BYTE *const iend = istart + srcSize;
110033 -       const BYTE *const ilimit = iend - 8;
110034 -       const BYTE *const base = ctx->base;
110035 -       const U32 dictLimit = ctx->dictLimit;
110036 -       const U32 lowestIndex = ctx->lowLimit;
110037 -       const BYTE *const prefixStart = base + dictLimit;
110038 -       const BYTE *const dictBase = ctx->dictBase;
110039 -       const BYTE *const dictEnd = dictBase + dictLimit;
110040 -       const BYTE *const dictStart = dictBase + ctx->lowLimit;
110042 -       const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
110043 -       const U32 mls = ctx->params.cParams.searchLength;
110045 -       typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
110046 -       searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
110048 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
110050 -       /* init */
110051 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
110052 -       ip += (ip == prefixStart);
110054 -       /* Match Loop */
110055 -       while (ip < ilimit) {
110056 -               size_t matchLength = 0;
110057 -               size_t offset = 0;
110058 -               const BYTE *start = ip + 1;
110059 -               U32 curr = (U32)(ip - base);
110061 -               /* check repCode */
110062 -               {
110063 -                       const U32 repIndex = (U32)(curr + 1 - offset_1);
110064 -                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
110065 -                       const BYTE *const repMatch = repBase + repIndex;
110066 -                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
110067 -                               if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) {
110068 -                                       /* repcode detected we should take it */
110069 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
110070 -                                       matchLength =
110071 -                                           ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
110072 -                                       if (depth == 0)
110073 -                                               goto _storeSequence;
110074 -                               }
110075 -               }
110077 -               /* first search (depth 0) */
110078 -               {
110079 -                       size_t offsetFound = 99999999;
110080 -                       size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
110081 -                       if (ml2 > matchLength)
110082 -                               matchLength = ml2, start = ip, offset = offsetFound;
110083 -               }
110085 -               if (matchLength < EQUAL_READ32) {
110086 -                       ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
110087 -                       continue;
110088 -               }
110090 -               /* let's try to find a better solution */
110091 -               if (depth >= 1)
110092 -                       while (ip < ilimit) {
110093 -                               ip++;
110094 -                               curr++;
110095 -                               /* check repCode */
110096 -                               if (offset) {
110097 -                                       const U32 repIndex = (U32)(curr - offset_1);
110098 -                                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
110099 -                                       const BYTE *const repMatch = repBase + repIndex;
110100 -                                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
110101 -                                               if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
110102 -                                                       /* repcode detected */
110103 -                                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
110104 -                                                       size_t const repLength =
110105 -                                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) +
110106 -                                                           EQUAL_READ32;
110107 -                                                       int const gain2 = (int)(repLength * 3);
110108 -                                                       int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
110109 -                                                       if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
110110 -                                                               matchLength = repLength, offset = 0, start = ip;
110111 -                                               }
110112 -                               }
110114 -                               /* search match, depth 1 */
110115 -                               {
110116 -                                       size_t offset2 = 99999999;
110117 -                                       size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
110118 -                                       int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
110119 -                                       int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
110120 -                                       if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
110121 -                                               matchLength = ml2, offset = offset2, start = ip;
110122 -                                               continue; /* search a better one */
110123 -                                       }
110124 -                               }
110126 -                               /* let's find an even better one */
110127 -                               if ((depth == 2) && (ip < ilimit)) {
110128 -                                       ip++;
110129 -                                       curr++;
110130 -                                       /* check repCode */
110131 -                                       if (offset) {
110132 -                                               const U32 repIndex = (U32)(curr - offset_1);
110133 -                                               const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
110134 -                                               const BYTE *const repMatch = repBase + repIndex;
110135 -                                               if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
110136 -                                                       if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
110137 -                                                               /* repcode detected */
110138 -                                                               const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
110139 -                                                               size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend,
110140 -                                                                                                       repEnd, prefixStart) +
110141 -                                                                                  EQUAL_READ32;
110142 -                                                               int gain2 = (int)(repLength * 4);
110143 -                                                               int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
110144 -                                                               if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
110145 -                                                                       matchLength = repLength, offset = 0, start = ip;
110146 -                                                       }
110147 -                                       }
110149 -                                       /* search match, depth 2 */
110150 -                                       {
110151 -                                               size_t offset2 = 99999999;
110152 -                                               size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
110153 -                                               int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
110154 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
110155 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
110156 -                                                       matchLength = ml2, offset = offset2, start = ip;
110157 -                                                       continue;
110158 -                                               }
110159 -                                       }
110160 -                               }
110161 -                               break; /* nothing found : store previous solution */
110162 -                       }
110164 -               /* catch up */
110165 -               if (offset) {
110166 -                       U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE));
110167 -                       const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
110168 -                       const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
110169 -                       while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) {
110170 -                               start--;
110171 -                               match--;
110172 -                               matchLength++;
110173 -                       } /* catch up */
110174 -                       offset_2 = offset_1;
110175 -                       offset_1 = (U32)(offset - ZSTD_REP_MOVE);
110176 -               }
110178 -       /* store sequence */
110179 -       _storeSequence : {
110180 -               size_t const litLength = start - anchor;
110181 -               ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
110182 -               anchor = ip = start + matchLength;
110183 -       }
110185 -               /* check immediate repcode */
110186 -               while (ip <= ilimit) {
110187 -                       const U32 repIndex = (U32)((ip - base) - offset_2);
110188 -                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
110189 -                       const BYTE *const repMatch = repBase + repIndex;
110190 -                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
110191 -                               if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
110192 -                                       /* repcode detected we should take it */
110193 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
110194 -                                       matchLength =
110195 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
110196 -                                       offset = offset_2;
110197 -                                       offset_2 = offset_1;
110198 -                                       offset_1 = (U32)offset; /* swap offset history */
110199 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
110200 -                                       ip += matchLength;
110201 -                                       anchor = ip;
110202 -                                       continue; /* faster when present ... (?) */
110203 -                               }
110204 -                       break;
110205 -               }
110206 -       }
110208 -       /* Save reps for next block */
110209 -       ctx->repToConfirm[0] = offset_1;
110210 -       ctx->repToConfirm[1] = offset_2;
110212 -       /* Last Literals */
110213 -       {
110214 -               size_t const lastLLSize = iend - anchor;
110215 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
110216 -               seqStorePtr->lit += lastLLSize;
110217 -       }
110220 -void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); }
110222 -static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
110224 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
110227 -static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
110229 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
110232 -static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
110234 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
110237 -/* The optimal parser */
110238 -#include "zstd_opt.h"
110240 -static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
110242 -#ifdef ZSTD_OPT_H_91842398743
110243 -       ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
110244 -#else
110245 -       (void)ctx;
110246 -       (void)src;
110247 -       (void)srcSize;
110248 -       return;
110249 -#endif
110252 -static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
110254 -#ifdef ZSTD_OPT_H_91842398743
110255 -       ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
110256 -#else
110257 -       (void)ctx;
110258 -       (void)src;
110259 -       (void)srcSize;
110260 -       return;
110261 -#endif
110264 -static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
110266 -#ifdef ZSTD_OPT_H_91842398743
110267 -       ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
110268 -#else
110269 -       (void)ctx;
110270 -       (void)src;
110271 -       (void)srcSize;
110272 -       return;
110273 -#endif
110276 -static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
110278 -#ifdef ZSTD_OPT_H_91842398743
110279 -       ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
110280 -#else
110281 -       (void)ctx;
110282 -       (void)src;
110283 -       (void)srcSize;
110284 -       return;
110285 -#endif
110288 -typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize);
110290 -static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
110292 -       static const ZSTD_blockCompressor blockCompressor[2][8] = {
110293 -           {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2,
110294 -            ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2},
110295 -           {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,
110296 -            ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}};
110298 -       return blockCompressor[extDict][(U32)strat];
110301 -static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
110303 -       ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
110304 -       const BYTE *const base = zc->base;
110305 -       const BYTE *const istart = (const BYTE *)src;
110306 -       const U32 curr = (U32)(istart - base);
110307 -       if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1)
110308 -               return 0; /* don't even attempt compression below a certain srcSize */
110309 -       ZSTD_resetSeqStore(&(zc->seqStore));
110310 -       if (curr > zc->nextToUpdate + 384)
110311 -               zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */
110312 -       blockCompressor(zc, src, srcSize);
110313 -       return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
110316 -/*! ZSTD_compress_generic() :
110317 -*   Compress a chunk of data into one or multiple blocks.
110318 -*   All blocks will be terminated, all input will be consumed.
110319 -*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
110320 -*   Frame is supposed already started (header already produced)
110321 -*   @return : compressed size, or an error code
110323 -static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk)
110325 -       size_t blockSize = cctx->blockSize;
110326 -       size_t remaining = srcSize;
110327 -       const BYTE *ip = (const BYTE *)src;
110328 -       BYTE *const ostart = (BYTE *)dst;
110329 -       BYTE *op = ostart;
110330 -       U32 const maxDist = 1 << cctx->params.cParams.windowLog;
110332 -       if (cctx->params.fParams.checksumFlag && srcSize)
110333 -               xxh64_update(&cctx->xxhState, src, srcSize);
110335 -       while (remaining) {
110336 -               U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
110337 -               size_t cSize;
110339 -               if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
110340 -                       return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
110341 -               if (remaining < blockSize)
110342 -                       blockSize = remaining;
110344 -               /* preemptive overflow correction */
110345 -               if (cctx->lowLimit > (3U << 29)) {
110346 -                       U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
110347 -                       U32 const curr = (U32)(ip - cctx->base);
110348 -                       U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
110349 -                       U32 const correction = curr - newCurr;
110350 -                       ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
110351 -                       ZSTD_reduceIndex(cctx, correction);
110352 -                       cctx->base += correction;
110353 -                       cctx->dictBase += correction;
110354 -                       cctx->lowLimit -= correction;
110355 -                       cctx->dictLimit -= correction;
110356 -                       if (cctx->nextToUpdate < correction)
110357 -                               cctx->nextToUpdate = 0;
110358 -                       else
110359 -                               cctx->nextToUpdate -= correction;
110360 -               }
110362 -               if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
110363 -                       /* enforce maxDist */
110364 -                       U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist;
110365 -                       if (cctx->lowLimit < newLowLimit)
110366 -                               cctx->lowLimit = newLowLimit;
110367 -                       if (cctx->dictLimit < cctx->lowLimit)
110368 -                               cctx->dictLimit = cctx->lowLimit;
110369 -               }
110371 -               cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize);
110372 -               if (ZSTD_isError(cSize))
110373 -                       return cSize;
110375 -               if (cSize == 0) { /* block is not compressible */
110376 -                       U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3);
110377 -                       if (blockSize + ZSTD_blockHeaderSize > dstCapacity)
110378 -                               return ERROR(dstSize_tooSmall);
110379 -                       ZSTD_writeLE32(op, cBlockHeader24); /* no pb, 4th byte will be overwritten */
110380 -                       memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
110381 -                       cSize = ZSTD_blockHeaderSize + blockSize;
110382 -               } else {
110383 -                       U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3);
110384 -                       ZSTD_writeLE24(op, cBlockHeader24);
110385 -                       cSize += ZSTD_blockHeaderSize;
110386 -               }
110388 -               remaining -= blockSize;
110389 -               dstCapacity -= cSize;
110390 -               ip += blockSize;
110391 -               op += cSize;
110392 -       }
110394 -       if (lastFrameChunk && (op > ostart))
110395 -               cctx->stage = ZSTDcs_ending;
110396 -       return op - ostart;
110399 -static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
110401 -       BYTE *const op = (BYTE *)dst;
110402 -       U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */
110403 -       U32 const checksumFlag = params.fParams.checksumFlag > 0;
110404 -       U32 const windowSize = 1U << params.cParams.windowLog;
110405 -       U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
110406 -       BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
110407 -       U32 const fcsCode =
110408 -           params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */
110409 -       BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
110410 -       size_t pos;
110412 -       if (dstCapacity < ZSTD_frameHeaderSize_max)
110413 -               return ERROR(dstSize_tooSmall);
110415 -       ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
110416 -       op[4] = frameHeaderDecriptionByte;
110417 -       pos = 5;
110418 -       if (!singleSegment)
110419 -               op[pos++] = windowLogByte;
110420 -       switch (dictIDSizeCode) {
110421 -       default: /* impossible */
110422 -       case 0: break;
110423 -       case 1:
110424 -               op[pos] = (BYTE)(dictID);
110425 -               pos++;
110426 -               break;
110427 -       case 2:
110428 -               ZSTD_writeLE16(op + pos, (U16)dictID);
110429 -               pos += 2;
110430 -               break;
110431 -       case 3:
110432 -               ZSTD_writeLE32(op + pos, dictID);
110433 -               pos += 4;
110434 -               break;
110435 -       }
110436 -       switch (fcsCode) {
110437 -       default: /* impossible */
110438 -       case 0:
110439 -               if (singleSegment)
110440 -                       op[pos++] = (BYTE)(pledgedSrcSize);
110441 -               break;
110442 -       case 1:
110443 -               ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256));
110444 -               pos += 2;
110445 -               break;
110446 -       case 2:
110447 -               ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize));
110448 -               pos += 4;
110449 -               break;
110450 -       case 3:
110451 -               ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize));
110452 -               pos += 8;
110453 -               break;
110454 -       }
110455 -       return pos;
110458 -static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk)
110460 -       const BYTE *const ip = (const BYTE *)src;
110461 -       size_t fhSize = 0;
110463 -       if (cctx->stage == ZSTDcs_created)
110464 -               return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
110466 -       if (frame && (cctx->stage == ZSTDcs_init)) {
110467 -               fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
110468 -               if (ZSTD_isError(fhSize))
110469 -                       return fhSize;
110470 -               dstCapacity -= fhSize;
110471 -               dst = (char *)dst + fhSize;
110472 -               cctx->stage = ZSTDcs_ongoing;
110473 -       }
110475 -       /* Check if blocks follow each other */
110476 -       if (src != cctx->nextSrc) {
110477 -               /* not contiguous */
110478 -               ptrdiff_t const delta = cctx->nextSrc - ip;
110479 -               cctx->lowLimit = cctx->dictLimit;
110480 -               cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
110481 -               cctx->dictBase = cctx->base;
110482 -               cctx->base -= delta;
110483 -               cctx->nextToUpdate = cctx->dictLimit;
110484 -               if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE)
110485 -                       cctx->lowLimit = cctx->dictLimit; /* too small extDict */
110486 -       }
110488 -       /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
110489 -       if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
110490 -               ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
110491 -               U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
110492 -               cctx->lowLimit = lowLimitMax;
110493 -       }
110495 -       cctx->nextSrc = ip + srcSize;
110497 -       if (srcSize) {
110498 -               size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk)
110499 -                                          : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize);
110500 -               if (ZSTD_isError(cSize))
110501 -                       return cSize;
110502 -               return cSize + fhSize;
110503 -       } else
110504 -               return fhSize;
110507 -size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
110509 -       return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
110512 -size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); }
110514 -size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
110516 -       size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
110517 -       if (srcSize > blockSizeMax)
110518 -               return ERROR(srcSize_wrong);
110519 -       return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
110522 -/*! ZSTD_loadDictionaryContent() :
110523 - *  @return : 0, or an error code
110524 - */
110525 -static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize)
110527 -       const BYTE *const ip = (const BYTE *)src;
110528 -       const BYTE *const iend = ip + srcSize;
110530 -       /* input becomes curr prefix */
110531 -       zc->lowLimit = zc->dictLimit;
110532 -       zc->dictLimit = (U32)(zc->nextSrc - zc->base);
110533 -       zc->dictBase = zc->base;
110534 -       zc->base += ip - zc->nextSrc;
110535 -       zc->nextToUpdate = zc->dictLimit;
110536 -       zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);
110538 -       zc->nextSrc = iend;
110539 -       if (srcSize <= HASH_READ_SIZE)
110540 -               return 0;
110542 -       switch (zc->params.cParams.strategy) {
110543 -       case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break;
110545 -       case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break;
110547 -       case ZSTD_greedy:
110548 -       case ZSTD_lazy:
110549 -       case ZSTD_lazy2:
110550 -               if (srcSize >= HASH_READ_SIZE)
110551 -                       ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength);
110552 -               break;
110554 -       case ZSTD_btlazy2:
110555 -       case ZSTD_btopt:
110556 -       case ZSTD_btopt2:
110557 -               if (srcSize >= HASH_READ_SIZE)
110558 -                       ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
110559 -               break;
110561 -       default:
110562 -               return ERROR(GENERIC); /* strategy doesn't exist; impossible */
110563 -       }
110565 -       zc->nextToUpdate = (U32)(iend - zc->base);
110566 -       return 0;
110569 -/* Dictionaries that assign zero probability to symbols that show up causes problems
110570 -   when FSE encoding.  Refuse dictionaries that assign zero probability to symbols
110571 -   that we may encounter during compression.
110572 -   NOTE: This behavior is not standard and could be improved in the future. */
110573 -static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
110575 -       U32 s;
110576 -       if (dictMaxSymbolValue < maxSymbolValue)
110577 -               return ERROR(dictionary_corrupted);
110578 -       for (s = 0; s <= maxSymbolValue; ++s) {
110579 -               if (normalizedCounter[s] == 0)
110580 -                       return ERROR(dictionary_corrupted);
110581 -       }
110582 -       return 0;
110585 -/* Dictionary format :
110586 - * See :
110587 - * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
110588 - */
110589 -/*! ZSTD_loadZstdDictionary() :
110590 - * @return : 0, or an error code
110591 - *  assumptions : magic number supposed already checked
110592 - *                dictSize supposed > 8
110593 - */
110594 -static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
110596 -       const BYTE *dictPtr = (const BYTE *)dict;
110597 -       const BYTE *const dictEnd = dictPtr + dictSize;
110598 -       short offcodeNCount[MaxOff + 1];
110599 -       unsigned offcodeMaxValue = MaxOff;
110601 -       dictPtr += 4; /* skip magic number */
110602 -       cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr);
110603 -       dictPtr += 4;
110605 -       {
110606 -               size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters));
110607 -               if (HUF_isError(hufHeaderSize))
110608 -                       return ERROR(dictionary_corrupted);
110609 -               dictPtr += hufHeaderSize;
110610 -       }
110612 -       {
110613 -               unsigned offcodeLog;
110614 -               size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
110615 -               if (FSE_isError(offcodeHeaderSize))
110616 -                       return ERROR(dictionary_corrupted);
110617 -               if (offcodeLog > OffFSELog)
110618 -                       return ERROR(dictionary_corrupted);
110619 -               /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
110620 -               CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
110621 -                       dictionary_corrupted);
110622 -               dictPtr += offcodeHeaderSize;
110623 -       }
110625 -       {
110626 -               short matchlengthNCount[MaxML + 1];
110627 -               unsigned matchlengthMaxValue = MaxML, matchlengthLog;
110628 -               size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
110629 -               if (FSE_isError(matchlengthHeaderSize))
110630 -                       return ERROR(dictionary_corrupted);
110631 -               if (matchlengthLog > MLFSELog)
110632 -                       return ERROR(dictionary_corrupted);
110633 -               /* Every match length code must have non-zero probability */
110634 -               CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
110635 -               CHECK_E(
110636 -                   FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
110637 -                   dictionary_corrupted);
110638 -               dictPtr += matchlengthHeaderSize;
110639 -       }
110641 -       {
110642 -               short litlengthNCount[MaxLL + 1];
110643 -               unsigned litlengthMaxValue = MaxLL, litlengthLog;
110644 -               size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
110645 -               if (FSE_isError(litlengthHeaderSize))
110646 -                       return ERROR(dictionary_corrupted);
110647 -               if (litlengthLog > LLFSELog)
110648 -                       return ERROR(dictionary_corrupted);
110649 -               /* Every literal length code must have non-zero probability */
110650 -               CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
110651 -               CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
110652 -                       dictionary_corrupted);
110653 -               dictPtr += litlengthHeaderSize;
110654 -       }
110656 -       if (dictPtr + 12 > dictEnd)
110657 -               return ERROR(dictionary_corrupted);
110658 -       cctx->rep[0] = ZSTD_readLE32(dictPtr + 0);
110659 -       cctx->rep[1] = ZSTD_readLE32(dictPtr + 4);
110660 -       cctx->rep[2] = ZSTD_readLE32(dictPtr + 8);
110661 -       dictPtr += 12;
110663 -       {
110664 -               size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
110665 -               U32 offcodeMax = MaxOff;
110666 -               if (dictContentSize <= ((U32)-1) - 128 KB) {
110667 -                       U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
110668 -                       offcodeMax = ZSTD_highbit32(maxOffset);              /* Calculate minimum offset code required to represent maxOffset */
110669 -               }
110670 -               /* All offset values <= dictContentSize + 128 KB must be representable */
110671 -               CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
110672 -               /* All repCodes must be <= dictContentSize and != 0*/
110673 -               {
110674 -                       U32 u;
110675 -                       for (u = 0; u < 3; u++) {
110676 -                               if (cctx->rep[u] == 0)
110677 -                                       return ERROR(dictionary_corrupted);
110678 -                               if (cctx->rep[u] > dictContentSize)
110679 -                                       return ERROR(dictionary_corrupted);
110680 -                       }
110681 -               }
110683 -               cctx->flagStaticTables = 1;
110684 -               cctx->flagStaticHufTable = HUF_repeat_valid;
110685 -               return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
110686 -       }
110689 -/** ZSTD_compress_insertDictionary() :
110690 -*   @return : 0, or an error code */
110691 -static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
110693 -       if ((dict == NULL) || (dictSize <= 8))
110694 -               return 0;
110696 -       /* dict as pure content */
110697 -       if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict))
110698 -               return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
110700 -       /* dict as zstd dictionary */
110701 -       return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
110704 -/*! ZSTD_compressBegin_internal() :
110705 -*   @return : 0, or an error code */
110706 -static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize)
110708 -       ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
110709 -       CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
110710 -       return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
110713 -/*! ZSTD_compressBegin_advanced() :
110714 -*   @return : 0, or an error code */
110715 -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
110717 -       /* compression parameters verification and optimization */
110718 -       CHECK_F(ZSTD_checkCParams(params.cParams));
110719 -       return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
110722 -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel)
110724 -       ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
110725 -       return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
110728 -size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); }
110730 -/*! ZSTD_writeEpilogue() :
110731 -*   Ends a frame.
110732 -*   @return : nb of bytes written into dst (or an error code) */
110733 -static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity)
110735 -       BYTE *const ostart = (BYTE *)dst;
110736 -       BYTE *op = ostart;
110737 -       size_t fhSize = 0;
110739 -       if (cctx->stage == ZSTDcs_created)
110740 -               return ERROR(stage_wrong); /* init missing */
110742 -       /* special case : empty frame */
110743 -       if (cctx->stage == ZSTDcs_init) {
110744 -               fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
110745 -               if (ZSTD_isError(fhSize))
110746 -                       return fhSize;
110747 -               dstCapacity -= fhSize;
110748 -               op += fhSize;
110749 -               cctx->stage = ZSTDcs_ongoing;
110750 -       }
110752 -       if (cctx->stage != ZSTDcs_ending) {
110753 -               /* write one last empty block, make it the "last" block */
110754 -               U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0;
110755 -               if (dstCapacity < 4)
110756 -                       return ERROR(dstSize_tooSmall);
110757 -               ZSTD_writeLE32(op, cBlockHeader24);
110758 -               op += ZSTD_blockHeaderSize;
110759 -               dstCapacity -= ZSTD_blockHeaderSize;
110760 -       }
110762 -       if (cctx->params.fParams.checksumFlag) {
110763 -               U32 const checksum = (U32)xxh64_digest(&cctx->xxhState);
110764 -               if (dstCapacity < 4)
110765 -                       return ERROR(dstSize_tooSmall);
110766 -               ZSTD_writeLE32(op, checksum);
110767 -               op += 4;
110768 -       }
110770 -       cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
110771 -       return op - ostart;
110774 -size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
110776 -       size_t endResult;
110777 -       size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
110778 -       if (ZSTD_isError(cSize))
110779 -               return cSize;
110780 -       endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize);
110781 -       if (ZSTD_isError(endResult))
110782 -               return endResult;
110783 -       return cSize + endResult;
110786 -static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
110787 -                                    ZSTD_parameters params)
110789 -       CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
110790 -       return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
110793 -size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
110794 -                              ZSTD_parameters params)
110796 -       return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
110799 -size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params)
110801 -       return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
110804 -/* =====  Dictionary API  ===== */
110806 -struct ZSTD_CDict_s {
110807 -       void *dictBuffer;
110808 -       const void *dictContent;
110809 -       size_t dictContentSize;
110810 -       ZSTD_CCtx *refContext;
110811 -}; /* typedef'd tp ZSTD_CDict within "zstd.h" */
110813 -size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); }
110815 -static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem)
110817 -       if (!customMem.customAlloc || !customMem.customFree)
110818 -               return NULL;
110820 -       {
110821 -               ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
110822 -               ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem);
110824 -               if (!cdict || !cctx) {
110825 -                       ZSTD_free(cdict, customMem);
110826 -                       ZSTD_freeCCtx(cctx);
110827 -                       return NULL;
110828 -               }
110830 -               if ((byReference) || (!dictBuffer) || (!dictSize)) {
110831 -                       cdict->dictBuffer = NULL;
110832 -                       cdict->dictContent = dictBuffer;
110833 -               } else {
110834 -                       void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
110835 -                       if (!internalBuffer) {
110836 -                               ZSTD_free(cctx, customMem);
110837 -                               ZSTD_free(cdict, customMem);
110838 -                               return NULL;
110839 -                       }
110840 -                       memcpy(internalBuffer, dictBuffer, dictSize);
110841 -                       cdict->dictBuffer = internalBuffer;
110842 -                       cdict->dictContent = internalBuffer;
110843 -               }
110845 -               {
110846 -                       size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
110847 -                       if (ZSTD_isError(errorCode)) {
110848 -                               ZSTD_free(cdict->dictBuffer, customMem);
110849 -                               ZSTD_free(cdict, customMem);
110850 -                               ZSTD_freeCCtx(cctx);
110851 -                               return NULL;
110852 -                       }
110853 -               }
110855 -               cdict->refContext = cctx;
110856 -               cdict->dictContentSize = dictSize;
110857 -               return cdict;
110858 -       }
110861 -ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize)
110863 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
110864 -       return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem);
110867 -size_t ZSTD_freeCDict(ZSTD_CDict *cdict)
110869 -       if (cdict == NULL)
110870 -               return 0; /* support free on NULL */
110871 -       {
110872 -               ZSTD_customMem const cMem = cdict->refContext->customMem;
110873 -               ZSTD_freeCCtx(cdict->refContext);
110874 -               ZSTD_free(cdict->dictBuffer, cMem);
110875 -               ZSTD_free(cdict, cMem);
110876 -               return 0;
110877 -       }
110880 -static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); }
110882 -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize)
110884 -       if (cdict->dictContentSize)
110885 -               CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
110886 -       else {
110887 -               ZSTD_parameters params = cdict->refContext->params;
110888 -               params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
110889 -               CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
110890 -       }
110891 -       return 0;
110894 -/*! ZSTD_compress_usingCDict() :
110895 -*   Compression using a digested Dictionary.
110896 -*   Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
110897 -*   Note that compression level is decided during dictionary creation */
110898 -size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict)
110900 -       CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
110902 -       if (cdict->refContext->params.fParams.contentSizeFlag == 1) {
110903 -               cctx->params.fParams.contentSizeFlag = 1;
110904 -               cctx->frameContentSize = srcSize;
110905 -       } else {
110906 -               cctx->params.fParams.contentSizeFlag = 0;
110907 -       }
110909 -       return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
110912 -/* ******************************************************************
110913 -*  Streaming
110914 -********************************************************************/
110916 -typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
110918 -struct ZSTD_CStream_s {
110919 -       ZSTD_CCtx *cctx;
110920 -       ZSTD_CDict *cdictLocal;
110921 -       const ZSTD_CDict *cdict;
110922 -       char *inBuff;
110923 -       size_t inBuffSize;
110924 -       size_t inToCompress;
110925 -       size_t inBuffPos;
110926 -       size_t inBuffTarget;
110927 -       size_t blockSize;
110928 -       char *outBuff;
110929 -       size_t outBuffSize;
110930 -       size_t outBuffContentSize;
110931 -       size_t outBuffFlushedSize;
110932 -       ZSTD_cStreamStage stage;
110933 -       U32 checksum;
110934 -       U32 frameEnded;
110935 -       U64 pledgedSrcSize;
110936 -       U64 inputProcessed;
110937 -       ZSTD_parameters params;
110938 -       ZSTD_customMem customMem;
110939 -}; /* typedef'd to ZSTD_CStream within "zstd.h" */
110941 -size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams)
110943 -       size_t const inBuffSize = (size_t)1 << cParams.windowLog;
110944 -       size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize);
110945 -       size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
110947 -       return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
110950 -ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem)
110952 -       ZSTD_CStream *zcs;
110954 -       if (!customMem.customAlloc || !customMem.customFree)
110955 -               return NULL;
110957 -       zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
110958 -       if (zcs == NULL)
110959 -               return NULL;
110960 -       memset(zcs, 0, sizeof(ZSTD_CStream));
110961 -       memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
110962 -       zcs->cctx = ZSTD_createCCtx_advanced(customMem);
110963 -       if (zcs->cctx == NULL) {
110964 -               ZSTD_freeCStream(zcs);
110965 -               return NULL;
110966 -       }
110967 -       return zcs;
110970 -size_t ZSTD_freeCStream(ZSTD_CStream *zcs)
110972 -       if (zcs == NULL)
110973 -               return 0; /* support free on NULL */
110974 -       {
110975 -               ZSTD_customMem const cMem = zcs->customMem;
110976 -               ZSTD_freeCCtx(zcs->cctx);
110977 -               zcs->cctx = NULL;
110978 -               ZSTD_freeCDict(zcs->cdictLocal);
110979 -               zcs->cdictLocal = NULL;
110980 -               ZSTD_free(zcs->inBuff, cMem);
110981 -               zcs->inBuff = NULL;
110982 -               ZSTD_free(zcs->outBuff, cMem);
110983 -               zcs->outBuff = NULL;
110984 -               ZSTD_free(zcs, cMem);
110985 -               return 0;
110986 -       }
110989 -/*======   Initialization   ======*/
110991 -size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
110992 -size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; }
110994 -static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
110996 -       if (zcs->inBuffSize == 0)
110997 -               return ERROR(stage_wrong); /* zcs has not been init at least once => can't reset */
110999 -       if (zcs->cdict)
111000 -               CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
111001 -       else
111002 -               CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
111004 -       zcs->inToCompress = 0;
111005 -       zcs->inBuffPos = 0;
111006 -       zcs->inBuffTarget = zcs->blockSize;
111007 -       zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
111008 -       zcs->stage = zcss_load;
111009 -       zcs->frameEnded = 0;
111010 -       zcs->pledgedSrcSize = pledgedSrcSize;
111011 -       zcs->inputProcessed = 0;
111012 -       return 0; /* ready to go */
111015 -size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
111018 -       zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
111020 -       return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
111023 -static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
111025 -       /* allocate buffers */
111026 -       {
111027 -               size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
111028 -               if (zcs->inBuffSize < neededInBuffSize) {
111029 -                       zcs->inBuffSize = neededInBuffSize;
111030 -                       ZSTD_free(zcs->inBuff, zcs->customMem);
111031 -                       zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem);
111032 -                       if (zcs->inBuff == NULL)
111033 -                               return ERROR(memory_allocation);
111034 -               }
111035 -               zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
111036 -       }
111037 -       if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) {
111038 -               zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1;
111039 -               ZSTD_free(zcs->outBuff, zcs->customMem);
111040 -               zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
111041 -               if (zcs->outBuff == NULL)
111042 -                       return ERROR(memory_allocation);
111043 -       }
111045 -       if (dict && dictSize >= 8) {
111046 -               ZSTD_freeCDict(zcs->cdictLocal);
111047 -               zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
111048 -               if (zcs->cdictLocal == NULL)
111049 -                       return ERROR(memory_allocation);
111050 -               zcs->cdict = zcs->cdictLocal;
111051 -       } else
111052 -               zcs->cdict = NULL;
111054 -       zcs->checksum = params.fParams.checksumFlag > 0;
111055 -       zcs->params = params;
111057 -       return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
111060 -ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
111062 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
111063 -       ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem);
111064 -       if (zcs) {
111065 -               size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
111066 -               if (ZSTD_isError(code)) {
111067 -                       return NULL;
111068 -               }
111069 -       }
111070 -       return zcs;
111073 -ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
111075 -       ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
111076 -       ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize);
111077 -       if (zcs) {
111078 -               zcs->cdict = cdict;
111079 -               if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) {
111080 -                       return NULL;
111081 -               }
111082 -       }
111083 -       return zcs;
111086 -/*======   Compression   ======*/
111088 -typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;
111090 -ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
111092 -       size_t const length = MIN(dstCapacity, srcSize);
111093 -       memcpy(dst, src, length);
111094 -       return length;
111097 -static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush)
111099 -       U32 someMoreWork = 1;
111100 -       const char *const istart = (const char *)src;
111101 -       const char *const iend = istart + *srcSizePtr;
111102 -       const char *ip = istart;
111103 -       char *const ostart = (char *)dst;
111104 -       char *const oend = ostart + *dstCapacityPtr;
111105 -       char *op = ostart;
111107 -       while (someMoreWork) {
111108 -               switch (zcs->stage) {
111109 -               case zcss_init:
111110 -                       return ERROR(init_missing); /* call ZBUFF_compressInit() first ! */
111112 -               case zcss_load:
111113 -                       /* complete inBuffer */
111114 -                       {
111115 -                               size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
111116 -                               size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip);
111117 -                               zcs->inBuffPos += loaded;
111118 -                               ip += loaded;
111119 -                               if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) {
111120 -                                       someMoreWork = 0;
111121 -                                       break; /* not enough input to get a full block : stop there, wait for more */
111122 -                               }
111123 -                       }
111124 -                       /* compress curr block (note : this stage cannot be stopped in the middle) */
111125 -                       {
111126 -                               void *cDst;
111127 -                               size_t cSize;
111128 -                               size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
111129 -                               size_t oSize = oend - op;
111130 -                               if (oSize >= ZSTD_compressBound(iSize))
111131 -                                       cDst = op; /* compress directly into output buffer (avoid flush stage) */
111132 -                               else
111133 -                                       cDst = zcs->outBuff, oSize = zcs->outBuffSize;
111134 -                               cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize)
111135 -                                                          : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
111136 -                               if (ZSTD_isError(cSize))
111137 -                                       return cSize;
111138 -                               if (flush == zsf_end)
111139 -                                       zcs->frameEnded = 1;
111140 -                               /* prepare next block */
111141 -                               zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
111142 -                               if (zcs->inBuffTarget > zcs->inBuffSize)
111143 -                                       zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */
111144 -                               zcs->inToCompress = zcs->inBuffPos;
111145 -                               if (cDst == op) {
111146 -                                       op += cSize;
111147 -                                       break;
111148 -                               } /* no need to flush */
111149 -                               zcs->outBuffContentSize = cSize;
111150 -                               zcs->outBuffFlushedSize = 0;
111151 -                               zcs->stage = zcss_flush; /* pass-through to flush stage */
111152 -                       }
111153 -                       fallthrough;
111155 -               case zcss_flush: {
111156 -                       size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
111157 -                       size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
111158 -                       op += flushed;
111159 -                       zcs->outBuffFlushedSize += flushed;
111160 -                       if (toFlush != flushed) {
111161 -                               someMoreWork = 0;
111162 -                               break;
111163 -                       } /* dst too small to store flushed data : stop there */
111164 -                       zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
111165 -                       zcs->stage = zcss_load;
111166 -                       break;
111167 -               }
111169 -               case zcss_final:
111170 -                       someMoreWork = 0; /* do nothing */
111171 -                       break;
111173 -               default:
111174 -                       return ERROR(GENERIC); /* impossible */
111175 -               }
111176 -       }
111178 -       *srcSizePtr = ip - istart;
111179 -       *dstCapacityPtr = op - ostart;
111180 -       zcs->inputProcessed += *srcSizePtr;
111181 -       if (zcs->frameEnded)
111182 -               return 0;
111183 -       {
111184 -               size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
111185 -               if (hintInSize == 0)
111186 -                       hintInSize = zcs->blockSize;
111187 -               return hintInSize;
111188 -       }
111191 -size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
111193 -       size_t sizeRead = input->size - input->pos;
111194 -       size_t sizeWritten = output->size - output->pos;
111195 -       size_t const result =
111196 -           ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather);
111197 -       input->pos += sizeRead;
111198 -       output->pos += sizeWritten;
111199 -       return result;
111202 -/*======   Finalize   ======*/
111204 -/*! ZSTD_flushStream() :
111205 -*   @return : amount of data remaining to flush */
111206 -size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
111208 -       size_t srcSize = 0;
111209 -       size_t sizeWritten = output->size - output->pos;
111210 -       size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize,
111211 -                                                         &srcSize, /* use a valid src address instead of NULL */
111212 -                                                         zsf_flush);
111213 -       output->pos += sizeWritten;
111214 -       if (ZSTD_isError(result))
111215 -               return result;
111216 -       return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
111219 -size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
111221 -       BYTE *const ostart = (BYTE *)(output->dst) + output->pos;
111222 -       BYTE *const oend = (BYTE *)(output->dst) + output->size;
111223 -       BYTE *op = ostart;
111225 -       if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
111226 -               return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */
111228 -       if (zcs->stage != zcss_final) {
111229 -               /* flush whatever remains */
111230 -               size_t srcSize = 0;
111231 -               size_t sizeWritten = output->size - output->pos;
111232 -               size_t const notEnded =
111233 -                   ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */
111234 -               size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
111235 -               op += sizeWritten;
111236 -               if (remainingToFlush) {
111237 -                       output->pos += sizeWritten;
111238 -                       return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
111239 -               }
111240 -               /* create epilogue */
111241 -               zcs->stage = zcss_final;
111242 -               zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL,
111243 -                                                                          0); /* write epilogue, including final empty block, into outBuff */
111244 -       }
111246 -       /* flush epilogue */
111247 -       {
111248 -               size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
111249 -               size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
111250 -               op += flushed;
111251 -               zcs->outBuffFlushedSize += flushed;
111252 -               output->pos += op - ostart;
111253 -               if (toFlush == flushed)
111254 -                       zcs->stage = zcss_init; /* end reached */
111255 -               return toFlush - flushed;
111256 -       }
111259 -/*-=====  Pre-defined compression levels  =====-*/
111261 -#define ZSTD_DEFAULT_CLEVEL 1
111262 -#define ZSTD_MAX_CLEVEL 22
111263 -int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
111265 -static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = {
111266 -    {
111267 -       /* "default" */
111268 -       /* W,  C,  H,  S,  L, TL, strat */
111269 -       {18, 12, 12, 1, 7, 16, ZSTD_fast},    /* level  0 - never used */
111270 -       {19, 13, 14, 1, 7, 16, ZSTD_fast},    /* level  1 */
111271 -       {19, 15, 16, 1, 6, 16, ZSTD_fast},    /* level  2 */
111272 -       {20, 16, 17, 1, 5, 16, ZSTD_dfast},   /* level  3.*/
111273 -       {20, 18, 18, 1, 5, 16, ZSTD_dfast},   /* level  4.*/
111274 -       {20, 15, 18, 3, 5, 16, ZSTD_greedy},  /* level  5 */
111275 -       {21, 16, 19, 2, 5, 16, ZSTD_lazy},    /* level  6 */
111276 -       {21, 17, 20, 3, 5, 16, ZSTD_lazy},    /* level  7 */
111277 -       {21, 18, 20, 3, 5, 16, ZSTD_lazy2},   /* level  8 */
111278 -       {21, 20, 20, 3, 5, 16, ZSTD_lazy2},   /* level  9 */
111279 -       {21, 19, 21, 4, 5, 16, ZSTD_lazy2},   /* level 10 */
111280 -       {22, 20, 22, 4, 5, 16, ZSTD_lazy2},   /* level 11 */
111281 -       {22, 20, 22, 5, 5, 16, ZSTD_lazy2},   /* level 12 */
111282 -       {22, 21, 22, 5, 5, 16, ZSTD_lazy2},   /* level 13 */
111283 -       {22, 21, 22, 6, 5, 16, ZSTD_lazy2},   /* level 14 */
111284 -       {22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */
111285 -       {23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */
111286 -       {23, 21, 22, 4, 5, 24, ZSTD_btopt},   /* level 17 */
111287 -       {23, 23, 22, 6, 5, 32, ZSTD_btopt},   /* level 18 */
111288 -       {23, 23, 22, 6, 3, 48, ZSTD_btopt},   /* level 19 */
111289 -       {25, 25, 23, 7, 3, 64, ZSTD_btopt2},  /* level 20 */
111290 -       {26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */
111291 -       {27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */
111292 -    },
111293 -    {
111294 -       /* for srcSize <= 256 KB */
111295 -       /* W,  C,  H,  S,  L,  T, strat */
111296 -       {0, 0, 0, 0, 0, 0, ZSTD_fast},   /* level  0 - not used */
111297 -       {18, 13, 14, 1, 6, 8, ZSTD_fast},      /* level  1 */
111298 -       {18, 14, 13, 1, 5, 8, ZSTD_dfast},     /* level  2 */
111299 -       {18, 16, 15, 1, 5, 8, ZSTD_dfast},     /* level  3 */
111300 -       {18, 15, 17, 1, 5, 8, ZSTD_greedy},    /* level  4.*/
111301 -       {18, 16, 17, 4, 5, 8, ZSTD_greedy},    /* level  5.*/
111302 -       {18, 16, 17, 3, 5, 8, ZSTD_lazy},      /* level  6.*/
111303 -       {18, 17, 17, 4, 4, 8, ZSTD_lazy},      /* level  7 */
111304 -       {18, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
111305 -       {18, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
111306 -       {18, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
111307 -       {18, 18, 17, 6, 4, 8, ZSTD_lazy2},     /* level 11.*/
111308 -       {18, 18, 17, 7, 4, 8, ZSTD_lazy2},     /* level 12.*/
111309 -       {18, 19, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13 */
111310 -       {18, 18, 18, 4, 4, 16, ZSTD_btopt},    /* level 14.*/
111311 -       {18, 18, 18, 4, 3, 16, ZSTD_btopt},    /* level 15.*/
111312 -       {18, 19, 18, 6, 3, 32, ZSTD_btopt},    /* level 16.*/
111313 -       {18, 19, 18, 8, 3, 64, ZSTD_btopt},    /* level 17.*/
111314 -       {18, 19, 18, 9, 3, 128, ZSTD_btopt},   /* level 18.*/
111315 -       {18, 19, 18, 10, 3, 256, ZSTD_btopt},  /* level 19.*/
111316 -       {18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/
111317 -       {18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/
111318 -       {18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/
111319 -    },
111320 -    {
111321 -       /* for srcSize <= 128 KB */
111322 -       /* W,  C,  H,  S,  L,  T, strat */
111323 -       {17, 12, 12, 1, 7, 8, ZSTD_fast},      /* level  0 - not used */
111324 -       {17, 12, 13, 1, 6, 8, ZSTD_fast},      /* level  1 */
111325 -       {17, 13, 16, 1, 5, 8, ZSTD_fast},      /* level  2 */
111326 -       {17, 16, 16, 2, 5, 8, ZSTD_dfast},     /* level  3 */
111327 -       {17, 13, 15, 3, 4, 8, ZSTD_greedy},    /* level  4 */
111328 -       {17, 15, 17, 4, 4, 8, ZSTD_greedy},    /* level  5 */
111329 -       {17, 16, 17, 3, 4, 8, ZSTD_lazy},      /* level  6 */
111330 -       {17, 15, 17, 4, 4, 8, ZSTD_lazy2},     /* level  7 */
111331 -       {17, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
111332 -       {17, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
111333 -       {17, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
111334 -       {17, 17, 17, 7, 4, 8, ZSTD_lazy2},     /* level 11 */
111335 -       {17, 17, 17, 8, 4, 8, ZSTD_lazy2},     /* level 12 */
111336 -       {17, 18, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13.*/
111337 -       {17, 17, 17, 7, 3, 8, ZSTD_btopt},     /* level 14.*/
111338 -       {17, 17, 17, 7, 3, 16, ZSTD_btopt},    /* level 15.*/
111339 -       {17, 18, 17, 7, 3, 32, ZSTD_btopt},    /* level 16.*/
111340 -       {17, 18, 17, 7, 3, 64, ZSTD_btopt},    /* level 17.*/
111341 -       {17, 18, 17, 7, 3, 256, ZSTD_btopt},   /* level 18.*/
111342 -       {17, 18, 17, 8, 3, 256, ZSTD_btopt},   /* level 19.*/
111343 -       {17, 18, 17, 9, 3, 256, ZSTD_btopt2},  /* level 20.*/
111344 -       {17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/
111345 -       {17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/
111346 -    },
111347 -    {
111348 -       /* for srcSize <= 16 KB */
111349 -       /* W,  C,  H,  S,  L,  T, strat */
111350 -       {14, 12, 12, 1, 7, 6, ZSTD_fast},      /* level  0 - not used */
111351 -       {14, 14, 14, 1, 6, 6, ZSTD_fast},      /* level  1 */
111352 -       {14, 14, 14, 1, 4, 6, ZSTD_fast},      /* level  2 */
111353 -       {14, 14, 14, 1, 4, 6, ZSTD_dfast},     /* level  3.*/
111354 -       {14, 14, 14, 4, 4, 6, ZSTD_greedy},    /* level  4.*/
111355 -       {14, 14, 14, 3, 4, 6, ZSTD_lazy},      /* level  5.*/
111356 -       {14, 14, 14, 4, 4, 6, ZSTD_lazy2},     /* level  6 */
111357 -       {14, 14, 14, 5, 4, 6, ZSTD_lazy2},     /* level  7 */
111358 -       {14, 14, 14, 6, 4, 6, ZSTD_lazy2},     /* level  8.*/
111359 -       {14, 15, 14, 6, 4, 6, ZSTD_btlazy2},   /* level  9.*/
111360 -       {14, 15, 14, 3, 3, 6, ZSTD_btopt},     /* level 10.*/
111361 -       {14, 15, 14, 6, 3, 8, ZSTD_btopt},     /* level 11.*/
111362 -       {14, 15, 14, 6, 3, 16, ZSTD_btopt},    /* level 12.*/
111363 -       {14, 15, 14, 6, 3, 24, ZSTD_btopt},    /* level 13.*/
111364 -       {14, 15, 15, 6, 3, 48, ZSTD_btopt},    /* level 14.*/
111365 -       {14, 15, 15, 6, 3, 64, ZSTD_btopt},    /* level 15.*/
111366 -       {14, 15, 15, 6, 3, 96, ZSTD_btopt},    /* level 16.*/
111367 -       {14, 15, 15, 6, 3, 128, ZSTD_btopt},   /* level 17.*/
111368 -       {14, 15, 15, 6, 3, 256, ZSTD_btopt},   /* level 18.*/
111369 -       {14, 15, 15, 7, 3, 256, ZSTD_btopt},   /* level 19.*/
111370 -       {14, 15, 15, 8, 3, 256, ZSTD_btopt2},  /* level 20.*/
111371 -       {14, 15, 15, 9, 3, 256, ZSTD_btopt2},  /* level 21.*/
111372 -       {14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/
111373 -    },
111376 -/*! ZSTD_getCParams() :
111377 -*   @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
111378 -*   Size values are optional, provide 0 if not known or unused */
111379 -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
111381 -       ZSTD_compressionParameters cp;
111382 -       size_t const addedSize = srcSize ? 0 : 500;
111383 -       U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1;
111384 -       U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */
111385 -       if (compressionLevel <= 0)
111386 -               compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */
111387 -       if (compressionLevel > ZSTD_MAX_CLEVEL)
111388 -               compressionLevel = ZSTD_MAX_CLEVEL;
111389 -       cp = ZSTD_defaultCParameters[tableID][compressionLevel];
111390 -       if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */
111391 -               if (cp.windowLog > ZSTD_WINDOWLOG_MAX)
111392 -                       cp.windowLog = ZSTD_WINDOWLOG_MAX;
111393 -               if (cp.chainLog > ZSTD_CHAINLOG_MAX)
111394 -                       cp.chainLog = ZSTD_CHAINLOG_MAX;
111395 -               if (cp.hashLog > ZSTD_HASHLOG_MAX)
111396 -                       cp.hashLog = ZSTD_HASHLOG_MAX;
111397 -       }
111398 -       cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
111399 -       return cp;
111402 -/*! ZSTD_getParams() :
111403 -*   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
111404 -*   All fields of `ZSTD_frameParameters` are set to default (0) */
111405 -ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
111407 -       ZSTD_parameters params;
111408 -       ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
111409 -       memset(&params, 0, sizeof(params));
111410 -       params.cParams = cParams;
111411 -       return params;
111414 -EXPORT_SYMBOL(ZSTD_maxCLevel);
111415 -EXPORT_SYMBOL(ZSTD_compressBound);
111417 -EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound);
111418 -EXPORT_SYMBOL(ZSTD_initCCtx);
111419 -EXPORT_SYMBOL(ZSTD_compressCCtx);
111420 -EXPORT_SYMBOL(ZSTD_compress_usingDict);
111422 -EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound);
111423 -EXPORT_SYMBOL(ZSTD_initCDict);
111424 -EXPORT_SYMBOL(ZSTD_compress_usingCDict);
111426 -EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound);
111427 -EXPORT_SYMBOL(ZSTD_initCStream);
111428 -EXPORT_SYMBOL(ZSTD_initCStream_usingCDict);
111429 -EXPORT_SYMBOL(ZSTD_resetCStream);
111430 -EXPORT_SYMBOL(ZSTD_compressStream);
111431 -EXPORT_SYMBOL(ZSTD_flushStream);
111432 -EXPORT_SYMBOL(ZSTD_endStream);
111433 -EXPORT_SYMBOL(ZSTD_CStreamInSize);
111434 -EXPORT_SYMBOL(ZSTD_CStreamOutSize);
111436 -EXPORT_SYMBOL(ZSTD_getCParams);
111437 -EXPORT_SYMBOL(ZSTD_getParams);
111438 -EXPORT_SYMBOL(ZSTD_checkCParams);
111439 -EXPORT_SYMBOL(ZSTD_adjustCParams);
111441 -EXPORT_SYMBOL(ZSTD_compressBegin);
111442 -EXPORT_SYMBOL(ZSTD_compressBegin_usingDict);
111443 -EXPORT_SYMBOL(ZSTD_compressBegin_advanced);
111444 -EXPORT_SYMBOL(ZSTD_copyCCtx);
111445 -EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict);
111446 -EXPORT_SYMBOL(ZSTD_compressContinue);
111447 -EXPORT_SYMBOL(ZSTD_compressEnd);
111449 -EXPORT_SYMBOL(ZSTD_getBlockSizeMax);
111450 -EXPORT_SYMBOL(ZSTD_compressBlock);
111452 -MODULE_LICENSE("Dual BSD/GPL");
111453 -MODULE_DESCRIPTION("Zstd Compressor");
111454 diff --git a/lib/zstd/compress/fse_compress.c b/lib/zstd/compress/fse_compress.c
111455 new file mode 100644
111456 index 000000000000..436985b620e5
111457 --- /dev/null
111458 +++ b/lib/zstd/compress/fse_compress.c
111459 @@ -0,0 +1,625 @@
111460 +/* ******************************************************************
111461 + * FSE : Finite State Entropy encoder
111462 + * Copyright (c) Yann Collet, Facebook, Inc.
111464 + *  You can contact the author at :
111465 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
111466 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
111468 + * This source code is licensed under both the BSD-style license (found in the
111469 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
111470 + * in the COPYING file in the root directory of this source tree).
111471 + * You may select, at your option, one of the above-listed licenses.
111472 +****************************************************************** */
111474 +/* **************************************************************
111475 +*  Includes
111476 +****************************************************************/
111477 +#include "../common/compiler.h"
111478 +#include "../common/mem.h"        /* U32, U16, etc. */
111479 +#include "../common/debug.h"      /* assert, DEBUGLOG */
111480 +#include "hist.h"       /* HIST_count_wksp */
111481 +#include "../common/bitstream.h"
111482 +#define FSE_STATIC_LINKING_ONLY
111483 +#include "../common/fse.h"
111484 +#include "../common/error_private.h"
111485 +#define ZSTD_DEPS_NEED_MALLOC
111486 +#define ZSTD_DEPS_NEED_MATH64
111487 +#include "../common/zstd_deps.h"  /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
111490 +/* **************************************************************
111491 +*  Error Management
111492 +****************************************************************/
111493 +#define FSE_isError ERR_isError
111496 +/* **************************************************************
111497 +*  Templates
111498 +****************************************************************/
111500 +  designed to be included
111501 +  for type-specific functions (template emulation in C)
111502 +  Objective is to write these functions only once, for improved maintenance
111505 +/* safety checks */
111506 +#ifndef FSE_FUNCTION_EXTENSION
111507 +#  error "FSE_FUNCTION_EXTENSION must be defined"
111508 +#endif
111509 +#ifndef FSE_FUNCTION_TYPE
111510 +#  error "FSE_FUNCTION_TYPE must be defined"
111511 +#endif
111513 +/* Function names */
111514 +#define FSE_CAT(X,Y) X##Y
111515 +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
111516 +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
111519 +/* Function templates */
111521 +/* FSE_buildCTable_wksp() :
111522 + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
111523 + * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
111524 + * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
111525 + */
111526 +size_t FSE_buildCTable_wksp(FSE_CTable* ct,
111527 +                      const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
111528 +                            void* workSpace, size_t wkspSize)
111530 +    U32 const tableSize = 1 << tableLog;
111531 +    U32 const tableMask = tableSize - 1;
111532 +    void* const ptr = ct;
111533 +    U16* const tableU16 = ( (U16*) ptr) + 2;
111534 +    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
111535 +    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
111536 +    U32 const step = FSE_TABLESTEP(tableSize);
111538 +    U32* cumul = (U32*)workSpace;
111539 +    FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
111541 +    U32 highThreshold = tableSize-1;
111543 +    if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */
111544 +    if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
111545 +    /* CTable header */
111546 +    tableU16[-2] = (U16) tableLog;
111547 +    tableU16[-1] = (U16) maxSymbolValue;
111548 +    assert(tableLog < 16);   /* required for threshold strategy to work */
111550 +    /* For explanations on how to distribute symbol values over the table :
111551 +     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
111553 +     #ifdef __clang_analyzer__
111554 +     ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */
111555 +     #endif
111557 +    /* symbol start positions */
111558 +    {   U32 u;
111559 +        cumul[0] = 0;
111560 +        for (u=1; u <= maxSymbolValue+1; u++) {
111561 +            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
111562 +                cumul[u] = cumul[u-1] + 1;
111563 +                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
111564 +            } else {
111565 +                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
111566 +        }   }
111567 +        cumul[maxSymbolValue+1] = tableSize+1;
111568 +    }
111570 +    /* Spread symbols */
111571 +    {   U32 position = 0;
111572 +        U32 symbol;
111573 +        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
111574 +            int nbOccurrences;
111575 +            int const freq = normalizedCounter[symbol];
111576 +            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
111577 +                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
111578 +                position = (position + step) & tableMask;
111579 +                while (position > highThreshold)
111580 +                    position = (position + step) & tableMask;   /* Low proba area */
111581 +        }   }
111583 +        assert(position==0);  /* Must have initialized all positions */
111584 +    }
111586 +    /* Build table */
111587 +    {   U32 u; for (u=0; u<tableSize; u++) {
111588 +        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
111589 +        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */
111590 +    }   }
111592 +    /* Build Symbol Transformation Table */
111593 +    {   unsigned total = 0;
111594 +        unsigned s;
111595 +        for (s=0; s<=maxSymbolValue; s++) {
111596 +            switch (normalizedCounter[s])
111597 +            {
111598 +            case  0:
111599 +                /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
111600 +                symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
111601 +                break;
111603 +            case -1:
111604 +            case  1:
111605 +                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
111606 +                symbolTT[s].deltaFindState = total - 1;
111607 +                total ++;
111608 +                break;
111609 +            default :
111610 +                {
111611 +                    U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
111612 +                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
111613 +                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
111614 +                    symbolTT[s].deltaFindState = total - normalizedCounter[s];
111615 +                    total +=  normalizedCounter[s];
111616 +    }   }   }   }
111618 +#if 0  /* debug : symbol costs */
111619 +    DEBUGLOG(5, "\n --- table statistics : ");
111620 +    {   U32 symbol;
111621 +        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
111622 +            DEBUGLOG(5, "%3u: w=%3i,   maxBits=%u, fracBits=%.2f",
111623 +                symbol, normalizedCounter[symbol],
111624 +                FSE_getMaxNbBits(symbolTT, symbol),
111625 +                (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
111626 +        }
111627 +    }
111628 +#endif
111630 +    return 0;
111636 +#ifndef FSE_COMMONDEFS_ONLY
111639 +/*-**************************************************************
111640 +*  FSE NCount encoding
111641 +****************************************************************/
111642 +size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
111644 +    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
111645 +    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
111648 +static size_t
111649 +FSE_writeNCount_generic (void* header, size_t headerBufferSize,
111650 +                   const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
111651 +                         unsigned writeIsSafe)
111653 +    BYTE* const ostart = (BYTE*) header;
111654 +    BYTE* out = ostart;
111655 +    BYTE* const oend = ostart + headerBufferSize;
111656 +    int nbBits;
111657 +    const int tableSize = 1 << tableLog;
111658 +    int remaining;
111659 +    int threshold;
111660 +    U32 bitStream = 0;
111661 +    int bitCount = 0;
111662 +    unsigned symbol = 0;
111663 +    unsigned const alphabetSize = maxSymbolValue + 1;
111664 +    int previousIs0 = 0;
111666 +    /* Table Size */
111667 +    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
111668 +    bitCount  += 4;
111670 +    /* Init */
111671 +    remaining = tableSize+1;   /* +1 for extra accuracy */
111672 +    threshold = tableSize;
111673 +    nbBits = tableLog+1;
111675 +    while ((symbol < alphabetSize) && (remaining>1)) {  /* stops at 1 */
111676 +        if (previousIs0) {
111677 +            unsigned start = symbol;
111678 +            while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
111679 +            if (symbol == alphabetSize) break;   /* incorrect distribution */
111680 +            while (symbol >= start+24) {
111681 +                start+=24;
111682 +                bitStream += 0xFFFFU << bitCount;
111683 +                if ((!writeIsSafe) && (out > oend-2))
111684 +                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
111685 +                out[0] = (BYTE) bitStream;
111686 +                out[1] = (BYTE)(bitStream>>8);
111687 +                out+=2;
111688 +                bitStream>>=16;
111689 +            }
111690 +            while (symbol >= start+3) {
111691 +                start+=3;
111692 +                bitStream += 3 << bitCount;
111693 +                bitCount += 2;
111694 +            }
111695 +            bitStream += (symbol-start) << bitCount;
111696 +            bitCount += 2;
111697 +            if (bitCount>16) {
111698 +                if ((!writeIsSafe) && (out > oend - 2))
111699 +                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
111700 +                out[0] = (BYTE)bitStream;
111701 +                out[1] = (BYTE)(bitStream>>8);
111702 +                out += 2;
111703 +                bitStream >>= 16;
111704 +                bitCount -= 16;
111705 +        }   }
111706 +        {   int count = normalizedCounter[symbol++];
111707 +            int const max = (2*threshold-1) - remaining;
111708 +            remaining -= count < 0 ? -count : count;
111709 +            count++;   /* +1 for extra accuracy */
111710 +            if (count>=threshold)
111711 +                count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
111712 +            bitStream += count << bitCount;
111713 +            bitCount  += nbBits;
111714 +            bitCount  -= (count<max);
111715 +            previousIs0  = (count==1);
111716 +            if (remaining<1) return ERROR(GENERIC);
111717 +            while (remaining<threshold) { nbBits--; threshold>>=1; }
111718 +        }
111719 +        if (bitCount>16) {
111720 +            if ((!writeIsSafe) && (out > oend - 2))
111721 +                return ERROR(dstSize_tooSmall);   /* Buffer overflow */
111722 +            out[0] = (BYTE)bitStream;
111723 +            out[1] = (BYTE)(bitStream>>8);
111724 +            out += 2;
111725 +            bitStream >>= 16;
111726 +            bitCount -= 16;
111727 +    }   }
111729 +    if (remaining != 1)
111730 +        return ERROR(GENERIC);  /* incorrect normalized distribution */
111731 +    assert(symbol <= alphabetSize);
111733 +    /* flush remaining bitStream */
111734 +    if ((!writeIsSafe) && (out > oend - 2))
111735 +        return ERROR(dstSize_tooSmall);   /* Buffer overflow */
111736 +    out[0] = (BYTE)bitStream;
111737 +    out[1] = (BYTE)(bitStream>>8);
111738 +    out+= (bitCount+7) /8;
111740 +    return (out-ostart);
111744 +size_t FSE_writeNCount (void* buffer, size_t bufferSize,
111745 +                  const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
111747 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
111748 +    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
111750 +    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
111751 +        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
111753 +    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
111757 +/*-**************************************************************
111758 +*  FSE Compression Code
111759 +****************************************************************/
111761 +FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
111763 +    size_t size;
111764 +    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
111765 +    size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
111766 +    return (FSE_CTable*)ZSTD_malloc(size);
111769 +void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
111771 +/* provides the minimum logSize to safely represent a distribution */
111772 +static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
111774 +    U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
111775 +    U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
111776 +    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
111777 +    assert(srcSize > 1); /* Not supported, RLE should be used instead */
111778 +    return minBits;
111781 +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
111783 +    U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
111784 +    U32 tableLog = maxTableLog;
111785 +    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
111786 +    assert(srcSize > 1); /* Not supported, RLE should be used instead */
111787 +    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
111788 +    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
111789 +    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
111790 +    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
111791 +    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
111792 +    return tableLog;
111795 +unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
111797 +    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
111800 +/* Secondary normalization method.
111801 +   To be used when primary method fails. */
111803 +static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
111805 +    short const NOT_YET_ASSIGNED = -2;
111806 +    U32 s;
111807 +    U32 distributed = 0;
111808 +    U32 ToDistribute;
111810 +    /* Init */
111811 +    U32 const lowThreshold = (U32)(total >> tableLog);
111812 +    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
111814 +    for (s=0; s<=maxSymbolValue; s++) {
111815 +        if (count[s] == 0) {
111816 +            norm[s]=0;
111817 +            continue;
111818 +        }
111819 +        if (count[s] <= lowThreshold) {
111820 +            norm[s] = lowProbCount;
111821 +            distributed++;
111822 +            total -= count[s];
111823 +            continue;
111824 +        }
111825 +        if (count[s] <= lowOne) {
111826 +            norm[s] = 1;
111827 +            distributed++;
111828 +            total -= count[s];
111829 +            continue;
111830 +        }
111832 +        norm[s]=NOT_YET_ASSIGNED;
111833 +    }
111834 +    ToDistribute = (1 << tableLog) - distributed;
111836 +    if (ToDistribute == 0)
111837 +        return 0;
111839 +    if ((total / ToDistribute) > lowOne) {
111840 +        /* risk of rounding to zero */
111841 +        lowOne = (U32)((total * 3) / (ToDistribute * 2));
111842 +        for (s=0; s<=maxSymbolValue; s++) {
111843 +            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
111844 +                norm[s] = 1;
111845 +                distributed++;
111846 +                total -= count[s];
111847 +                continue;
111848 +        }   }
111849 +        ToDistribute = (1 << tableLog) - distributed;
111850 +    }
111852 +    if (distributed == maxSymbolValue+1) {
111853 +        /* all values are pretty poor;
111854 +           probably incompressible data (should have already been detected);
111855 +           find max, then give all remaining points to max */
111856 +        U32 maxV = 0, maxC = 0;
111857 +        for (s=0; s<=maxSymbolValue; s++)
111858 +            if (count[s] > maxC) { maxV=s; maxC=count[s]; }
111859 +        norm[maxV] += (short)ToDistribute;
111860 +        return 0;
111861 +    }
111863 +    if (total == 0) {
111864 +        /* all of the symbols were low enough for the lowOne or lowThreshold */
111865 +        for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
111866 +            if (norm[s] > 0) { ToDistribute--; norm[s]++; }
111867 +        return 0;
111868 +    }
111870 +    {   U64 const vStepLog = 62 - tableLog;
111871 +        U64 const mid = (1ULL << (vStepLog-1)) - 1;
111872 +        U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total);   /* scale on remaining */
111873 +        U64 tmpTotal = mid;
111874 +        for (s=0; s<=maxSymbolValue; s++) {
111875 +            if (norm[s]==NOT_YET_ASSIGNED) {
111876 +                U64 const end = tmpTotal + (count[s] * rStep);
111877 +                U32 const sStart = (U32)(tmpTotal >> vStepLog);
111878 +                U32 const sEnd = (U32)(end >> vStepLog);
111879 +                U32 const weight = sEnd - sStart;
111880 +                if (weight < 1)
111881 +                    return ERROR(GENERIC);
111882 +                norm[s] = (short)weight;
111883 +                tmpTotal = end;
111884 +    }   }   }
111886 +    return 0;
111889 +size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
111890 +                           const unsigned* count, size_t total,
111891 +                           unsigned maxSymbolValue, unsigned useLowProbCount)
111893 +    /* Sanity checks */
111894 +    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
111895 +    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
111896 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
111897 +    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */
111899 +    {   static U32 const rtbTable[] = {     0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
111900 +        short const lowProbCount = useLowProbCount ? -1 : 1;
111901 +        U64 const scale = 62 - tableLog;
111902 +        U64 const step = ZSTD_div64((U64)1<<62, (U32)total);   /* <== here, one division ! */
111903 +        U64 const vStep = 1ULL<<(scale-20);
111904 +        int stillToDistribute = 1<<tableLog;
111905 +        unsigned s;
111906 +        unsigned largest=0;
111907 +        short largestP=0;
111908 +        U32 lowThreshold = (U32)(total >> tableLog);
111910 +        for (s=0; s<=maxSymbolValue; s++) {
111911 +            if (count[s] == total) return 0;   /* rle special case */
111912 +            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
111913 +            if (count[s] <= lowThreshold) {
111914 +                normalizedCounter[s] = lowProbCount;
111915 +                stillToDistribute--;
111916 +            } else {
111917 +                short proba = (short)((count[s]*step) >> scale);
111918 +                if (proba<8) {
111919 +                    U64 restToBeat = vStep * rtbTable[proba];
111920 +                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
111921 +                }
111922 +                if (proba > largestP) { largestP=proba; largest=s; }
111923 +                normalizedCounter[s] = proba;
111924 +                stillToDistribute -= proba;
111925 +        }   }
111926 +        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
111927 +            /* corner case, need another normalization method */
111928 +            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
111929 +            if (FSE_isError(errorCode)) return errorCode;
111930 +        }
111931 +        else normalizedCounter[largest] += (short)stillToDistribute;
111932 +    }
111934 +#if 0
111935 +    {   /* Print Table (debug) */
111936 +        U32 s;
111937 +        U32 nTotal = 0;
111938 +        for (s=0; s<=maxSymbolValue; s++)
111939 +            RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
111940 +        for (s=0; s<=maxSymbolValue; s++)
111941 +            nTotal += abs(normalizedCounter[s]);
111942 +        if (nTotal != (1U<<tableLog))
111943 +            RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
111944 +        getchar();
111945 +    }
111946 +#endif
111948 +    return tableLog;
111952 +/* fake FSE_CTable, for raw (uncompressed) input */
111953 +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
111955 +    const unsigned tableSize = 1 << nbBits;
111956 +    const unsigned tableMask = tableSize - 1;
111957 +    const unsigned maxSymbolValue = tableMask;
111958 +    void* const ptr = ct;
111959 +    U16* const tableU16 = ( (U16*) ptr) + 2;
111960 +    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1);   /* assumption : tableLog >= 1 */
111961 +    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
111962 +    unsigned s;
111964 +    /* Sanity checks */
111965 +    if (nbBits < 1) return ERROR(GENERIC);             /* min size */
111967 +    /* header */
111968 +    tableU16[-2] = (U16) nbBits;
111969 +    tableU16[-1] = (U16) maxSymbolValue;
111971 +    /* Build table */
111972 +    for (s=0; s<tableSize; s++)
111973 +        tableU16[s] = (U16)(tableSize + s);
111975 +    /* Build Symbol Transformation Table */
111976 +    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
111977 +        for (s=0; s<=maxSymbolValue; s++) {
111978 +            symbolTT[s].deltaNbBits = deltaNbBits;
111979 +            symbolTT[s].deltaFindState = s-1;
111980 +    }   }
111982 +    return 0;
111985 +/* fake FSE_CTable, for rle input (always same symbol) */
111986 +size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
111988 +    void* ptr = ct;
111989 +    U16* tableU16 = ( (U16*) ptr) + 2;
111990 +    void* FSCTptr = (U32*)ptr + 2;
111991 +    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
111993 +    /* header */
111994 +    tableU16[-2] = (U16) 0;
111995 +    tableU16[-1] = (U16) symbolValue;
111997 +    /* Build table */
111998 +    tableU16[0] = 0;
111999 +    tableU16[1] = 0;   /* just in case */
112001 +    /* Build Symbol Transformation Table */
112002 +    symbolTT[symbolValue].deltaNbBits = 0;
112003 +    symbolTT[symbolValue].deltaFindState = 0;
112005 +    return 0;
112009 +static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
112010 +                           const void* src, size_t srcSize,
112011 +                           const FSE_CTable* ct, const unsigned fast)
112013 +    const BYTE* const istart = (const BYTE*) src;
112014 +    const BYTE* const iend = istart + srcSize;
112015 +    const BYTE* ip=iend;
112017 +    BIT_CStream_t bitC;
112018 +    FSE_CState_t CState1, CState2;
112020 +    /* init */
112021 +    if (srcSize <= 2) return 0;
112022 +    { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
112023 +      if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
112025 +#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
112027 +    if (srcSize & 1) {
112028 +        FSE_initCState2(&CState1, ct, *--ip);
112029 +        FSE_initCState2(&CState2, ct, *--ip);
112030 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
112031 +        FSE_FLUSHBITS(&bitC);
112032 +    } else {
112033 +        FSE_initCState2(&CState2, ct, *--ip);
112034 +        FSE_initCState2(&CState1, ct, *--ip);
112035 +    }
112037 +    /* join to mod 4 */
112038 +    srcSize -= 2;
112039 +    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */
112040 +        FSE_encodeSymbol(&bitC, &CState2, *--ip);
112041 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
112042 +        FSE_FLUSHBITS(&bitC);
112043 +    }
112045 +    /* 2 or 4 encoding per loop */
112046 +    while ( ip>istart ) {
112048 +        FSE_encodeSymbol(&bitC, &CState2, *--ip);
112050 +        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
112051 +            FSE_FLUSHBITS(&bitC);
112053 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
112055 +        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */
112056 +            FSE_encodeSymbol(&bitC, &CState2, *--ip);
112057 +            FSE_encodeSymbol(&bitC, &CState1, *--ip);
112058 +        }
112060 +        FSE_FLUSHBITS(&bitC);
112061 +    }
112063 +    FSE_flushCState(&bitC, &CState2);
112064 +    FSE_flushCState(&bitC, &CState1);
112065 +    return BIT_closeCStream(&bitC);
112068 +size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
112069 +                           const void* src, size_t srcSize,
112070 +                           const FSE_CTable* ct)
112072 +    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
112074 +    if (fast)
112075 +        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
112076 +    else
112077 +        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
112081 +size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
112084 +#endif   /* FSE_COMMONDEFS_ONLY */
112085 diff --git a/lib/zstd/compress/hist.c b/lib/zstd/compress/hist.c
112086 new file mode 100644
112087 index 000000000000..5fc30f766591
112088 --- /dev/null
112089 +++ b/lib/zstd/compress/hist.c
112090 @@ -0,0 +1,164 @@
112091 +/* ******************************************************************
112092 + * hist : Histogram functions
112093 + * part of Finite State Entropy project
112094 + * Copyright (c) Yann Collet, Facebook, Inc.
112096 + *  You can contact the author at :
112097 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
112098 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
112100 + * This source code is licensed under both the BSD-style license (found in the
112101 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
112102 + * in the COPYING file in the root directory of this source tree).
112103 + * You may select, at your option, one of the above-listed licenses.
112104 +****************************************************************** */
112106 +/* --- dependencies --- */
112107 +#include "../common/mem.h"             /* U32, BYTE, etc. */
112108 +#include "../common/debug.h"           /* assert, DEBUGLOG */
112109 +#include "../common/error_private.h"   /* ERROR */
112110 +#include "hist.h"
112113 +/* --- Error management --- */
112114 +unsigned HIST_isError(size_t code) { return ERR_isError(code); }
112116 +/*-**************************************************************
112117 + *  Histogram functions
112118 + ****************************************************************/
112119 +unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
112120 +                           const void* src, size_t srcSize)
112122 +    const BYTE* ip = (const BYTE*)src;
112123 +    const BYTE* const end = ip + srcSize;
112124 +    unsigned maxSymbolValue = *maxSymbolValuePtr;
112125 +    unsigned largestCount=0;
112127 +    ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
112128 +    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
112130 +    while (ip<end) {
112131 +        assert(*ip <= maxSymbolValue);
112132 +        count[*ip++]++;
112133 +    }
112135 +    while (!count[maxSymbolValue]) maxSymbolValue--;
112136 +    *maxSymbolValuePtr = maxSymbolValue;
112138 +    {   U32 s;
112139 +        for (s=0; s<=maxSymbolValue; s++)
112140 +            if (count[s] > largestCount) largestCount = count[s];
112141 +    }
112143 +    return largestCount;
112146 +typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
112148 +/* HIST_count_parallel_wksp() :
112149 + * store histogram into 4 intermediate tables, recombined at the end.
112150 + * this design makes better use of OoO cpus,
112151 + * and is noticeably faster when some values are heavily repeated.
112152 + * But it needs some additional workspace for intermediate tables.
112153 + * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
112154 + * @return : largest histogram frequency,
112155 + *           or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
112156 +static size_t HIST_count_parallel_wksp(
112157 +                                unsigned* count, unsigned* maxSymbolValuePtr,
112158 +                                const void* source, size_t sourceSize,
112159 +                                HIST_checkInput_e check,
112160 +                                U32* const workSpace)
112162 +    const BYTE* ip = (const BYTE*)source;
112163 +    const BYTE* const iend = ip+sourceSize;
112164 +    size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
112165 +    unsigned max=0;
112166 +    U32* const Counting1 = workSpace;
112167 +    U32* const Counting2 = Counting1 + 256;
112168 +    U32* const Counting3 = Counting2 + 256;
112169 +    U32* const Counting4 = Counting3 + 256;
112171 +    /* safety checks */
112172 +    assert(*maxSymbolValuePtr <= 255);
112173 +    if (!sourceSize) {
112174 +        ZSTD_memset(count, 0, countSize);
112175 +        *maxSymbolValuePtr = 0;
112176 +        return 0;
112177 +    }
112178 +    ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
112180 +    /* by stripes of 16 bytes */
112181 +    {   U32 cached = MEM_read32(ip); ip += 4;
112182 +        while (ip < iend-15) {
112183 +            U32 c = cached; cached = MEM_read32(ip); ip += 4;
112184 +            Counting1[(BYTE) c     ]++;
112185 +            Counting2[(BYTE)(c>>8) ]++;
112186 +            Counting3[(BYTE)(c>>16)]++;
112187 +            Counting4[       c>>24 ]++;
112188 +            c = cached; cached = MEM_read32(ip); ip += 4;
112189 +            Counting1[(BYTE) c     ]++;
112190 +            Counting2[(BYTE)(c>>8) ]++;
112191 +            Counting3[(BYTE)(c>>16)]++;
112192 +            Counting4[       c>>24 ]++;
112193 +            c = cached; cached = MEM_read32(ip); ip += 4;
112194 +            Counting1[(BYTE) c     ]++;
112195 +            Counting2[(BYTE)(c>>8) ]++;
112196 +            Counting3[(BYTE)(c>>16)]++;
112197 +            Counting4[       c>>24 ]++;
112198 +            c = cached; cached = MEM_read32(ip); ip += 4;
112199 +            Counting1[(BYTE) c     ]++;
112200 +            Counting2[(BYTE)(c>>8) ]++;
112201 +            Counting3[(BYTE)(c>>16)]++;
112202 +            Counting4[       c>>24 ]++;
112203 +        }
112204 +        ip-=4;
112205 +    }
112207 +    /* finish last symbols */
112208 +    while (ip<iend) Counting1[*ip++]++;
112210 +    {   U32 s;
112211 +        for (s=0; s<256; s++) {
112212 +            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
112213 +            if (Counting1[s] > max) max = Counting1[s];
112214 +    }   }
112216 +    {   unsigned maxSymbolValue = 255;
112217 +        while (!Counting1[maxSymbolValue]) maxSymbolValue--;
112218 +        if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
112219 +        *maxSymbolValuePtr = maxSymbolValue;
112220 +        ZSTD_memmove(count, Counting1, countSize);   /* in case count & Counting1 are overlapping */
112221 +    }
112222 +    return (size_t)max;
112225 +/* HIST_countFast_wksp() :
112226 + * Same as HIST_countFast(), but using an externally provided scratch buffer.
112227 + * `workSpace` is a writable buffer which must be 4-bytes aligned,
112228 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
112229 + */
112230 +size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
112231 +                          const void* source, size_t sourceSize,
112232 +                          void* workSpace, size_t workSpaceSize)
112234 +    if (sourceSize < 1500) /* heuristic threshold */
112235 +        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
112236 +    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
112237 +    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
112238 +    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
112241 +/* HIST_count_wksp() :
112242 + * Same as HIST_count(), but using an externally provided scratch buffer.
112243 + * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */
112244 +size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
112245 +                       const void* source, size_t sourceSize,
112246 +                       void* workSpace, size_t workSpaceSize)
112248 +    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
112249 +    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
112250 +    if (*maxSymbolValuePtr < 255)
112251 +        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
112252 +    *maxSymbolValuePtr = 255;
112253 +    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
112255 diff --git a/lib/zstd/compress/hist.h b/lib/zstd/compress/hist.h
112256 new file mode 100644
112257 index 000000000000..228ed48a71de
112258 --- /dev/null
112259 +++ b/lib/zstd/compress/hist.h
112260 @@ -0,0 +1,75 @@
112261 +/* ******************************************************************
112262 + * hist : Histogram functions
112263 + * part of Finite State Entropy project
112264 + * Copyright (c) Yann Collet, Facebook, Inc.
112266 + *  You can contact the author at :
112267 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
112268 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
112270 + * This source code is licensed under both the BSD-style license (found in the
112271 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
112272 + * in the COPYING file in the root directory of this source tree).
112273 + * You may select, at your option, one of the above-listed licenses.
112274 +****************************************************************** */
112276 +/* --- dependencies --- */
112277 +#include "../common/zstd_deps.h"   /* size_t */
112280 +/* --- simple histogram functions --- */
112282 +/*! HIST_count():
112283 + *  Provides the precise count of each byte within a table 'count'.
112284 + * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
112285 + *  Updates *maxSymbolValuePtr with actual largest symbol value detected.
112286 + * @return : count of the most frequent symbol (which isn't identified).
112287 + *           or an error code, which can be tested using HIST_isError().
112288 + *           note : if return == srcSize, there is only one symbol.
112289 + */
112290 +size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
112291 +                  const void* src, size_t srcSize);
112293 +unsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */
112296 +/* --- advanced histogram functions --- */
112298 +#define HIST_WKSP_SIZE_U32 1024
112299 +#define HIST_WKSP_SIZE    (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
112300 +/** HIST_count_wksp() :
112301 + *  Same as HIST_count(), but using an externally provided scratch buffer.
112302 + *  Benefit is this function will use very little stack space.
112303 + * `workSpace` is a writable buffer which must be 4-bytes aligned,
112304 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
112305 + */
112306 +size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
112307 +                       const void* src, size_t srcSize,
112308 +                       void* workSpace, size_t workSpaceSize);
112310 +/** HIST_countFast() :
112311 + *  same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
112312 + *  This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
112313 + */
112314 +size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
112315 +                      const void* src, size_t srcSize);
112317 +/** HIST_countFast_wksp() :
112318 + *  Same as HIST_countFast(), but using an externally provided scratch buffer.
112319 + * `workSpace` is a writable buffer which must be 4-bytes aligned,
112320 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
112321 + */
112322 +size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
112323 +                           const void* src, size_t srcSize,
112324 +                           void* workSpace, size_t workSpaceSize);
112326 +/*! HIST_count_simple() :
112327 + *  Same as HIST_countFast(), this function is unsafe,
112328 + *  and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
112329 + *  It is also a bit slower for large inputs.
112330 + *  However, it does not need any additional memory (not even on stack).
112331 + * @return : count of the most frequent symbol.
112332 + *  Note this function doesn't produce any error (i.e. it must succeed).
112333 + */
112334 +unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
112335 +                           const void* src, size_t srcSize);
112336 diff --git a/lib/zstd/compress/huf_compress.c b/lib/zstd/compress/huf_compress.c
112337 new file mode 100644
112338 index 000000000000..ff0e76a2e0e3
112339 --- /dev/null
112340 +++ b/lib/zstd/compress/huf_compress.c
112341 @@ -0,0 +1,901 @@
112342 +/* ******************************************************************
112343 + * Huffman encoder, part of New Generation Entropy library
112344 + * Copyright (c) Yann Collet, Facebook, Inc.
112346 + *  You can contact the author at :
112347 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
112348 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
112350 + * This source code is licensed under both the BSD-style license (found in the
112351 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
112352 + * in the COPYING file in the root directory of this source tree).
112353 + * You may select, at your option, one of the above-listed licenses.
112354 +****************************************************************** */
112356 +/* **************************************************************
112357 +*  Compiler specifics
112358 +****************************************************************/
112361 +/* **************************************************************
112362 +*  Includes
112363 +****************************************************************/
112364 +#include "../common/zstd_deps.h"     /* ZSTD_memcpy, ZSTD_memset */
112365 +#include "../common/compiler.h"
112366 +#include "../common/bitstream.h"
112367 +#include "hist.h"
112368 +#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
112369 +#include "../common/fse.h"        /* header compression */
112370 +#define HUF_STATIC_LINKING_ONLY
112371 +#include "../common/huf.h"
112372 +#include "../common/error_private.h"
112375 +/* **************************************************************
112376 +*  Error Management
112377 +****************************************************************/
112378 +#define HUF_isError ERR_isError
112379 +#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
112382 +/* **************************************************************
112383 +*  Utils
112384 +****************************************************************/
112385 +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
112387 +    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
112391 +/* *******************************************************
112392 +*  HUF : Huffman block compression
112393 +*********************************************************/
112394 +/* HUF_compressWeights() :
112395 + * Same as FSE_compress(), but dedicated to huff0's weights compression.
112396 + * The use case needs much less stack memory.
112397 + * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
112398 + */
112399 +#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
112401 +typedef struct {
112402 +    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
112403 +    U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
112404 +    unsigned count[HUF_TABLELOG_MAX+1];
112405 +    S16 norm[HUF_TABLELOG_MAX+1];
112406 +} HUF_CompressWeightsWksp;
112408 +static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
112410 +    BYTE* const ostart = (BYTE*) dst;
112411 +    BYTE* op = ostart;
112412 +    BYTE* const oend = ostart + dstSize;
112414 +    unsigned maxSymbolValue = HUF_TABLELOG_MAX;
112415 +    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
112416 +    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)workspace;
112418 +    if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
112420 +    /* init conditions */
112421 +    if (wtSize <= 1) return 0;  /* Not compressible */
112423 +    /* Scan input and build symbol stats */
112424 +    {   unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize);   /* never fails */
112425 +        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
112426 +        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
112427 +    }
112429 +    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
112430 +    CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );
112432 +    /* Write table description header */
112433 +    {   CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
112434 +        op += hSize;
112435 +    }
112437 +    /* Compress */
112438 +    CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
112439 +    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
112440 +        if (cSize == 0) return 0;   /* not enough space for compressed data */
112441 +        op += cSize;
112442 +    }
112444 +    return (size_t)(op-ostart);
112448 +typedef struct {
112449 +    HUF_CompressWeightsWksp wksp;
112450 +    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
112451 +    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
112452 +} HUF_WriteCTableWksp;
112454 +size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
112455 +                            const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
112456 +                            void* workspace, size_t workspaceSize)
112458 +    BYTE* op = (BYTE*)dst;
112459 +    U32 n;
112460 +    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)workspace;
112462 +    /* check conditions */
112463 +    if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
112464 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
112466 +    /* convert to weight */
112467 +    wksp->bitsToWeight[0] = 0;
112468 +    for (n=1; n<huffLog+1; n++)
112469 +        wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
112470 +    for (n=0; n<maxSymbolValue; n++)
112471 +        wksp->huffWeight[n] = wksp->bitsToWeight[CTable[n].nbBits];
112473 +    /* attempt weights compression by FSE */
112474 +    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
112475 +        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
112476 +            op[0] = (BYTE)hSize;
112477 +            return hSize+1;
112478 +    }   }
112480 +    /* write raw values as 4-bits (max : 15) */
112481 +    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
112482 +    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
112483 +    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
112484 +    wksp->huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
112485 +    for (n=0; n<maxSymbolValue; n+=2)
112486 +        op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
112487 +    return ((maxSymbolValue+1)/2) + 1;
112490 +/*! HUF_writeCTable() :
112491 +    `CTable` : Huffman tree to save, using huf representation.
112492 +    @return : size of saved CTable */
112493 +size_t HUF_writeCTable (void* dst, size_t maxDstSize,
112494 +                        const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
112496 +    HUF_WriteCTableWksp wksp;
112497 +    return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
112501 +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
112503 +    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
112504 +    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
112505 +    U32 tableLog = 0;
112506 +    U32 nbSymbols = 0;
112508 +    /* get symbol weights */
112509 +    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
112510 +    *hasZeroWeights = (rankVal[0] > 0);
112512 +    /* check result */
112513 +    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
112514 +    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
112516 +    /* Prepare base value per rank */
112517 +    {   U32 n, nextRankStart = 0;
112518 +        for (n=1; n<=tableLog; n++) {
112519 +            U32 curr = nextRankStart;
112520 +            nextRankStart += (rankVal[n] << (n-1));
112521 +            rankVal[n] = curr;
112522 +    }   }
112524 +    /* fill nbBits */
112525 +    {   U32 n; for (n=0; n<nbSymbols; n++) {
112526 +            const U32 w = huffWeight[n];
112527 +            CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
112528 +    }   }
112530 +    /* fill val */
112531 +    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};  /* support w=0=>n=tableLog+1 */
112532 +        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
112533 +        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
112534 +        /* determine stating value per rank */
112535 +        valPerRank[tableLog+1] = 0;   /* for w==0 */
112536 +        {   U16 min = 0;
112537 +            U32 n; for (n=tableLog; n>0; n--) {  /* start at n=tablelog <-> w=1 */
112538 +                valPerRank[n] = min;     /* get starting value within each rank */
112539 +                min += nbPerRank[n];
112540 +                min >>= 1;
112541 +        }   }
112542 +        /* assign value within rank, symbol order */
112543 +        { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
112544 +    }
112546 +    *maxSymbolValuePtr = nbSymbols - 1;
112547 +    return readSize;
112550 +U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
112552 +    const HUF_CElt* table = (const HUF_CElt*)symbolTable;
112553 +    assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
112554 +    return table[symbolValue].nbBits;
112558 +typedef struct nodeElt_s {
112559 +    U32 count;
112560 +    U16 parent;
112561 +    BYTE byte;
112562 +    BYTE nbBits;
112563 +} nodeElt;
112566 + * HUF_setMaxHeight():
112567 + * Enforces maxNbBits on the Huffman tree described in huffNode.
112569 + * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
112570 + * the tree to so that it is a valid canonical Huffman tree.
112572 + * @pre               The sum of the ranks of each symbol == 2^largestBits,
112573 + *                    where largestBits == huffNode[lastNonNull].nbBits.
112574 + * @post              The sum of the ranks of each symbol == 2^largestBits,
112575 + *                    where largestBits is the return value <= maxNbBits.
112577 + * @param huffNode    The Huffman tree modified in place to enforce maxNbBits.
112578 + * @param lastNonNull The symbol with the lowest count in the Huffman tree.
112579 + * @param maxNbBits   The maximum allowed number of bits, which the Huffman tree
112580 + *                    may not respect. After this function the Huffman tree will
112581 + *                    respect maxNbBits.
112582 + * @return            The maximum number of bits of the Huffman tree after adjustment,
112583 + *                    necessarily no more than maxNbBits.
112584 + */
112585 +static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
112587 +    const U32 largestBits = huffNode[lastNonNull].nbBits;
112588 +    /* early exit : no elt > maxNbBits, so the tree is already valid. */
112589 +    if (largestBits <= maxNbBits) return largestBits;
112591 +    /* there are several too large elements (at least >= 2) */
112592 +    {   int totalCost = 0;
112593 +        const U32 baseCost = 1 << (largestBits - maxNbBits);
112594 +        int n = (int)lastNonNull;
112596 +        /* Adjust any ranks > maxNbBits to maxNbBits.
112597 +         * Compute totalCost, which is how far the sum of the ranks is
112598 +         * we are over 2^largestBits after adjust the offending ranks.
112599 +         */
112600 +        while (huffNode[n].nbBits > maxNbBits) {
112601 +            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
112602 +            huffNode[n].nbBits = (BYTE)maxNbBits;
112603 +            n--;
112604 +        }
112605 +        /* n stops at huffNode[n].nbBits <= maxNbBits */
112606 +        assert(huffNode[n].nbBits <= maxNbBits);
112607 +        /* n end at index of smallest symbol using < maxNbBits */
112608 +        while (huffNode[n].nbBits == maxNbBits) --n;
112610 +        /* renorm totalCost from 2^largestBits to 2^maxNbBits
112611 +         * note : totalCost is necessarily a multiple of baseCost */
112612 +        assert((totalCost & (baseCost - 1)) == 0);
112613 +        totalCost >>= (largestBits - maxNbBits);
112614 +        assert(totalCost > 0);
112616 +        /* repay normalized cost */
112617 +        {   U32 const noSymbol = 0xF0F0F0F0;
112618 +            U32 rankLast[HUF_TABLELOG_MAX+2];
112620 +            /* Get pos of last (smallest = lowest cum. count) symbol per rank */
112621 +            ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
112622 +            {   U32 currentNbBits = maxNbBits;
112623 +                int pos;
112624 +                for (pos=n ; pos >= 0; pos--) {
112625 +                    if (huffNode[pos].nbBits >= currentNbBits) continue;
112626 +                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
112627 +                    rankLast[maxNbBits-currentNbBits] = (U32)pos;
112628 +            }   }
112630 +            while (totalCost > 0) {
112631 +                /* Try to reduce the next power of 2 above totalCost because we
112632 +                 * gain back half the rank.
112633 +                 */
112634 +                U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
112635 +                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
112636 +                    U32 const highPos = rankLast[nBitsToDecrease];
112637 +                    U32 const lowPos = rankLast[nBitsToDecrease-1];
112638 +                    if (highPos == noSymbol) continue;
112639 +                    /* Decrease highPos if no symbols of lowPos or if it is
112640 +                     * not cheaper to remove 2 lowPos than highPos.
112641 +                     */
112642 +                    if (lowPos == noSymbol) break;
112643 +                    {   U32 const highTotal = huffNode[highPos].count;
112644 +                        U32 const lowTotal = 2 * huffNode[lowPos].count;
112645 +                        if (highTotal <= lowTotal) break;
112646 +                }   }
112647 +                /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
112648 +                assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
112649 +                /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
112650 +                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
112651 +                    nBitsToDecrease++;
112652 +                assert(rankLast[nBitsToDecrease] != noSymbol);
112653 +                /* Increase the number of bits to gain back half the rank cost. */
112654 +                totalCost -= 1 << (nBitsToDecrease-1);
112655 +                huffNode[rankLast[nBitsToDecrease]].nbBits++;
112657 +                /* Fix up the new rank.
112658 +                 * If the new rank was empty, this symbol is now its smallest.
112659 +                 * Otherwise, this symbol will be the largest in the new rank so no adjustment.
112660 +                 */
112661 +                if (rankLast[nBitsToDecrease-1] == noSymbol)
112662 +                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
112663 +                /* Fix up the old rank.
112664 +                 * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
112665 +                 * it must be the only symbol in its rank, so the old rank now has no symbols.
112666 +                 * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
112667 +                 * the smallest node in the rank. If the previous position belongs to a different rank,
112668 +                 * then the rank is now empty.
112669 +                 */
112670 +                if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */
112671 +                    rankLast[nBitsToDecrease] = noSymbol;
112672 +                else {
112673 +                    rankLast[nBitsToDecrease]--;
112674 +                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
112675 +                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
112676 +                }
112677 +            }   /* while (totalCost > 0) */
112679 +            /* If we've removed too much weight, then we have to add it back.
112680 +             * To avoid overshooting again, we only adjust the smallest rank.
112681 +             * We take the largest nodes from the lowest rank 0 and move them
112682 +             * to rank 1. There's guaranteed to be enough rank 0 symbols because
112683 +             * TODO.
112684 +             */
112685 +            while (totalCost < 0) {  /* Sometimes, cost correction overshoot */
112686 +                /* special case : no rank 1 symbol (using maxNbBits-1);
112687 +                 * let's create one from largest rank 0 (using maxNbBits).
112688 +                 */
112689 +                if (rankLast[1] == noSymbol) {
112690 +                    while (huffNode[n].nbBits == maxNbBits) n--;
112691 +                    huffNode[n+1].nbBits--;
112692 +                    assert(n >= 0);
112693 +                    rankLast[1] = (U32)(n+1);
112694 +                    totalCost++;
112695 +                    continue;
112696 +                }
112697 +                huffNode[ rankLast[1] + 1 ].nbBits--;
112698 +                rankLast[1]++;
112699 +                totalCost ++;
112700 +            }
112701 +        }   /* repay normalized cost */
112702 +    }   /* there are several too large elements (at least >= 2) */
112704 +    return maxNbBits;
112707 +typedef struct {
112708 +    U32 base;
112709 +    U32 curr;
112710 +} rankPos;
112712 +typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
112714 +#define RANK_POSITION_TABLE_SIZE 32
112716 +typedef struct {
112717 +  huffNodeTable huffNodeTbl;
112718 +  rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
112719 +} HUF_buildCTable_wksp_tables;
112722 + * HUF_sort():
112723 + * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
112725 + * @param[out] huffNode       Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
112726 + *                            Must have (maxSymbolValue + 1) entries.
112727 + * @param[in]  count          Histogram of the symbols.
112728 + * @param[in]  maxSymbolValue Maximum symbol value.
112729 + * @param      rankPosition   This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
112730 + */
112731 +static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
112733 +    int n;
112734 +    int const maxSymbolValue1 = (int)maxSymbolValue + 1;
112736 +    /* Compute base and set curr to base.
112737 +     * For symbol s let lowerRank = BIT_highbit32(count[n]+1) and rank = lowerRank + 1.
112738 +     * Then 2^lowerRank <= count[n]+1 <= 2^rank.
112739 +     * We attribute each symbol to lowerRank's base value, because we want to know where
112740 +     * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
112741 +     */
112742 +    ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
112743 +    for (n = 0; n < maxSymbolValue1; ++n) {
112744 +        U32 lowerRank = BIT_highbit32(count[n] + 1);
112745 +        rankPosition[lowerRank].base++;
112746 +    }
112747 +    assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
112748 +    for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
112749 +        rankPosition[n-1].base += rankPosition[n].base;
112750 +        rankPosition[n-1].curr = rankPosition[n-1].base;
112751 +    }
112752 +    /* Sort */
112753 +    for (n = 0; n < maxSymbolValue1; ++n) {
112754 +        U32 const c = count[n];
112755 +        U32 const r = BIT_highbit32(c+1) + 1;
112756 +        U32 pos = rankPosition[r].curr++;
112757 +        /* Insert into the correct position in the rank.
112758 +         * We have at most 256 symbols, so this insertion should be fine.
112759 +         */
112760 +        while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
112761 +            huffNode[pos] = huffNode[pos-1];
112762 +            pos--;
112763 +        }
112764 +        huffNode[pos].count = c;
112765 +        huffNode[pos].byte  = (BYTE)n;
112766 +    }
112770 +/** HUF_buildCTable_wksp() :
112771 + *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
112772 + *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
112773 + */
112774 +#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
112776 +/* HUF_buildTree():
112777 + * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
112779 + * @param huffNode        The array sorted by HUF_sort(). Builds the Huffman tree in this array.
112780 + * @param maxSymbolValue  The maximum symbol value.
112781 + * @return                The smallest node in the Huffman tree (by count).
112782 + */
112783 +static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
112785 +    nodeElt* const huffNode0 = huffNode - 1;
112786 +    int nonNullRank;
112787 +    int lowS, lowN;
112788 +    int nodeNb = STARTNODE;
112789 +    int n, nodeRoot;
112790 +    /* init for parents */
112791 +    nonNullRank = (int)maxSymbolValue;
112792 +    while(huffNode[nonNullRank].count == 0) nonNullRank--;
112793 +    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
112794 +    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
112795 +    huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
112796 +    nodeNb++; lowS-=2;
112797 +    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
112798 +    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */
112800 +    /* create parents */
112801 +    while (nodeNb <= nodeRoot) {
112802 +        int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
112803 +        int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
112804 +        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
112805 +        huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
112806 +        nodeNb++;
112807 +    }
112809 +    /* distribute weights (unlimited tree height) */
112810 +    huffNode[nodeRoot].nbBits = 0;
112811 +    for (n=nodeRoot-1; n>=STARTNODE; n--)
112812 +        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
112813 +    for (n=0; n<=nonNullRank; n++)
112814 +        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
112816 +    return nonNullRank;
112820 + * HUF_buildCTableFromTree():
112821 + * Build the CTable given the Huffman tree in huffNode.
112823 + * @param[out] CTable         The output Huffman CTable.
112824 + * @param      huffNode       The Huffman tree.
112825 + * @param      nonNullRank    The last and smallest node in the Huffman tree.
112826 + * @param      maxSymbolValue The maximum symbol value.
112827 + * @param      maxNbBits      The exact maximum number of bits used in the Huffman tree.
112828 + */
112829 +static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
112831 +    /* fill result into ctable (val, nbBits) */
112832 +    int n;
112833 +    U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
112834 +    U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
112835 +    int const alphabetSize = (int)(maxSymbolValue + 1);
112836 +    for (n=0; n<=nonNullRank; n++)
112837 +        nbPerRank[huffNode[n].nbBits]++;
112838 +    /* determine starting value per rank */
112839 +    {   U16 min = 0;
112840 +        for (n=(int)maxNbBits; n>0; n--) {
112841 +            valPerRank[n] = min;      /* get starting value within each rank */
112842 +            min += nbPerRank[n];
112843 +            min >>= 1;
112844 +    }   }
112845 +    for (n=0; n<alphabetSize; n++)
112846 +        CTable[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */
112847 +    for (n=0; n<alphabetSize; n++)
112848 +        CTable[n].val = valPerRank[CTable[n].nbBits]++;   /* assign value within rank, symbol order */
112851 +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
112853 +    HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
112854 +    nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
112855 +    nodeElt* const huffNode = huffNode0+1;
112856 +    int nonNullRank;
112858 +    /* safety checks */
112859 +    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
112860 +    if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
112861 +      return ERROR(workSpace_tooSmall);
112862 +    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
112863 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
112864 +      return ERROR(maxSymbolValue_tooLarge);
112865 +    ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
112867 +    /* sort, decreasing order */
112868 +    HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
112870 +    /* build tree */
112871 +    nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
112873 +    /* enforce maxTableLog */
112874 +    maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
112875 +    if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */
112877 +    HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
112879 +    return maxNbBits;
112882 +size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
112884 +    size_t nbBits = 0;
112885 +    int s;
112886 +    for (s = 0; s <= (int)maxSymbolValue; ++s) {
112887 +        nbBits += CTable[s].nbBits * count[s];
112888 +    }
112889 +    return nbBits >> 3;
112892 +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
112893 +  int bad = 0;
112894 +  int s;
112895 +  for (s = 0; s <= (int)maxSymbolValue; ++s) {
112896 +    bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
112897 +  }
112898 +  return !bad;
112901 +size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
112903 +FORCE_INLINE_TEMPLATE void
112904 +HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
112906 +    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
112909 +#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
112911 +#define HUF_FLUSHBITS_1(stream) \
112912 +    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
112914 +#define HUF_FLUSHBITS_2(stream) \
112915 +    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
112917 +FORCE_INLINE_TEMPLATE size_t
112918 +HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
112919 +                                   const void* src, size_t srcSize,
112920 +                                   const HUF_CElt* CTable)
112922 +    const BYTE* ip = (const BYTE*) src;
112923 +    BYTE* const ostart = (BYTE*)dst;
112924 +    BYTE* const oend = ostart + dstSize;
112925 +    BYTE* op = ostart;
112926 +    size_t n;
112927 +    BIT_CStream_t bitC;
112929 +    /* init */
112930 +    if (dstSize < 8) return 0;   /* not enough space to compress */
112931 +    { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op));
112932 +      if (HUF_isError(initErr)) return 0; }
112934 +    n = srcSize & ~3;  /* join to mod 4 */
112935 +    switch (srcSize & 3)
112936 +    {
112937 +        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
112938 +                 HUF_FLUSHBITS_2(&bitC);
112939 +                /* fall-through */
112940 +        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
112941 +                 HUF_FLUSHBITS_1(&bitC);
112942 +                /* fall-through */
112943 +        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
112944 +                 HUF_FLUSHBITS(&bitC);
112945 +                /* fall-through */
112946 +        case 0 : /* fall-through */
112947 +        default: break;
112948 +    }
112950 +    for (; n>0; n-=4) {  /* note : n&3==0 at this stage */
112951 +        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
112952 +        HUF_FLUSHBITS_1(&bitC);
112953 +        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
112954 +        HUF_FLUSHBITS_2(&bitC);
112955 +        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
112956 +        HUF_FLUSHBITS_1(&bitC);
112957 +        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
112958 +        HUF_FLUSHBITS(&bitC);
112959 +    }
112961 +    return BIT_closeCStream(&bitC);
112964 +#if DYNAMIC_BMI2
112966 +static TARGET_ATTRIBUTE("bmi2") size_t
112967 +HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
112968 +                                   const void* src, size_t srcSize,
112969 +                                   const HUF_CElt* CTable)
112971 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
112974 +static size_t
112975 +HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
112976 +                                      const void* src, size_t srcSize,
112977 +                                      const HUF_CElt* CTable)
112979 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
112982 +static size_t
112983 +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
112984 +                              const void* src, size_t srcSize,
112985 +                              const HUF_CElt* CTable, const int bmi2)
112987 +    if (bmi2) {
112988 +        return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
112989 +    }
112990 +    return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
112993 +#else
112995 +static size_t
112996 +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
112997 +                              const void* src, size_t srcSize,
112998 +                              const HUF_CElt* CTable, const int bmi2)
113000 +    (void)bmi2;
113001 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
113004 +#endif
113006 +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
113008 +    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
113012 +static size_t
113013 +HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
113014 +                              const void* src, size_t srcSize,
113015 +                              const HUF_CElt* CTable, int bmi2)
113017 +    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
113018 +    const BYTE* ip = (const BYTE*) src;
113019 +    const BYTE* const iend = ip + srcSize;
113020 +    BYTE* const ostart = (BYTE*) dst;
113021 +    BYTE* const oend = ostart + dstSize;
113022 +    BYTE* op = ostart;
113024 +    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */
113025 +    if (srcSize < 12) return 0;   /* no saving possible : too small input */
113026 +    op += 6;   /* jumpTable */
113028 +    assert(op <= oend);
113029 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
113030 +        if (cSize==0) return 0;
113031 +        assert(cSize <= 65535);
113032 +        MEM_writeLE16(ostart, (U16)cSize);
113033 +        op += cSize;
113034 +    }
113036 +    ip += segmentSize;
113037 +    assert(op <= oend);
113038 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
113039 +        if (cSize==0) return 0;
113040 +        assert(cSize <= 65535);
113041 +        MEM_writeLE16(ostart+2, (U16)cSize);
113042 +        op += cSize;
113043 +    }
113045 +    ip += segmentSize;
113046 +    assert(op <= oend);
113047 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
113048 +        if (cSize==0) return 0;
113049 +        assert(cSize <= 65535);
113050 +        MEM_writeLE16(ostart+4, (U16)cSize);
113051 +        op += cSize;
113052 +    }
113054 +    ip += segmentSize;
113055 +    assert(op <= oend);
113056 +    assert(ip <= iend);
113057 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
113058 +        if (cSize==0) return 0;
113059 +        op += cSize;
113060 +    }
113062 +    return (size_t)(op-ostart);
113065 +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
113067 +    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
113070 +typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
113072 +static size_t HUF_compressCTable_internal(
113073 +                BYTE* const ostart, BYTE* op, BYTE* const oend,
113074 +                const void* src, size_t srcSize,
113075 +                HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
113077 +    size_t const cSize = (nbStreams==HUF_singleStream) ?
113078 +                         HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
113079 +                         HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
113080 +    if (HUF_isError(cSize)) { return cSize; }
113081 +    if (cSize==0) { return 0; }   /* uncompressible */
113082 +    op += cSize;
113083 +    /* check compressibility */
113084 +    assert(op >= ostart);
113085 +    if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
113086 +    return (size_t)(op-ostart);
113089 +typedef struct {
113090 +    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
113091 +    HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
113092 +    union {
113093 +        HUF_buildCTable_wksp_tables buildCTable_wksp;
113094 +        HUF_WriteCTableWksp writeCTable_wksp;
113095 +    } wksps;
113096 +} HUF_compress_tables_t;
113098 +/* HUF_compress_internal() :
113099 + * `workSpace_align4` must be aligned on 4-bytes boundaries,
113100 + * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned */
113101 +static size_t
113102 +HUF_compress_internal (void* dst, size_t dstSize,
113103 +                 const void* src, size_t srcSize,
113104 +                       unsigned maxSymbolValue, unsigned huffLog,
113105 +                       HUF_nbStreams_e nbStreams,
113106 +                       void* workSpace_align4, size_t wkspSize,
113107 +                       HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
113108 +                 const int bmi2)
113110 +    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4;
113111 +    BYTE* const ostart = (BYTE*)dst;
113112 +    BYTE* const oend = ostart + dstSize;
113113 +    BYTE* op = ostart;
113115 +    HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
113116 +    assert(((size_t)workSpace_align4 & 3) == 0);   /* must be aligned on 4-bytes boundaries */
113118 +    /* checks & inits */
113119 +    if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
113120 +    if (!srcSize) return 0;  /* Uncompressed */
113121 +    if (!dstSize) return 0;  /* cannot fit anything within dst budget */
113122 +    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
113123 +    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
113124 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
113125 +    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
113126 +    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
113128 +    /* Heuristic : If old table is valid, use it for small inputs */
113129 +    if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
113130 +        return HUF_compressCTable_internal(ostart, op, oend,
113131 +                                           src, srcSize,
113132 +                                           nbStreams, oldHufTable, bmi2);
113133 +    }
113135 +    /* Scan input and build symbol stats */
113136 +    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
113137 +        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
113138 +        if (largest <= (srcSize >> 7)+4) return 0;   /* heuristic : probably not compressible enough */
113139 +    }
113141 +    /* Check validity of previous table */
113142 +    if ( repeat
113143 +      && *repeat == HUF_repeat_check
113144 +      && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
113145 +        *repeat = HUF_repeat_none;
113146 +    }
113147 +    /* Heuristic : use existing table for small inputs */
113148 +    if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
113149 +        return HUF_compressCTable_internal(ostart, op, oend,
113150 +                                           src, srcSize,
113151 +                                           nbStreams, oldHufTable, bmi2);
113152 +    }
113154 +    /* Build Huffman Tree */
113155 +    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
113156 +    {   size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
113157 +                                            maxSymbolValue, huffLog,
113158 +                                            &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
113159 +        CHECK_F(maxBits);
113160 +        huffLog = (U32)maxBits;
113161 +        /* Zero unused symbols in CTable, so we can check it for validity */
113162 +        ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
113163 +               sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
113164 +    }
113166 +    /* Write table description header */
113167 +    {   CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
113168 +                                              &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
113169 +        /* Check if using previous huffman table is beneficial */
113170 +        if (repeat && *repeat != HUF_repeat_none) {
113171 +            size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
113172 +            size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
113173 +            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
113174 +                return HUF_compressCTable_internal(ostart, op, oend,
113175 +                                                   src, srcSize,
113176 +                                                   nbStreams, oldHufTable, bmi2);
113177 +        }   }
113179 +        /* Use the new huffman table */
113180 +        if (hSize + 12ul >= srcSize) { return 0; }
113181 +        op += hSize;
113182 +        if (repeat) { *repeat = HUF_repeat_none; }
113183 +        if (oldHufTable)
113184 +            ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable));  /* Save new table */
113185 +    }
113186 +    return HUF_compressCTable_internal(ostart, op, oend,
113187 +                                       src, srcSize,
113188 +                                       nbStreams, table->CTable, bmi2);
113192 +size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
113193 +                      const void* src, size_t srcSize,
113194 +                      unsigned maxSymbolValue, unsigned huffLog,
113195 +                      void* workSpace, size_t wkspSize)
113197 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
113198 +                                 maxSymbolValue, huffLog, HUF_singleStream,
113199 +                                 workSpace, wkspSize,
113200 +                                 NULL, NULL, 0, 0 /*bmi2*/);
113203 +size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
113204 +                      const void* src, size_t srcSize,
113205 +                      unsigned maxSymbolValue, unsigned huffLog,
113206 +                      void* workSpace, size_t wkspSize,
113207 +                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
113209 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
113210 +                                 maxSymbolValue, huffLog, HUF_singleStream,
113211 +                                 workSpace, wkspSize, hufTable,
113212 +                                 repeat, preferRepeat, bmi2);
113215 +/* HUF_compress4X_repeat():
113216 + * compress input using 4 streams.
113217 + * provide workspace to generate compression tables */
113218 +size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
113219 +                      const void* src, size_t srcSize,
113220 +                      unsigned maxSymbolValue, unsigned huffLog,
113221 +                      void* workSpace, size_t wkspSize)
113223 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
113224 +                                 maxSymbolValue, huffLog, HUF_fourStreams,
113225 +                                 workSpace, wkspSize,
113226 +                                 NULL, NULL, 0, 0 /*bmi2*/);
113229 +/* HUF_compress4X_repeat():
113230 + * compress input using 4 streams.
113231 + * re-use an existing huffman compression table */
113232 +size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
113233 +                      const void* src, size_t srcSize,
113234 +                      unsigned maxSymbolValue, unsigned huffLog,
113235 +                      void* workSpace, size_t wkspSize,
113236 +                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
113238 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
113239 +                                 maxSymbolValue, huffLog, HUF_fourStreams,
113240 +                                 workSpace, wkspSize,
113241 +                                 hufTable, repeat, preferRepeat, bmi2);
113243 diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
113244 new file mode 100644
113245 index 000000000000..78aa14c50dd2
113246 --- /dev/null
113247 +++ b/lib/zstd/compress/zstd_compress.c
113248 @@ -0,0 +1,5105 @@
113250 + * Copyright (c) Yann Collet, Facebook, Inc.
113251 + * All rights reserved.
113253 + * This source code is licensed under both the BSD-style license (found in the
113254 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
113255 + * in the COPYING file in the root directory of this source tree).
113256 + * You may select, at your option, one of the above-listed licenses.
113257 + */
113259 +/*-*************************************
113260 +*  Dependencies
113261 +***************************************/
113262 +#include "../common/zstd_deps.h"  /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
113263 +#include "../common/cpu.h"
113264 +#include "../common/mem.h"
113265 +#include "hist.h"           /* HIST_countFast_wksp */
113266 +#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
113267 +#include "../common/fse.h"
113268 +#define HUF_STATIC_LINKING_ONLY
113269 +#include "../common/huf.h"
113270 +#include "zstd_compress_internal.h"
113271 +#include "zstd_compress_sequences.h"
113272 +#include "zstd_compress_literals.h"
113273 +#include "zstd_fast.h"
113274 +#include "zstd_double_fast.h"
113275 +#include "zstd_lazy.h"
113276 +#include "zstd_opt.h"
113277 +#include "zstd_ldm.h"
113278 +#include "zstd_compress_superblock.h"
113280 +/* ***************************************************************
113281 +*  Tuning parameters
113282 +*****************************************************************/
113284 + * COMPRESS_HEAPMODE :
113285 + * Select how default decompression function ZSTD_compress() allocates its context,
113286 + * on stack (0, default), or into heap (1).
113287 + * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
113288 + */
113291 +/*-*************************************
113292 +*  Helper functions
113293 +***************************************/
113294 +/* ZSTD_compressBound()
113295 + * Note that the result from this function is only compatible with the "normal"
113296 + * full-block strategy.
113297 + * When there are a lot of small blocks due to frequent flush in streaming mode
113298 + * the overhead of headers can make the compressed data to be larger than the
113299 + * return value of ZSTD_compressBound().
113300 + */
113301 +size_t ZSTD_compressBound(size_t srcSize) {
113302 +    return ZSTD_COMPRESSBOUND(srcSize);
113306 +/*-*************************************
113307 +*  Context memory management
113308 +***************************************/
113309 +struct ZSTD_CDict_s {
113310 +    const void* dictContent;
113311 +    size_t dictContentSize;
113312 +    ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
113313 +    U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
113314 +    ZSTD_cwksp workspace;
113315 +    ZSTD_matchState_t matchState;
113316 +    ZSTD_compressedBlockState_t cBlockState;
113317 +    ZSTD_customMem customMem;
113318 +    U32 dictID;
113319 +    int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
113320 +};  /* typedef'd to ZSTD_CDict within "zstd.h" */
113322 +ZSTD_CCtx* ZSTD_createCCtx(void)
113324 +    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
113327 +static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
113329 +    assert(cctx != NULL);
113330 +    ZSTD_memset(cctx, 0, sizeof(*cctx));
113331 +    cctx->customMem = memManager;
113332 +    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
113333 +    {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
113334 +        assert(!ZSTD_isError(err));
113335 +        (void)err;
113336 +    }
113339 +ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
113341 +    ZSTD_STATIC_ASSERT(zcss_init==0);
113342 +    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
113343 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
113344 +    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
113345 +        if (!cctx) return NULL;
113346 +        ZSTD_initCCtx(cctx, customMem);
113347 +        return cctx;
113348 +    }
113351 +ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
113353 +    ZSTD_cwksp ws;
113354 +    ZSTD_CCtx* cctx;
113355 +    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
113356 +    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
113357 +    ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
113359 +    cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
113360 +    if (cctx == NULL) return NULL;
113362 +    ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
113363 +    ZSTD_cwksp_move(&cctx->workspace, &ws);
113364 +    cctx->staticSize = workspaceSize;
113366 +    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
113367 +    if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
113368 +    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
113369 +    cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
113370 +    cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
113371 +    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
113372 +    return cctx;
113376 + * Clears and frees all of the dictionaries in the CCtx.
113377 + */
113378 +static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
113380 +    ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
113381 +    ZSTD_freeCDict(cctx->localDict.cdict);
113382 +    ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
113383 +    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
113384 +    cctx->cdict = NULL;
113387 +static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
113389 +    size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
113390 +    size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
113391 +    return bufferSize + cdictSize;
113394 +static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
113396 +    assert(cctx != NULL);
113397 +    assert(cctx->staticSize == 0);
113398 +    ZSTD_clearAllDicts(cctx);
113399 +    ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
113402 +size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
113404 +    if (cctx==NULL) return 0;   /* support free on NULL */
113405 +    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
113406 +                    "not compatible with static CCtx");
113407 +    {
113408 +        int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
113409 +        ZSTD_freeCCtxContent(cctx);
113410 +        if (!cctxInWorkspace) {
113411 +            ZSTD_customFree(cctx, cctx->customMem);
113412 +        }
113413 +    }
113414 +    return 0;
113418 +static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
113420 +    (void)cctx;
113421 +    return 0;
113425 +size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
113427 +    if (cctx==NULL) return 0;   /* support sizeof on NULL */
113428 +    /* cctx may be in the workspace */
113429 +    return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
113430 +           + ZSTD_cwksp_sizeof(&cctx->workspace)
113431 +           + ZSTD_sizeof_localDict(cctx->localDict)
113432 +           + ZSTD_sizeof_mtctx(cctx);
113435 +size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
113437 +    return ZSTD_sizeof_CCtx(zcs);  /* same object */
113440 +/* private API call, for dictBuilder only */
113441 +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
113443 +/* Returns 1 if compression parameters are such that we should
113444 + * enable long distance matching (wlog >= 27, strategy >= btopt).
113445 + * Returns 0 otherwise.
113446 + */
113447 +static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) {
113448 +    return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27;
113451 +static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
113452 +        ZSTD_compressionParameters cParams)
113454 +    ZSTD_CCtx_params cctxParams;
113455 +    /* should not matter, as all cParams are presumed properly defined */
113456 +    ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
113457 +    cctxParams.cParams = cParams;
113459 +    if (ZSTD_CParams_shouldEnableLdm(&cParams)) {
113460 +        DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params");
113461 +        cctxParams.ldmParams.enableLdm = 1;
113462 +        /* LDM is enabled by default for optimal parser and window size >= 128MB */
113463 +        ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
113464 +        assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
113465 +        assert(cctxParams.ldmParams.hashRateLog < 32);
113466 +    }
113468 +    assert(!ZSTD_checkCParams(cParams));
113469 +    return cctxParams;
113472 +static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
113473 +        ZSTD_customMem customMem)
113475 +    ZSTD_CCtx_params* params;
113476 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
113477 +    params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
113478 +            sizeof(ZSTD_CCtx_params), customMem);
113479 +    if (!params) { return NULL; }
113480 +    ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
113481 +    params->customMem = customMem;
113482 +    return params;
113485 +ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
113487 +    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
113490 +size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
113492 +    if (params == NULL) { return 0; }
113493 +    ZSTD_customFree(params, params->customMem);
113494 +    return 0;
113497 +size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
113499 +    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
113502 +size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
113503 +    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
113504 +    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
113505 +    cctxParams->compressionLevel = compressionLevel;
113506 +    cctxParams->fParams.contentSizeFlag = 1;
113507 +    return 0;
113510 +#define ZSTD_NO_CLEVEL 0
113513 + * Initializes the cctxParams from params and compressionLevel.
113514 + * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
113515 + */
113516 +static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
113518 +    assert(!ZSTD_checkCParams(params->cParams));
113519 +    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
113520 +    cctxParams->cParams = params->cParams;
113521 +    cctxParams->fParams = params->fParams;
113522 +    /* Should not matter, as all cParams are presumed properly defined.
113523 +     * But, set it for tracing anyway.
113524 +     */
113525 +    cctxParams->compressionLevel = compressionLevel;
113528 +size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
113530 +    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
113531 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
113532 +    ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
113533 +    return 0;
113537 + * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
113538 + * @param param Validated zstd parameters.
113539 + */
113540 +static void ZSTD_CCtxParams_setZstdParams(
113541 +        ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
113543 +    assert(!ZSTD_checkCParams(params->cParams));
113544 +    cctxParams->cParams = params->cParams;
113545 +    cctxParams->fParams = params->fParams;
113546 +    /* Should not matter, as all cParams are presumed properly defined.
113547 +     * But, set it for tracing anyway.
113548 +     */
113549 +    cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
113552 +ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
113554 +    ZSTD_bounds bounds = { 0, 0, 0 };
113556 +    switch(param)
113557 +    {
113558 +    case ZSTD_c_compressionLevel:
113559 +        bounds.lowerBound = ZSTD_minCLevel();
113560 +        bounds.upperBound = ZSTD_maxCLevel();
113561 +        return bounds;
113563 +    case ZSTD_c_windowLog:
113564 +        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
113565 +        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
113566 +        return bounds;
113568 +    case ZSTD_c_hashLog:
113569 +        bounds.lowerBound = ZSTD_HASHLOG_MIN;
113570 +        bounds.upperBound = ZSTD_HASHLOG_MAX;
113571 +        return bounds;
113573 +    case ZSTD_c_chainLog:
113574 +        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
113575 +        bounds.upperBound = ZSTD_CHAINLOG_MAX;
113576 +        return bounds;
113578 +    case ZSTD_c_searchLog:
113579 +        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
113580 +        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
113581 +        return bounds;
113583 +    case ZSTD_c_minMatch:
113584 +        bounds.lowerBound = ZSTD_MINMATCH_MIN;
113585 +        bounds.upperBound = ZSTD_MINMATCH_MAX;
113586 +        return bounds;
113588 +    case ZSTD_c_targetLength:
113589 +        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
113590 +        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
113591 +        return bounds;
113593 +    case ZSTD_c_strategy:
113594 +        bounds.lowerBound = ZSTD_STRATEGY_MIN;
113595 +        bounds.upperBound = ZSTD_STRATEGY_MAX;
113596 +        return bounds;
113598 +    case ZSTD_c_contentSizeFlag:
113599 +        bounds.lowerBound = 0;
113600 +        bounds.upperBound = 1;
113601 +        return bounds;
113603 +    case ZSTD_c_checksumFlag:
113604 +        bounds.lowerBound = 0;
113605 +        bounds.upperBound = 1;
113606 +        return bounds;
113608 +    case ZSTD_c_dictIDFlag:
113609 +        bounds.lowerBound = 0;
113610 +        bounds.upperBound = 1;
113611 +        return bounds;
113613 +    case ZSTD_c_nbWorkers:
113614 +        bounds.lowerBound = 0;
113615 +        bounds.upperBound = 0;
113616 +        return bounds;
113618 +    case ZSTD_c_jobSize:
113619 +        bounds.lowerBound = 0;
113620 +        bounds.upperBound = 0;
113621 +        return bounds;
113623 +    case ZSTD_c_overlapLog:
113624 +        bounds.lowerBound = 0;
113625 +        bounds.upperBound = 0;
113626 +        return bounds;
113628 +    case ZSTD_c_enableDedicatedDictSearch:
113629 +        bounds.lowerBound = 0;
113630 +        bounds.upperBound = 1;
113631 +        return bounds;
113633 +    case ZSTD_c_enableLongDistanceMatching:
113634 +        bounds.lowerBound = 0;
113635 +        bounds.upperBound = 1;
113636 +        return bounds;
113638 +    case ZSTD_c_ldmHashLog:
113639 +        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
113640 +        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
113641 +        return bounds;
113643 +    case ZSTD_c_ldmMinMatch:
113644 +        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
113645 +        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
113646 +        return bounds;
113648 +    case ZSTD_c_ldmBucketSizeLog:
113649 +        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
113650 +        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
113651 +        return bounds;
113653 +    case ZSTD_c_ldmHashRateLog:
113654 +        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
113655 +        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
113656 +        return bounds;
113658 +    /* experimental parameters */
113659 +    case ZSTD_c_rsyncable:
113660 +        bounds.lowerBound = 0;
113661 +        bounds.upperBound = 1;
113662 +        return bounds;
113664 +    case ZSTD_c_forceMaxWindow :
113665 +        bounds.lowerBound = 0;
113666 +        bounds.upperBound = 1;
113667 +        return bounds;
113669 +    case ZSTD_c_format:
113670 +        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
113671 +        bounds.lowerBound = ZSTD_f_zstd1;
113672 +        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
113673 +        return bounds;
113675 +    case ZSTD_c_forceAttachDict:
113676 +        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
113677 +        bounds.lowerBound = ZSTD_dictDefaultAttach;
113678 +        bounds.upperBound = ZSTD_dictForceLoad;       /* note : how to ensure at compile time that this is the highest value enum ? */
113679 +        return bounds;
113681 +    case ZSTD_c_literalCompressionMode:
113682 +        ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
113683 +        bounds.lowerBound = ZSTD_lcm_auto;
113684 +        bounds.upperBound = ZSTD_lcm_uncompressed;
113685 +        return bounds;
113687 +    case ZSTD_c_targetCBlockSize:
113688 +        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
113689 +        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
113690 +        return bounds;
113692 +    case ZSTD_c_srcSizeHint:
113693 +        bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
113694 +        bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
113695 +        return bounds;
113697 +    case ZSTD_c_stableInBuffer:
113698 +    case ZSTD_c_stableOutBuffer:
113699 +        bounds.lowerBound = (int)ZSTD_bm_buffered;
113700 +        bounds.upperBound = (int)ZSTD_bm_stable;
113701 +        return bounds;
113703 +    case ZSTD_c_blockDelimiters:
113704 +        bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
113705 +        bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
113706 +        return bounds;
113708 +    case ZSTD_c_validateSequences:
113709 +        bounds.lowerBound = 0;
113710 +        bounds.upperBound = 1;
113711 +        return bounds;
113713 +    default:
113714 +        bounds.error = ERROR(parameter_unsupported);
113715 +        return bounds;
113716 +    }
113719 +/* ZSTD_cParam_clampBounds:
113720 + * Clamps the value into the bounded range.
113721 + */
113722 +static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
113724 +    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
113725 +    if (ZSTD_isError(bounds.error)) return bounds.error;
113726 +    if (*value < bounds.lowerBound) *value = bounds.lowerBound;
113727 +    if (*value > bounds.upperBound) *value = bounds.upperBound;
113728 +    return 0;
113731 +#define BOUNDCHECK(cParam, val) { \
113732 +    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
113733 +                    parameter_outOfBound, "Param out of bounds"); \
113737 +static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
113739 +    switch(param)
113740 +    {
113741 +    case ZSTD_c_compressionLevel:
113742 +    case ZSTD_c_hashLog:
113743 +    case ZSTD_c_chainLog:
113744 +    case ZSTD_c_searchLog:
113745 +    case ZSTD_c_minMatch:
113746 +    case ZSTD_c_targetLength:
113747 +    case ZSTD_c_strategy:
113748 +        return 1;
113750 +    case ZSTD_c_format:
113751 +    case ZSTD_c_windowLog:
113752 +    case ZSTD_c_contentSizeFlag:
113753 +    case ZSTD_c_checksumFlag:
113754 +    case ZSTD_c_dictIDFlag:
113755 +    case ZSTD_c_forceMaxWindow :
113756 +    case ZSTD_c_nbWorkers:
113757 +    case ZSTD_c_jobSize:
113758 +    case ZSTD_c_overlapLog:
113759 +    case ZSTD_c_rsyncable:
113760 +    case ZSTD_c_enableDedicatedDictSearch:
113761 +    case ZSTD_c_enableLongDistanceMatching:
113762 +    case ZSTD_c_ldmHashLog:
113763 +    case ZSTD_c_ldmMinMatch:
113764 +    case ZSTD_c_ldmBucketSizeLog:
113765 +    case ZSTD_c_ldmHashRateLog:
113766 +    case ZSTD_c_forceAttachDict:
113767 +    case ZSTD_c_literalCompressionMode:
113768 +    case ZSTD_c_targetCBlockSize:
113769 +    case ZSTD_c_srcSizeHint:
113770 +    case ZSTD_c_stableInBuffer:
113771 +    case ZSTD_c_stableOutBuffer:
113772 +    case ZSTD_c_blockDelimiters:
113773 +    case ZSTD_c_validateSequences:
113774 +    default:
113775 +        return 0;
113776 +    }
113779 +size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
113781 +    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
113782 +    if (cctx->streamStage != zcss_init) {
113783 +        if (ZSTD_isUpdateAuthorized(param)) {
113784 +            cctx->cParamsChanged = 1;
113785 +        } else {
113786 +            RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
113787 +    }   }
113789 +    switch(param)
113790 +    {
113791 +    case ZSTD_c_nbWorkers:
113792 +        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
113793 +                        "MT not compatible with static alloc");
113794 +        break;
113796 +    case ZSTD_c_compressionLevel:
113797 +    case ZSTD_c_windowLog:
113798 +    case ZSTD_c_hashLog:
113799 +    case ZSTD_c_chainLog:
113800 +    case ZSTD_c_searchLog:
113801 +    case ZSTD_c_minMatch:
113802 +    case ZSTD_c_targetLength:
113803 +    case ZSTD_c_strategy:
113804 +    case ZSTD_c_ldmHashRateLog:
113805 +    case ZSTD_c_format:
113806 +    case ZSTD_c_contentSizeFlag:
113807 +    case ZSTD_c_checksumFlag:
113808 +    case ZSTD_c_dictIDFlag:
113809 +    case ZSTD_c_forceMaxWindow:
113810 +    case ZSTD_c_forceAttachDict:
113811 +    case ZSTD_c_literalCompressionMode:
113812 +    case ZSTD_c_jobSize:
113813 +    case ZSTD_c_overlapLog:
113814 +    case ZSTD_c_rsyncable:
113815 +    case ZSTD_c_enableDedicatedDictSearch:
113816 +    case ZSTD_c_enableLongDistanceMatching:
113817 +    case ZSTD_c_ldmHashLog:
113818 +    case ZSTD_c_ldmMinMatch:
113819 +    case ZSTD_c_ldmBucketSizeLog:
113820 +    case ZSTD_c_targetCBlockSize:
113821 +    case ZSTD_c_srcSizeHint:
113822 +    case ZSTD_c_stableInBuffer:
113823 +    case ZSTD_c_stableOutBuffer:
113824 +    case ZSTD_c_blockDelimiters:
113825 +    case ZSTD_c_validateSequences:
113826 +        break;
113828 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
113829 +    }
113830 +    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
113833 +size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
113834 +                                    ZSTD_cParameter param, int value)
113836 +    DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
113837 +    switch(param)
113838 +    {
113839 +    case ZSTD_c_format :
113840 +        BOUNDCHECK(ZSTD_c_format, value);
113841 +        CCtxParams->format = (ZSTD_format_e)value;
113842 +        return (size_t)CCtxParams->format;
113844 +    case ZSTD_c_compressionLevel : {
113845 +        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
113846 +        if (value == 0)
113847 +            CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
113848 +        else
113849 +            CCtxParams->compressionLevel = value;
113850 +        if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
113851 +        return 0;  /* return type (size_t) cannot represent negative values */
113852 +    }
113854 +    case ZSTD_c_windowLog :
113855 +        if (value!=0)   /* 0 => use default */
113856 +            BOUNDCHECK(ZSTD_c_windowLog, value);
113857 +        CCtxParams->cParams.windowLog = (U32)value;
113858 +        return CCtxParams->cParams.windowLog;
113860 +    case ZSTD_c_hashLog :
113861 +        if (value!=0)   /* 0 => use default */
113862 +            BOUNDCHECK(ZSTD_c_hashLog, value);
113863 +        CCtxParams->cParams.hashLog = (U32)value;
113864 +        return CCtxParams->cParams.hashLog;
113866 +    case ZSTD_c_chainLog :
113867 +        if (value!=0)   /* 0 => use default */
113868 +            BOUNDCHECK(ZSTD_c_chainLog, value);
113869 +        CCtxParams->cParams.chainLog = (U32)value;
113870 +        return CCtxParams->cParams.chainLog;
113872 +    case ZSTD_c_searchLog :
113873 +        if (value!=0)   /* 0 => use default */
113874 +            BOUNDCHECK(ZSTD_c_searchLog, value);
113875 +        CCtxParams->cParams.searchLog = (U32)value;
113876 +        return (size_t)value;
113878 +    case ZSTD_c_minMatch :
113879 +        if (value!=0)   /* 0 => use default */
113880 +            BOUNDCHECK(ZSTD_c_minMatch, value);
113881 +        CCtxParams->cParams.minMatch = value;
113882 +        return CCtxParams->cParams.minMatch;
113884 +    case ZSTD_c_targetLength :
113885 +        BOUNDCHECK(ZSTD_c_targetLength, value);
113886 +        CCtxParams->cParams.targetLength = value;
113887 +        return CCtxParams->cParams.targetLength;
113889 +    case ZSTD_c_strategy :
113890 +        if (value!=0)   /* 0 => use default */
113891 +            BOUNDCHECK(ZSTD_c_strategy, value);
113892 +        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
113893 +        return (size_t)CCtxParams->cParams.strategy;
113895 +    case ZSTD_c_contentSizeFlag :
113896 +        /* Content size written in frame header _when known_ (default:1) */
113897 +        DEBUGLOG(4, "set content size flag = %u", (value!=0));
113898 +        CCtxParams->fParams.contentSizeFlag = value != 0;
113899 +        return CCtxParams->fParams.contentSizeFlag;
113901 +    case ZSTD_c_checksumFlag :
113902 +        /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
113903 +        CCtxParams->fParams.checksumFlag = value != 0;
113904 +        return CCtxParams->fParams.checksumFlag;
113906 +    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
113907 +        DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
113908 +        CCtxParams->fParams.noDictIDFlag = !value;
113909 +        return !CCtxParams->fParams.noDictIDFlag;
113911 +    case ZSTD_c_forceMaxWindow :
113912 +        CCtxParams->forceWindow = (value != 0);
113913 +        return CCtxParams->forceWindow;
113915 +    case ZSTD_c_forceAttachDict : {
113916 +        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
113917 +        BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
113918 +        CCtxParams->attachDictPref = pref;
113919 +        return CCtxParams->attachDictPref;
113920 +    }
113922 +    case ZSTD_c_literalCompressionMode : {
113923 +        const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
113924 +        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
113925 +        CCtxParams->literalCompressionMode = lcm;
113926 +        return CCtxParams->literalCompressionMode;
113927 +    }
113929 +    case ZSTD_c_nbWorkers :
113930 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
113931 +        return 0;
113933 +    case ZSTD_c_jobSize :
113934 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
113935 +        return 0;
113937 +    case ZSTD_c_overlapLog :
113938 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
113939 +        return 0;
113941 +    case ZSTD_c_rsyncable :
113942 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
113943 +        return 0;
113945 +    case ZSTD_c_enableDedicatedDictSearch :
113946 +        CCtxParams->enableDedicatedDictSearch = (value!=0);
113947 +        return CCtxParams->enableDedicatedDictSearch;
113949 +    case ZSTD_c_enableLongDistanceMatching :
113950 +        CCtxParams->ldmParams.enableLdm = (value!=0);
113951 +        return CCtxParams->ldmParams.enableLdm;
113953 +    case ZSTD_c_ldmHashLog :
113954 +        if (value!=0)   /* 0 ==> auto */
113955 +            BOUNDCHECK(ZSTD_c_ldmHashLog, value);
113956 +        CCtxParams->ldmParams.hashLog = value;
113957 +        return CCtxParams->ldmParams.hashLog;
113959 +    case ZSTD_c_ldmMinMatch :
113960 +        if (value!=0)   /* 0 ==> default */
113961 +            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
113962 +        CCtxParams->ldmParams.minMatchLength = value;
113963 +        return CCtxParams->ldmParams.minMatchLength;
113965 +    case ZSTD_c_ldmBucketSizeLog :
113966 +        if (value!=0)   /* 0 ==> default */
113967 +            BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
113968 +        CCtxParams->ldmParams.bucketSizeLog = value;
113969 +        return CCtxParams->ldmParams.bucketSizeLog;
113971 +    case ZSTD_c_ldmHashRateLog :
113972 +        RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
113973 +                        parameter_outOfBound, "Param out of bounds!");
113974 +        CCtxParams->ldmParams.hashRateLog = value;
113975 +        return CCtxParams->ldmParams.hashRateLog;
113977 +    case ZSTD_c_targetCBlockSize :
113978 +        if (value!=0)   /* 0 ==> default */
113979 +            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
113980 +        CCtxParams->targetCBlockSize = value;
113981 +        return CCtxParams->targetCBlockSize;
113983 +    case ZSTD_c_srcSizeHint :
113984 +        if (value!=0)    /* 0 ==> default */
113985 +            BOUNDCHECK(ZSTD_c_srcSizeHint, value);
113986 +        CCtxParams->srcSizeHint = value;
113987 +        return CCtxParams->srcSizeHint;
113989 +    case ZSTD_c_stableInBuffer:
113990 +        BOUNDCHECK(ZSTD_c_stableInBuffer, value);
113991 +        CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
113992 +        return CCtxParams->inBufferMode;
113994 +    case ZSTD_c_stableOutBuffer:
113995 +        BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
113996 +        CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
113997 +        return CCtxParams->outBufferMode;
113999 +    case ZSTD_c_blockDelimiters:
114000 +        BOUNDCHECK(ZSTD_c_blockDelimiters, value);
114001 +        CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
114002 +        return CCtxParams->blockDelimiters;
114004 +    case ZSTD_c_validateSequences:
114005 +        BOUNDCHECK(ZSTD_c_validateSequences, value);
114006 +        CCtxParams->validateSequences = value;
114007 +        return CCtxParams->validateSequences;
114009 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
114010 +    }
114013 +size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
114015 +    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
114018 +size_t ZSTD_CCtxParams_getParameter(
114019 +        ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
114021 +    switch(param)
114022 +    {
114023 +    case ZSTD_c_format :
114024 +        *value = CCtxParams->format;
114025 +        break;
114026 +    case ZSTD_c_compressionLevel :
114027 +        *value = CCtxParams->compressionLevel;
114028 +        break;
114029 +    case ZSTD_c_windowLog :
114030 +        *value = (int)CCtxParams->cParams.windowLog;
114031 +        break;
114032 +    case ZSTD_c_hashLog :
114033 +        *value = (int)CCtxParams->cParams.hashLog;
114034 +        break;
114035 +    case ZSTD_c_chainLog :
114036 +        *value = (int)CCtxParams->cParams.chainLog;
114037 +        break;
114038 +    case ZSTD_c_searchLog :
114039 +        *value = CCtxParams->cParams.searchLog;
114040 +        break;
114041 +    case ZSTD_c_minMatch :
114042 +        *value = CCtxParams->cParams.minMatch;
114043 +        break;
114044 +    case ZSTD_c_targetLength :
114045 +        *value = CCtxParams->cParams.targetLength;
114046 +        break;
114047 +    case ZSTD_c_strategy :
114048 +        *value = (unsigned)CCtxParams->cParams.strategy;
114049 +        break;
114050 +    case ZSTD_c_contentSizeFlag :
114051 +        *value = CCtxParams->fParams.contentSizeFlag;
114052 +        break;
114053 +    case ZSTD_c_checksumFlag :
114054 +        *value = CCtxParams->fParams.checksumFlag;
114055 +        break;
114056 +    case ZSTD_c_dictIDFlag :
114057 +        *value = !CCtxParams->fParams.noDictIDFlag;
114058 +        break;
114059 +    case ZSTD_c_forceMaxWindow :
114060 +        *value = CCtxParams->forceWindow;
114061 +        break;
114062 +    case ZSTD_c_forceAttachDict :
114063 +        *value = CCtxParams->attachDictPref;
114064 +        break;
114065 +    case ZSTD_c_literalCompressionMode :
114066 +        *value = CCtxParams->literalCompressionMode;
114067 +        break;
114068 +    case ZSTD_c_nbWorkers :
114069 +        assert(CCtxParams->nbWorkers == 0);
114070 +        *value = CCtxParams->nbWorkers;
114071 +        break;
114072 +    case ZSTD_c_jobSize :
114073 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
114074 +    case ZSTD_c_overlapLog :
114075 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
114076 +    case ZSTD_c_rsyncable :
114077 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
114078 +    case ZSTD_c_enableDedicatedDictSearch :
114079 +        *value = CCtxParams->enableDedicatedDictSearch;
114080 +        break;
114081 +    case ZSTD_c_enableLongDistanceMatching :
114082 +        *value = CCtxParams->ldmParams.enableLdm;
114083 +        break;
114084 +    case ZSTD_c_ldmHashLog :
114085 +        *value = CCtxParams->ldmParams.hashLog;
114086 +        break;
114087 +    case ZSTD_c_ldmMinMatch :
114088 +        *value = CCtxParams->ldmParams.minMatchLength;
114089 +        break;
114090 +    case ZSTD_c_ldmBucketSizeLog :
114091 +        *value = CCtxParams->ldmParams.bucketSizeLog;
114092 +        break;
114093 +    case ZSTD_c_ldmHashRateLog :
114094 +        *value = CCtxParams->ldmParams.hashRateLog;
114095 +        break;
114096 +    case ZSTD_c_targetCBlockSize :
114097 +        *value = (int)CCtxParams->targetCBlockSize;
114098 +        break;
114099 +    case ZSTD_c_srcSizeHint :
114100 +        *value = (int)CCtxParams->srcSizeHint;
114101 +        break;
114102 +    case ZSTD_c_stableInBuffer :
114103 +        *value = (int)CCtxParams->inBufferMode;
114104 +        break;
114105 +    case ZSTD_c_stableOutBuffer :
114106 +        *value = (int)CCtxParams->outBufferMode;
114107 +        break;
114108 +    case ZSTD_c_blockDelimiters :
114109 +        *value = (int)CCtxParams->blockDelimiters;
114110 +        break;
114111 +    case ZSTD_c_validateSequences :
114112 +        *value = (int)CCtxParams->validateSequences;
114113 +        break;
114114 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
114115 +    }
114116 +    return 0;
114119 +/** ZSTD_CCtx_setParametersUsingCCtxParams() :
114120 + *  just applies `params` into `cctx`
114121 + *  no action is performed, parameters are merely stored.
114122 + *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
114123 + *    This is possible even if a compression is ongoing.
114124 + *    In which case, new parameters will be applied on the fly, starting with next compression job.
114125 + */
114126 +size_t ZSTD_CCtx_setParametersUsingCCtxParams(
114127 +        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
114129 +    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
114130 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
114131 +                    "The context is in the wrong stage!");
114132 +    RETURN_ERROR_IF(cctx->cdict, stage_wrong,
114133 +                    "Can't override parameters with cdict attached (some must "
114134 +                    "be inherited from the cdict).");
114136 +    cctx->requestedParams = *params;
114137 +    return 0;
114140 +ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
114142 +    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
114143 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
114144 +                    "Can't set pledgedSrcSize when not in init stage.");
114145 +    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
114146 +    return 0;
114149 +static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
114150 +        int const compressionLevel,
114151 +        size_t const dictSize);
114152 +static int ZSTD_dedicatedDictSearch_isSupported(
114153 +        const ZSTD_compressionParameters* cParams);
114154 +static void ZSTD_dedicatedDictSearch_revertCParams(
114155 +        ZSTD_compressionParameters* cParams);
114158 + * Initializes the local dict using the requested parameters.
114159 + * NOTE: This does not use the pledged src size, because it may be used for more
114160 + * than one compression.
114161 + */
114162 +static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
114164 +    ZSTD_localDict* const dl = &cctx->localDict;
114165 +    if (dl->dict == NULL) {
114166 +        /* No local dictionary. */
114167 +        assert(dl->dictBuffer == NULL);
114168 +        assert(dl->cdict == NULL);
114169 +        assert(dl->dictSize == 0);
114170 +        return 0;
114171 +    }
114172 +    if (dl->cdict != NULL) {
114173 +        assert(cctx->cdict == dl->cdict);
114174 +        /* Local dictionary already initialized. */
114175 +        return 0;
114176 +    }
114177 +    assert(dl->dictSize > 0);
114178 +    assert(cctx->cdict == NULL);
114179 +    assert(cctx->prefixDict.dict == NULL);
114181 +    dl->cdict = ZSTD_createCDict_advanced2(
114182 +            dl->dict,
114183 +            dl->dictSize,
114184 +            ZSTD_dlm_byRef,
114185 +            dl->dictContentType,
114186 +            &cctx->requestedParams,
114187 +            cctx->customMem);
114188 +    RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
114189 +    cctx->cdict = dl->cdict;
114190 +    return 0;
114193 +size_t ZSTD_CCtx_loadDictionary_advanced(
114194 +        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
114195 +        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
114197 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
114198 +                    "Can't load a dictionary when ctx is not in init stage.");
114199 +    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
114200 +    ZSTD_clearAllDicts(cctx);  /* in case one already exists */
114201 +    if (dict == NULL || dictSize == 0)  /* no dictionary mode */
114202 +        return 0;
114203 +    if (dictLoadMethod == ZSTD_dlm_byRef) {
114204 +        cctx->localDict.dict = dict;
114205 +    } else {
114206 +        void* dictBuffer;
114207 +        RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
114208 +                        "no malloc for static CCtx");
114209 +        dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
114210 +        RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
114211 +        ZSTD_memcpy(dictBuffer, dict, dictSize);
114212 +        cctx->localDict.dictBuffer = dictBuffer;
114213 +        cctx->localDict.dict = dictBuffer;
114214 +    }
114215 +    cctx->localDict.dictSize = dictSize;
114216 +    cctx->localDict.dictContentType = dictContentType;
114217 +    return 0;
114220 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
114221 +      ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
114223 +    return ZSTD_CCtx_loadDictionary_advanced(
114224 +            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
114227 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
114229 +    return ZSTD_CCtx_loadDictionary_advanced(
114230 +            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
114234 +size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
114236 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
114237 +                    "Can't ref a dict when ctx not in init stage.");
114238 +    /* Free the existing local cdict (if any) to save memory. */
114239 +    ZSTD_clearAllDicts(cctx);
114240 +    cctx->cdict = cdict;
114241 +    return 0;
114244 +size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
114246 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
114247 +                    "Can't ref a pool when ctx not in init stage.");
114248 +    cctx->pool = pool;
114249 +    return 0;
114252 +size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
114254 +    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
114257 +size_t ZSTD_CCtx_refPrefix_advanced(
114258 +        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
114260 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
114261 +                    "Can't ref a prefix when ctx not in init stage.");
114262 +    ZSTD_clearAllDicts(cctx);
114263 +    if (prefix != NULL && prefixSize > 0) {
114264 +        cctx->prefixDict.dict = prefix;
114265 +        cctx->prefixDict.dictSize = prefixSize;
114266 +        cctx->prefixDict.dictContentType = dictContentType;
114267 +    }
114268 +    return 0;
114271 +/*! ZSTD_CCtx_reset() :
114272 + *  Also dumps dictionary */
114273 +size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
114275 +    if ( (reset == ZSTD_reset_session_only)
114276 +      || (reset == ZSTD_reset_session_and_parameters) ) {
114277 +        cctx->streamStage = zcss_init;
114278 +        cctx->pledgedSrcSizePlusOne = 0;
114279 +    }
114280 +    if ( (reset == ZSTD_reset_parameters)
114281 +      || (reset == ZSTD_reset_session_and_parameters) ) {
114282 +        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
114283 +                        "Can't reset parameters only when not in init stage.");
114284 +        ZSTD_clearAllDicts(cctx);
114285 +        return ZSTD_CCtxParams_reset(&cctx->requestedParams);
114286 +    }
114287 +    return 0;
114291 +/** ZSTD_checkCParams() :
114292 +    control CParam values remain within authorized range.
114293 +    @return : 0, or an error code if one value is beyond authorized range */
114294 +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
114296 +    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
114297 +    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
114298 +    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
114299 +    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
114300 +    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
114301 +    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
114302 +    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
114303 +    return 0;
114306 +/** ZSTD_clampCParams() :
114307 + *  make CParam values within valid range.
114308 + *  @return : valid CParams */
114309 +static ZSTD_compressionParameters
114310 +ZSTD_clampCParams(ZSTD_compressionParameters cParams)
114312 +#   define CLAMP_TYPE(cParam, val, type) {                                \
114313 +        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
114314 +        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
114315 +        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
114316 +    }
114317 +#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
114318 +    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
114319 +    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
114320 +    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
114321 +    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
114322 +    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
114323 +    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
114324 +    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
114325 +    return cParams;
114328 +/** ZSTD_cycleLog() :
114329 + *  condition for correct operation : hashLog > 1 */
114330 +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
114332 +    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
114333 +    return hashLog - btScale;
114336 +/** ZSTD_dictAndWindowLog() :
114337 + * Returns an adjusted window log that is large enough to fit the source and the dictionary.
114338 + * The zstd format says that the entire dictionary is valid if one byte of the dictionary
114339 + * is within the window. So the hashLog and chainLog should be large enough to reference both
114340 + * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
114341 + * the hashLog and windowLog.
114342 + * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
114343 + */
114344 +static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
114346 +    const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
114347 +    /* No dictionary ==> No change */
114348 +    if (dictSize == 0) {
114349 +        return windowLog;
114350 +    }
114351 +    assert(windowLog <= ZSTD_WINDOWLOG_MAX);
114352 +    assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
114353 +    {
114354 +        U64 const windowSize = 1ULL << windowLog;
114355 +        U64 const dictAndWindowSize = dictSize + windowSize;
114356 +        /* If the window size is already large enough to fit both the source and the dictionary
114357 +         * then just use the window size. Otherwise adjust so that it fits the dictionary and
114358 +         * the window.
114359 +         */
114360 +        if (windowSize >= dictSize + srcSize) {
114361 +            return windowLog; /* Window size large enough already */
114362 +        } else if (dictAndWindowSize >= maxWindowSize) {
114363 +            return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
114364 +        } else  {
114365 +            return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
114366 +        }
114367 +    }
114370 +/** ZSTD_adjustCParams_internal() :
114371 + *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).
114372 + *  mostly downsize to reduce memory consumption and initialization latency.
114373 + * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
114374 + * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
114375 + *  note : `srcSize==0` means 0!
114376 + *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
114377 +static ZSTD_compressionParameters
114378 +ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
114379 +                            unsigned long long srcSize,
114380 +                            size_t dictSize,
114381 +                            ZSTD_cParamMode_e mode)
114383 +    const U64 minSrcSize = 513; /* (1<<9) + 1 */
114384 +    const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
114385 +    assert(ZSTD_checkCParams(cPar)==0);
114387 +    switch (mode) {
114388 +    case ZSTD_cpm_unknown:
114389 +    case ZSTD_cpm_noAttachDict:
114390 +        /* If we don't know the source size, don't make any
114391 +         * assumptions about it. We will already have selected
114392 +         * smaller parameters if a dictionary is in use.
114393 +         */
114394 +        break;
114395 +    case ZSTD_cpm_createCDict:
114396 +        /* Assume a small source size when creating a dictionary
114397 +         * with an unkown source size.
114398 +         */
114399 +        if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
114400 +            srcSize = minSrcSize;
114401 +        break;
114402 +    case ZSTD_cpm_attachDict:
114403 +        /* Dictionary has its own dedicated parameters which have
114404 +         * already been selected. We are selecting parameters
114405 +         * for only the source.
114406 +         */
114407 +        dictSize = 0;
114408 +        break;
114409 +    default:
114410 +        assert(0);
114411 +        break;
114412 +    }
114414 +    /* resize windowLog if input is small enough, to use less memory */
114415 +    if ( (srcSize < maxWindowResize)
114416 +      && (dictSize < maxWindowResize) )  {
114417 +        U32 const tSize = (U32)(srcSize + dictSize);
114418 +        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
114419 +        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
114420 +                            ZSTD_highbit32(tSize-1) + 1;
114421 +        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
114422 +    }
114423 +    if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
114424 +        U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
114425 +        U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
114426 +        if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
114427 +        if (cycleLog > dictAndWindowLog)
114428 +            cPar.chainLog -= (cycleLog - dictAndWindowLog);
114429 +    }
114431 +    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
114432 +        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */
114434 +    return cPar;
114437 +ZSTD_compressionParameters
114438 +ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
114439 +                   unsigned long long srcSize,
114440 +                   size_t dictSize)
114442 +    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
114443 +    if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
114444 +    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
114447 +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
114448 +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
114450 +static void ZSTD_overrideCParams(
114451 +              ZSTD_compressionParameters* cParams,
114452 +        const ZSTD_compressionParameters* overrides)
114454 +    if (overrides->windowLog)    cParams->windowLog    = overrides->windowLog;
114455 +    if (overrides->hashLog)      cParams->hashLog      = overrides->hashLog;
114456 +    if (overrides->chainLog)     cParams->chainLog     = overrides->chainLog;
114457 +    if (overrides->searchLog)    cParams->searchLog    = overrides->searchLog;
114458 +    if (overrides->minMatch)     cParams->minMatch     = overrides->minMatch;
114459 +    if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
114460 +    if (overrides->strategy)     cParams->strategy     = overrides->strategy;
114463 +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
114464 +        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
114466 +    ZSTD_compressionParameters cParams;
114467 +    if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
114468 +      srcSizeHint = CCtxParams->srcSizeHint;
114469 +    }
114470 +    cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
114471 +    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
114472 +    ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
114473 +    assert(!ZSTD_checkCParams(cParams));
114474 +    /* srcSizeHint == 0 means 0 */
114475 +    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
114478 +static size_t
114479 +ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
114480 +                       const U32 forCCtx)
114482 +    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
114483 +    size_t const hSize = ((size_t)1) << cParams->hashLog;
114484 +    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
114485 +    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
114486 +    /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
114487 +     * surrounded by redzones in ASAN. */
114488 +    size_t const tableSpace = chainSize * sizeof(U32)
114489 +                            + hSize * sizeof(U32)
114490 +                            + h3Size * sizeof(U32);
114491 +    size_t const optPotentialSpace =
114492 +        ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
114493 +      + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
114494 +      + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
114495 +      + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
114496 +      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
114497 +      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
114498 +    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
114499 +                                ? optPotentialSpace
114500 +                                : 0;
114501 +    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
114502 +                (U32)chainSize, (U32)hSize, (U32)h3Size);
114503 +    return tableSpace + optSpace;
114506 +static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
114507 +        const ZSTD_compressionParameters* cParams,
114508 +        const ldmParams_t* ldmParams,
114509 +        const int isStatic,
114510 +        const size_t buffInSize,
114511 +        const size_t buffOutSize,
114512 +        const U64 pledgedSrcSize)
114514 +    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize));
114515 +    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
114516 +    U32    const divider = (cParams->minMatch==3) ? 3 : 4;
114517 +    size_t const maxNbSeq = blockSize / divider;
114518 +    size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
114519 +                            + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
114520 +                            + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
114521 +    size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
114522 +    size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
114523 +    size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, /* forCCtx */ 1);
114525 +    size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
114526 +    size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
114527 +    size_t const ldmSeqSpace = ldmParams->enableLdm ?
114528 +        ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
114531 +    size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
114532 +                             + ZSTD_cwksp_alloc_size(buffOutSize);
114534 +    size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
114536 +    size_t const neededSpace =
114537 +        cctxSpace +
114538 +        entropySpace +
114539 +        blockStateSpace +
114540 +        ldmSpace +
114541 +        ldmSeqSpace +
114542 +        matchStateSize +
114543 +        tokenSpace +
114544 +        bufferSpace;
114546 +    DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
114547 +    return neededSpace;
114550 +size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
114552 +    ZSTD_compressionParameters const cParams =
114553 +                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
114555 +    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
114556 +    /* estimateCCtxSize is for one-shot compression. So no buffers should
114557 +     * be needed. However, we still allocate two 0-sized buffers, which can
114558 +     * take space under ASAN. */
114559 +    return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
114560 +        &cParams, &params->ldmParams, 1, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
114563 +size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
114565 +    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
114566 +    return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
114569 +static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
114571 +    int tier = 0;
114572 +    size_t largestSize = 0;
114573 +    static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
114574 +    for (; tier < 4; ++tier) {
114575 +        /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
114576 +        ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
114577 +        largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
114578 +    }
114579 +    return largestSize;
114582 +size_t ZSTD_estimateCCtxSize(int compressionLevel)
114584 +    int level;
114585 +    size_t memBudget = 0;
114586 +    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
114587 +        /* Ensure monotonically increasing memory usage as compression level increases */
114588 +        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
114589 +        if (newMB > memBudget) memBudget = newMB;
114590 +    }
114591 +    return memBudget;
114594 +size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
114596 +    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
114597 +    {   ZSTD_compressionParameters const cParams =
114598 +                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
114599 +        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
114600 +        size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
114601 +                ? ((size_t)1 << cParams.windowLog) + blockSize
114602 +                : 0;
114603 +        size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
114604 +                ? ZSTD_compressBound(blockSize) + 1
114605 +                : 0;
114607 +        return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
114608 +            &cParams, &params->ldmParams, 1, inBuffSize, outBuffSize,
114609 +            ZSTD_CONTENTSIZE_UNKNOWN);
114610 +    }
114613 +size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
114615 +    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
114616 +    return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
114619 +static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
114621 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
114622 +    return ZSTD_estimateCStreamSize_usingCParams(cParams);
114625 +size_t ZSTD_estimateCStreamSize(int compressionLevel)
114627 +    int level;
114628 +    size_t memBudget = 0;
114629 +    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
114630 +        size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
114631 +        if (newMB > memBudget) memBudget = newMB;
114632 +    }
114633 +    return memBudget;
114636 +/* ZSTD_getFrameProgression():
114637 + * tells how much data has been consumed (input) and produced (output) for current frame.
114638 + * able to count progression inside worker threads (non-blocking mode).
114639 + */
114640 +ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
114642 +    {   ZSTD_frameProgression fp;
114643 +        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
114644 +                                cctx->inBuffPos - cctx->inToCompress;
114645 +        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
114646 +        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
114647 +        fp.ingested = cctx->consumedSrcSize + buffered;
114648 +        fp.consumed = cctx->consumedSrcSize;
114649 +        fp.produced = cctx->producedCSize;
114650 +        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
114651 +        fp.currentJobID = 0;
114652 +        fp.nbActiveWorkers = 0;
114653 +        return fp;
114654 +}   }
114656 +/*! ZSTD_toFlushNow()
114657 + *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
114658 + */
114659 +size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
114661 +    (void)cctx;
114662 +    return 0;   /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
114665 +static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
114666 +                                    ZSTD_compressionParameters cParams2)
114668 +    (void)cParams1;
114669 +    (void)cParams2;
114670 +    assert(cParams1.windowLog    == cParams2.windowLog);
114671 +    assert(cParams1.chainLog     == cParams2.chainLog);
114672 +    assert(cParams1.hashLog      == cParams2.hashLog);
114673 +    assert(cParams1.searchLog    == cParams2.searchLog);
114674 +    assert(cParams1.minMatch     == cParams2.minMatch);
114675 +    assert(cParams1.targetLength == cParams2.targetLength);
114676 +    assert(cParams1.strategy     == cParams2.strategy);
114679 +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
114681 +    int i;
114682 +    for (i = 0; i < ZSTD_REP_NUM; ++i)
114683 +        bs->rep[i] = repStartValue[i];
114684 +    bs->entropy.huf.repeatMode = HUF_repeat_none;
114685 +    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
114686 +    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
114687 +    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
114690 +/*! ZSTD_invalidateMatchState()
114691 + *  Invalidate all the matches in the match finder tables.
114692 + *  Requires nextSrc and base to be set (can be NULL).
114693 + */
114694 +static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
114696 +    ZSTD_window_clear(&ms->window);
114698 +    ms->nextToUpdate = ms->window.dictLimit;
114699 +    ms->loadedDictEnd = 0;
114700 +    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
114701 +    ms->dictMatchState = NULL;
114705 + * Controls, for this matchState reset, whether the tables need to be cleared /
114706 + * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
114707 + * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
114708 + * subsequent operation will overwrite the table space anyways (e.g., copying
114709 + * the matchState contents in from a CDict).
114710 + */
114711 +typedef enum {
114712 +    ZSTDcrp_makeClean,
114713 +    ZSTDcrp_leaveDirty
114714 +} ZSTD_compResetPolicy_e;
114717 + * Controls, for this matchState reset, whether indexing can continue where it
114718 + * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
114719 + * (ZSTDirp_reset).
114720 + */
114721 +typedef enum {
114722 +    ZSTDirp_continue,
114723 +    ZSTDirp_reset
114724 +} ZSTD_indexResetPolicy_e;
114726 +typedef enum {
114727 +    ZSTD_resetTarget_CDict,
114728 +    ZSTD_resetTarget_CCtx
114729 +} ZSTD_resetTarget_e;
114731 +static size_t
114732 +ZSTD_reset_matchState(ZSTD_matchState_t* ms,
114733 +                      ZSTD_cwksp* ws,
114734 +                const ZSTD_compressionParameters* cParams,
114735 +                const ZSTD_compResetPolicy_e crp,
114736 +                const ZSTD_indexResetPolicy_e forceResetIndex,
114737 +                const ZSTD_resetTarget_e forWho)
114739 +    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
114740 +    size_t const hSize = ((size_t)1) << cParams->hashLog;
114741 +    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
114742 +    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
114744 +    DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
114745 +    if (forceResetIndex == ZSTDirp_reset) {
114746 +        ZSTD_window_init(&ms->window);
114747 +        ZSTD_cwksp_mark_tables_dirty(ws);
114748 +    }
114750 +    ms->hashLog3 = hashLog3;
114752 +    ZSTD_invalidateMatchState(ms);
114754 +    assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
114756 +    ZSTD_cwksp_clear_tables(ws);
114758 +    DEBUGLOG(5, "reserving table space");
114759 +    /* table Space */
114760 +    ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
114761 +    ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
114762 +    ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
114763 +    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
114764 +                    "failed a workspace allocation in ZSTD_reset_matchState");
114766 +    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
114767 +    if (crp!=ZSTDcrp_leaveDirty) {
114768 +        /* reset tables only */
114769 +        ZSTD_cwksp_clean_tables(ws);
114770 +    }
114772 +    /* opt parser space */
114773 +    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
114774 +        DEBUGLOG(4, "reserving optimal parser space");
114775 +        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
114776 +        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
114777 +        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
114778 +        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
114779 +        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
114780 +        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
114781 +    }
114783 +    ms->cParams = *cParams;
114785 +    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
114786 +                    "failed a workspace allocation in ZSTD_reset_matchState");
114788 +    return 0;
114791 +/* ZSTD_indexTooCloseToMax() :
114792 + * minor optimization : prefer memset() rather than reduceIndex()
114793 + * which is measurably slow in some circumstances (reported for Visual Studio).
114794 + * Works when re-using a context for a lot of smallish inputs :
114795 + * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
114796 + * memset() will be triggered before reduceIndex().
114797 + */
114798 +#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
114799 +static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
114801 +    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
114804 +/*! ZSTD_resetCCtx_internal() :
114805 +    note : `params` are assumed fully validated at this stage */
114806 +static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
114807 +                                      ZSTD_CCtx_params params,
114808 +                                      U64 const pledgedSrcSize,
114809 +                                      ZSTD_compResetPolicy_e const crp,
114810 +                                      ZSTD_buffered_policy_e const zbuff)
114812 +    ZSTD_cwksp* const ws = &zc->workspace;
114813 +    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
114814 +                (U32)pledgedSrcSize, params.cParams.windowLog);
114815 +    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
114817 +    zc->isFirstBlock = 1;
114819 +    if (params.ldmParams.enableLdm) {
114820 +        /* Adjust long distance matching parameters */
114821 +        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
114822 +        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
114823 +        assert(params.ldmParams.hashRateLog < 32);
114824 +    }
114826 +    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
114827 +        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
114828 +        U32    const divider = (params.cParams.minMatch==3) ? 3 : 4;
114829 +        size_t const maxNbSeq = blockSize / divider;
114830 +        size_t const buffOutSize = (zbuff == ZSTDb_buffered && params.outBufferMode == ZSTD_bm_buffered)
114831 +                ? ZSTD_compressBound(blockSize) + 1
114832 +                : 0;
114833 +        size_t const buffInSize = (zbuff == ZSTDb_buffered && params.inBufferMode == ZSTD_bm_buffered)
114834 +                ? windowSize + blockSize
114835 +                : 0;
114836 +        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
114838 +        int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
114839 +        ZSTD_indexResetPolicy_e needsIndexReset =
114840 +            (!indexTooClose && zc->initialized) ? ZSTDirp_continue : ZSTDirp_reset;
114842 +        size_t const neededSpace =
114843 +            ZSTD_estimateCCtxSize_usingCCtxParams_internal(
114844 +                &params.cParams, &params.ldmParams, zc->staticSize != 0,
114845 +                buffInSize, buffOutSize, pledgedSrcSize);
114846 +        FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
114848 +        if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
114850 +        /* Check if workspace is large enough, alloc a new one if needed */
114851 +        {
114852 +            int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
114853 +            int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
114855 +            DEBUGLOG(4, "Need %zu B workspace", neededSpace);
114856 +            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
114858 +            if (workspaceTooSmall || workspaceWasteful) {
114859 +                DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
114860 +                            ZSTD_cwksp_sizeof(ws) >> 10,
114861 +                            neededSpace >> 10);
114863 +                RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
114865 +                needsIndexReset = ZSTDirp_reset;
114867 +                ZSTD_cwksp_free(ws, zc->customMem);
114868 +                FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
114870 +                DEBUGLOG(5, "reserving object space");
114871 +                /* Statically sized space.
114872 +                 * entropyWorkspace never moves,
114873 +                 * though prev/next block swap places */
114874 +                assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
114875 +                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
114876 +                RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
114877 +                zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
114878 +                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
114879 +                zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
114880 +                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
114881 +        }   }
114883 +        ZSTD_cwksp_clear(ws);
114885 +        /* init params */
114886 +        zc->appliedParams = params;
114887 +        zc->blockState.matchState.cParams = params.cParams;
114888 +        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
114889 +        zc->consumedSrcSize = 0;
114890 +        zc->producedCSize = 0;
114891 +        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
114892 +            zc->appliedParams.fParams.contentSizeFlag = 0;
114893 +        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
114894 +            (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
114895 +        zc->blockSize = blockSize;
114897 +        xxh64_reset(&zc->xxhState, 0);
114898 +        zc->stage = ZSTDcs_init;
114899 +        zc->dictID = 0;
114900 +        zc->dictContentSize = 0;
114902 +        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
114904 +        /* ZSTD_wildcopy() is used to copy into the literals buffer,
114905 +         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
114906 +         */
114907 +        zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
114908 +        zc->seqStore.maxNbLit = blockSize;
114910 +        /* buffers */
114911 +        zc->bufferedPolicy = zbuff;
114912 +        zc->inBuffSize = buffInSize;
114913 +        zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
114914 +        zc->outBuffSize = buffOutSize;
114915 +        zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
114917 +        /* ldm bucketOffsets table */
114918 +        if (params.ldmParams.enableLdm) {
114919 +            /* TODO: avoid memset? */
114920 +            size_t const numBuckets =
114921 +                  ((size_t)1) << (params.ldmParams.hashLog -
114922 +                                  params.ldmParams.bucketSizeLog);
114923 +            zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
114924 +            ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
114925 +        }
114927 +        /* sequences storage */
114928 +        ZSTD_referenceExternalSequences(zc, NULL, 0);
114929 +        zc->seqStore.maxNbSeq = maxNbSeq;
114930 +        zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
114931 +        zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
114932 +        zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
114933 +        zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
114935 +        FORWARD_IF_ERROR(ZSTD_reset_matchState(
114936 +            &zc->blockState.matchState,
114937 +            ws,
114938 +            &params.cParams,
114939 +            crp,
114940 +            needsIndexReset,
114941 +            ZSTD_resetTarget_CCtx), "");
114943 +        /* ldm hash table */
114944 +        if (params.ldmParams.enableLdm) {
114945 +            /* TODO: avoid memset? */
114946 +            size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
114947 +            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
114948 +            ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
114949 +            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
114950 +            zc->maxNbLdmSequences = maxNbLdmSeq;
114952 +            ZSTD_window_init(&zc->ldmState.window);
114953 +            ZSTD_window_clear(&zc->ldmState.window);
114954 +            zc->ldmState.loadedDictEnd = 0;
114955 +        }
114957 +        /* Due to alignment, when reusing a workspace, we can actually consume
114958 +         * up to 3 extra bytes for alignment. See the comments in zstd_cwksp.h
114959 +         */
114960 +        assert(ZSTD_cwksp_used(ws) >= neededSpace &&
114961 +               ZSTD_cwksp_used(ws) <= neededSpace + 3);
114963 +        DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
114964 +        zc->initialized = 1;
114966 +        return 0;
114967 +    }
114970 +/* ZSTD_invalidateRepCodes() :
114971 + * ensures next compression will not use repcodes from previous block.
114972 + * Note : only works with regular variant;
114973 + *        do not use with extDict variant ! */
114974 +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
114975 +    int i;
114976 +    for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
114977 +    assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
114980 +/* These are the approximate sizes for each strategy past which copying the
114981 + * dictionary tables into the working context is faster than using them
114982 + * in-place.
114983 + */
114984 +static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
114985 +    8 KB,  /* unused */
114986 +    8 KB,  /* ZSTD_fast */
114987 +    16 KB, /* ZSTD_dfast */
114988 +    32 KB, /* ZSTD_greedy */
114989 +    32 KB, /* ZSTD_lazy */
114990 +    32 KB, /* ZSTD_lazy2 */
114991 +    32 KB, /* ZSTD_btlazy2 */
114992 +    32 KB, /* ZSTD_btopt */
114993 +    8 KB,  /* ZSTD_btultra */
114994 +    8 KB   /* ZSTD_btultra2 */
114997 +static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
114998 +                                 const ZSTD_CCtx_params* params,
114999 +                                 U64 pledgedSrcSize)
115001 +    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
115002 +    int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
115003 +    return dedicatedDictSearch
115004 +        || ( ( pledgedSrcSize <= cutoff
115005 +            || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
115006 +            || params->attachDictPref == ZSTD_dictForceAttach )
115007 +          && params->attachDictPref != ZSTD_dictForceCopy
115008 +          && !params->forceWindow ); /* dictMatchState isn't correctly
115009 +                                      * handled in _enforceMaxDist */
115012 +static size_t
115013 +ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
115014 +                        const ZSTD_CDict* cdict,
115015 +                        ZSTD_CCtx_params params,
115016 +                        U64 pledgedSrcSize,
115017 +                        ZSTD_buffered_policy_e zbuff)
115019 +    {
115020 +        ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
115021 +        unsigned const windowLog = params.cParams.windowLog;
115022 +        assert(windowLog != 0);
115023 +        /* Resize working context table params for input only, since the dict
115024 +         * has its own tables. */
115025 +        /* pledgedSrcSize == 0 means 0! */
115027 +        if (cdict->matchState.dedicatedDictSearch) {
115028 +            ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
115029 +        }
115031 +        params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
115032 +                                                     cdict->dictContentSize, ZSTD_cpm_attachDict);
115033 +        params.cParams.windowLog = windowLog;
115034 +        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
115035 +                                                 ZSTDcrp_makeClean, zbuff), "");
115036 +        assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
115037 +    }
115039 +    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
115040 +                                  - cdict->matchState.window.base);
115041 +        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
115042 +        if (cdictLen == 0) {
115043 +            /* don't even attach dictionaries with no contents */
115044 +            DEBUGLOG(4, "skipping attaching empty dictionary");
115045 +        } else {
115046 +            DEBUGLOG(4, "attaching dictionary into context");
115047 +            cctx->blockState.matchState.dictMatchState = &cdict->matchState;
115049 +            /* prep working match state so dict matches never have negative indices
115050 +             * when they are translated to the working context's index space. */
115051 +            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
115052 +                cctx->blockState.matchState.window.nextSrc =
115053 +                    cctx->blockState.matchState.window.base + cdictEnd;
115054 +                ZSTD_window_clear(&cctx->blockState.matchState.window);
115055 +            }
115056 +            /* loadedDictEnd is expressed within the referential of the active context */
115057 +            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
115058 +    }   }
115060 +    cctx->dictID = cdict->dictID;
115061 +    cctx->dictContentSize = cdict->dictContentSize;
115063 +    /* copy block state */
115064 +    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
115066 +    return 0;
115069 +static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
115070 +                            const ZSTD_CDict* cdict,
115071 +                            ZSTD_CCtx_params params,
115072 +                            U64 pledgedSrcSize,
115073 +                            ZSTD_buffered_policy_e zbuff)
115075 +    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
115077 +    assert(!cdict->matchState.dedicatedDictSearch);
115079 +    DEBUGLOG(4, "copying dictionary into context");
115081 +    {   unsigned const windowLog = params.cParams.windowLog;
115082 +        assert(windowLog != 0);
115083 +        /* Copy only compression parameters related to tables. */
115084 +        params.cParams = *cdict_cParams;
115085 +        params.cParams.windowLog = windowLog;
115086 +        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
115087 +                                                 ZSTDcrp_leaveDirty, zbuff), "");
115088 +        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
115089 +        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
115090 +        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
115091 +    }
115093 +    ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
115095 +    /* copy tables */
115096 +    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
115097 +        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;
115099 +        ZSTD_memcpy(cctx->blockState.matchState.hashTable,
115100 +               cdict->matchState.hashTable,
115101 +               hSize * sizeof(U32));
115102 +        ZSTD_memcpy(cctx->blockState.matchState.chainTable,
115103 +               cdict->matchState.chainTable,
115104 +               chainSize * sizeof(U32));
115105 +    }
115107 +    /* Zero the hashTable3, since the cdict never fills it */
115108 +    {   int const h3log = cctx->blockState.matchState.hashLog3;
115109 +        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
115110 +        assert(cdict->matchState.hashLog3 == 0);
115111 +        ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
115112 +    }
115114 +    ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
115116 +    /* copy dictionary offsets */
115117 +    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
115118 +        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
115119 +        dstMatchState->window       = srcMatchState->window;
115120 +        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
115121 +        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
115122 +    }
115124 +    cctx->dictID = cdict->dictID;
115125 +    cctx->dictContentSize = cdict->dictContentSize;
115127 +    /* copy block state */
115128 +    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
115130 +    return 0;
115133 +/* We have a choice between copying the dictionary context into the working
115134 + * context, or referencing the dictionary context from the working context
115135 + * in-place. We decide here which strategy to use. */
115136 +static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
115137 +                            const ZSTD_CDict* cdict,
115138 +                            const ZSTD_CCtx_params* params,
115139 +                            U64 pledgedSrcSize,
115140 +                            ZSTD_buffered_policy_e zbuff)
115143 +    DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
115144 +                (unsigned)pledgedSrcSize);
115146 +    if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
115147 +        return ZSTD_resetCCtx_byAttachingCDict(
115148 +            cctx, cdict, *params, pledgedSrcSize, zbuff);
115149 +    } else {
115150 +        return ZSTD_resetCCtx_byCopyingCDict(
115151 +            cctx, cdict, *params, pledgedSrcSize, zbuff);
115152 +    }
115155 +/*! ZSTD_copyCCtx_internal() :
115156 + *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
115157 + *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
115158 + *  The "context", in this case, refers to the hash and chain tables,
115159 + *  entropy tables, and dictionary references.
115160 + * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
115161 + * @return : 0, or an error code */
115162 +static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
115163 +                            const ZSTD_CCtx* srcCCtx,
115164 +                            ZSTD_frameParameters fParams,
115165 +                            U64 pledgedSrcSize,
115166 +                            ZSTD_buffered_policy_e zbuff)
115168 +    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
115169 +    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
115170 +                    "Can't copy a ctx that's not in init stage.");
115172 +    ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
115173 +    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
115174 +        /* Copy only compression parameters related to tables. */
115175 +        params.cParams = srcCCtx->appliedParams.cParams;
115176 +        params.fParams = fParams;
115177 +        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
115178 +                                ZSTDcrp_leaveDirty, zbuff);
115179 +        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
115180 +        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
115181 +        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
115182 +        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
115183 +        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
115184 +    }
115186 +    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
115188 +    /* copy tables */
115189 +    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
115190 +        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
115191 +        int const h3log = srcCCtx->blockState.matchState.hashLog3;
115192 +        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
115194 +        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
115195 +               srcCCtx->blockState.matchState.hashTable,
115196 +               hSize * sizeof(U32));
115197 +        ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
115198 +               srcCCtx->blockState.matchState.chainTable,
115199 +               chainSize * sizeof(U32));
115200 +        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
115201 +               srcCCtx->blockState.matchState.hashTable3,
115202 +               h3Size * sizeof(U32));
115203 +    }
115205 +    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
115207 +    /* copy dictionary offsets */
115208 +    {
115209 +        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
115210 +        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
115211 +        dstMatchState->window       = srcMatchState->window;
115212 +        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
115213 +        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
115214 +    }
115215 +    dstCCtx->dictID = srcCCtx->dictID;
115216 +    dstCCtx->dictContentSize = srcCCtx->dictContentSize;
115218 +    /* copy block state */
115219 +    ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
115221 +    return 0;
115224 +/*! ZSTD_copyCCtx() :
115225 + *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
115226 + *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
115227 + *  pledgedSrcSize==0 means "unknown".
115228 +*   @return : 0, or an error code */
115229 +size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
115231 +    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
115232 +    ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
115233 +    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
115234 +    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
115235 +    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
115237 +    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
115238 +                                fParams, pledgedSrcSize,
115239 +                                zbuff);
115243 +#define ZSTD_ROWSIZE 16
115244 +/*! ZSTD_reduceTable() :
115245 + *  reduce table indexes by `reducerValue`, or squash to zero.
115246 + *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
115247 + *  It must be set to a clear 0/1 value, to remove branch during inlining.
115248 + *  Presume table size is a multiple of ZSTD_ROWSIZE
115249 + *  to help auto-vectorization */
115250 +FORCE_INLINE_TEMPLATE void
115251 +ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
115253 +    int const nbRows = (int)size / ZSTD_ROWSIZE;
115254 +    int cellNb = 0;
115255 +    int rowNb;
115256 +    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
115257 +    assert(size < (1U<<31));   /* can be casted to int */
115260 +    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
115261 +        int column;
115262 +        for (column=0; column<ZSTD_ROWSIZE; column++) {
115263 +            if (preserveMark) {
115264 +                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
115265 +                table[cellNb] += adder;
115266 +            }
115267 +            if (table[cellNb] < reducerValue) table[cellNb] = 0;
115268 +            else table[cellNb] -= reducerValue;
115269 +            cellNb++;
115270 +    }   }
115273 +static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
115275 +    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
115278 +static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
115280 +    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
115283 +/*! ZSTD_reduceIndex() :
115284 +*   rescale all indexes to avoid future overflow (indexes are U32) */
115285 +static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
115287 +    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
115288 +        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
115289 +    }
115291 +    if (params->cParams.strategy != ZSTD_fast) {
115292 +        U32 const chainSize = (U32)1 << params->cParams.chainLog;
115293 +        if (params->cParams.strategy == ZSTD_btlazy2)
115294 +            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
115295 +        else
115296 +            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
115297 +    }
115299 +    if (ms->hashLog3) {
115300 +        U32 const h3Size = (U32)1 << ms->hashLog3;
115301 +        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
115302 +    }
115306 +/*-*******************************************************
115307 +*  Block entropic compression
115308 +*********************************************************/
115310 +/* See doc/zstd_compression_format.md for detailed format description */
115312 +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
115314 +    const seqDef* const sequences = seqStorePtr->sequencesStart;
115315 +    BYTE* const llCodeTable = seqStorePtr->llCode;
115316 +    BYTE* const ofCodeTable = seqStorePtr->ofCode;
115317 +    BYTE* const mlCodeTable = seqStorePtr->mlCode;
115318 +    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
115319 +    U32 u;
115320 +    assert(nbSeq <= seqStorePtr->maxNbSeq);
115321 +    for (u=0; u<nbSeq; u++) {
115322 +        U32 const llv = sequences[u].litLength;
115323 +        U32 const mlv = sequences[u].matchLength;
115324 +        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
115325 +        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
115326 +        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
115327 +    }
115328 +    if (seqStorePtr->longLengthID==1)
115329 +        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
115330 +    if (seqStorePtr->longLengthID==2)
115331 +        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
115334 +/* ZSTD_useTargetCBlockSize():
115335 + * Returns if target compressed block size param is being used.
115336 + * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize.
115337 + * Returns 1 if true, 0 otherwise. */
115338 +static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
115340 +    DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
115341 +    return (cctxParams->targetCBlockSize != 0);
115344 +/* ZSTD_entropyCompressSequences_internal():
115345 + * actually compresses both literals and sequences */
115346 +MEM_STATIC size_t
115347 +ZSTD_entropyCompressSequences_internal(seqStore_t* seqStorePtr,
115348 +                          const ZSTD_entropyCTables_t* prevEntropy,
115349 +                                ZSTD_entropyCTables_t* nextEntropy,
115350 +                          const ZSTD_CCtx_params* cctxParams,
115351 +                                void* dst, size_t dstCapacity,
115352 +                                void* entropyWorkspace, size_t entropyWkspSize,
115353 +                          const int bmi2)
115355 +    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
115356 +    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
115357 +    unsigned* count = (unsigned*)entropyWorkspace;
115358 +    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
115359 +    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
115360 +    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
115361 +    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
115362 +    const seqDef* const sequences = seqStorePtr->sequencesStart;
115363 +    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
115364 +    const BYTE* const llCodeTable = seqStorePtr->llCode;
115365 +    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
115366 +    BYTE* const ostart = (BYTE*)dst;
115367 +    BYTE* const oend = ostart + dstCapacity;
115368 +    BYTE* op = ostart;
115369 +    size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
115370 +    BYTE* seqHead;
115371 +    BYTE* lastNCount = NULL;
115373 +    entropyWorkspace = count + (MaxSeq + 1);
115374 +    entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
115376 +    DEBUGLOG(4, "ZSTD_entropyCompressSequences_internal (nbSeq=%zu)", nbSeq);
115377 +    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
115378 +    assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
115380 +    /* Compress literals */
115381 +    {   const BYTE* const literals = seqStorePtr->litStart;
115382 +        size_t const litSize = (size_t)(seqStorePtr->lit - literals);
115383 +        size_t const cSize = ZSTD_compressLiterals(
115384 +                                    &prevEntropy->huf, &nextEntropy->huf,
115385 +                                    cctxParams->cParams.strategy,
115386 +                                    ZSTD_disableLiteralsCompression(cctxParams),
115387 +                                    op, dstCapacity,
115388 +                                    literals, litSize,
115389 +                                    entropyWorkspace, entropyWkspSize,
115390 +                                    bmi2);
115391 +        FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
115392 +        assert(cSize <= dstCapacity);
115393 +        op += cSize;
115394 +    }
115396 +    /* Sequences Header */
115397 +    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
115398 +                    dstSize_tooSmall, "Can't fit seq hdr in output buf!");
115399 +    if (nbSeq < 128) {
115400 +        *op++ = (BYTE)nbSeq;
115401 +    } else if (nbSeq < LONGNBSEQ) {
115402 +        op[0] = (BYTE)((nbSeq>>8) + 0x80);
115403 +        op[1] = (BYTE)nbSeq;
115404 +        op+=2;
115405 +    } else {
115406 +        op[0]=0xFF;
115407 +        MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
115408 +        op+=3;
115409 +    }
115410 +    assert(op <= oend);
115411 +    if (nbSeq==0) {
115412 +        /* Copy the old tables over as if we repeated them */
115413 +        ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
115414 +        return (size_t)(op - ostart);
115415 +    }
115417 +    /* seqHead : flags for FSE encoding type */
115418 +    seqHead = op++;
115419 +    assert(op <= oend);
115421 +    /* convert length/distances into codes */
115422 +    ZSTD_seqToCodes(seqStorePtr);
115423 +    /* build CTable for Literal Lengths */
115424 +    {   unsigned max = MaxLL;
115425 +        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
115426 +        DEBUGLOG(5, "Building LL table");
115427 +        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
115428 +        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
115429 +                                        count, max, mostFrequent, nbSeq,
115430 +                                        LLFSELog, prevEntropy->fse.litlengthCTable,
115431 +                                        LL_defaultNorm, LL_defaultNormLog,
115432 +                                        ZSTD_defaultAllowed, strategy);
115433 +        assert(set_basic < set_compressed && set_rle < set_compressed);
115434 +        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
115435 +        {   size_t const countSize = ZSTD_buildCTable(
115436 +                op, (size_t)(oend - op),
115437 +                CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
115438 +                count, max, llCodeTable, nbSeq,
115439 +                LL_defaultNorm, LL_defaultNormLog, MaxLL,
115440 +                prevEntropy->fse.litlengthCTable,
115441 +                sizeof(prevEntropy->fse.litlengthCTable),
115442 +                entropyWorkspace, entropyWkspSize);
115443 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
115444 +            if (LLtype == set_compressed)
115445 +                lastNCount = op;
115446 +            op += countSize;
115447 +            assert(op <= oend);
115448 +    }   }
115449 +    /* build CTable for Offsets */
115450 +    {   unsigned max = MaxOff;
115451 +        size_t const mostFrequent = HIST_countFast_wksp(
115452 +            count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
115453 +        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
115454 +        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
115455 +        DEBUGLOG(5, "Building OF table");
115456 +        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
115457 +        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
115458 +                                        count, max, mostFrequent, nbSeq,
115459 +                                        OffFSELog, prevEntropy->fse.offcodeCTable,
115460 +                                        OF_defaultNorm, OF_defaultNormLog,
115461 +                                        defaultPolicy, strategy);
115462 +        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
115463 +        {   size_t const countSize = ZSTD_buildCTable(
115464 +                op, (size_t)(oend - op),
115465 +                CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
115466 +                count, max, ofCodeTable, nbSeq,
115467 +                OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
115468 +                prevEntropy->fse.offcodeCTable,
115469 +                sizeof(prevEntropy->fse.offcodeCTable),
115470 +                entropyWorkspace, entropyWkspSize);
115471 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
115472 +            if (Offtype == set_compressed)
115473 +                lastNCount = op;
115474 +            op += countSize;
115475 +            assert(op <= oend);
115476 +    }   }
115477 +    /* build CTable for MatchLengths */
115478 +    {   unsigned max = MaxML;
115479 +        size_t const mostFrequent = HIST_countFast_wksp(
115480 +            count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
115481 +        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
115482 +        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
115483 +        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
115484 +                                        count, max, mostFrequent, nbSeq,
115485 +                                        MLFSELog, prevEntropy->fse.matchlengthCTable,
115486 +                                        ML_defaultNorm, ML_defaultNormLog,
115487 +                                        ZSTD_defaultAllowed, strategy);
115488 +        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
115489 +        {   size_t const countSize = ZSTD_buildCTable(
115490 +                op, (size_t)(oend - op),
115491 +                CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
115492 +                count, max, mlCodeTable, nbSeq,
115493 +                ML_defaultNorm, ML_defaultNormLog, MaxML,
115494 +                prevEntropy->fse.matchlengthCTable,
115495 +                sizeof(prevEntropy->fse.matchlengthCTable),
115496 +                entropyWorkspace, entropyWkspSize);
115497 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
115498 +            if (MLtype == set_compressed)
115499 +                lastNCount = op;
115500 +            op += countSize;
115501 +            assert(op <= oend);
115502 +    }   }
115504 +    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
115506 +    {   size_t const bitstreamSize = ZSTD_encodeSequences(
115507 +                                        op, (size_t)(oend - op),
115508 +                                        CTable_MatchLength, mlCodeTable,
115509 +                                        CTable_OffsetBits, ofCodeTable,
115510 +                                        CTable_LitLength, llCodeTable,
115511 +                                        sequences, nbSeq,
115512 +                                        longOffsets, bmi2);
115513 +        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
115514 +        op += bitstreamSize;
115515 +        assert(op <= oend);
115516 +        /* zstd versions <= 1.3.4 mistakenly report corruption when
115517 +         * FSE_readNCount() receives a buffer < 4 bytes.
115518 +         * Fixed by https://github.com/facebook/zstd/pull/1146.
115519 +         * This can happen when the last set_compressed table present is 2
115520 +         * bytes and the bitstream is only one byte.
115521 +         * In this exceedingly rare case, we will simply emit an uncompressed
115522 +         * block, since it isn't worth optimizing.
115523 +         */
115524 +        if (lastNCount && (op - lastNCount) < 4) {
115525 +            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
115526 +            assert(op - lastNCount == 3);
115527 +            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
115528 +                        "emitting an uncompressed block.");
115529 +            return 0;
115530 +        }
115531 +    }
115533 +    DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
115534 +    return (size_t)(op - ostart);
115537 +MEM_STATIC size_t
115538 +ZSTD_entropyCompressSequences(seqStore_t* seqStorePtr,
115539 +                       const ZSTD_entropyCTables_t* prevEntropy,
115540 +                             ZSTD_entropyCTables_t* nextEntropy,
115541 +                       const ZSTD_CCtx_params* cctxParams,
115542 +                             void* dst, size_t dstCapacity,
115543 +                             size_t srcSize,
115544 +                             void* entropyWorkspace, size_t entropyWkspSize,
115545 +                             int bmi2)
115547 +    size_t const cSize = ZSTD_entropyCompressSequences_internal(
115548 +                            seqStorePtr, prevEntropy, nextEntropy, cctxParams,
115549 +                            dst, dstCapacity,
115550 +                            entropyWorkspace, entropyWkspSize, bmi2);
115551 +    if (cSize == 0) return 0;
115552 +    /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
115553 +     * Since we ran out of space, the block must be incompressible, so fall back to a raw uncompressed block.
115554 +     */
115555 +    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
115556 +        return 0;  /* block not compressed */
115557 +    FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSequences_internal failed");
115559 +    /* Check compressibility */
115560 +    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
115561 +        if (cSize >= maxCSize) return 0;  /* block not compressed */
115562 +    }
115563 +    DEBUGLOG(4, "ZSTD_entropyCompressSequences() cSize: %zu\n", cSize);
115564 +    return cSize;
115567 +/* ZSTD_selectBlockCompressor() :
115568 + * Not static, but internal use only (used by long distance matcher)
115569 + * assumption : strat is a valid strategy */
115570 +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
115572 +    static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
115573 +        { ZSTD_compressBlock_fast  /* default for 0 */,
115574 +          ZSTD_compressBlock_fast,
115575 +          ZSTD_compressBlock_doubleFast,
115576 +          ZSTD_compressBlock_greedy,
115577 +          ZSTD_compressBlock_lazy,
115578 +          ZSTD_compressBlock_lazy2,
115579 +          ZSTD_compressBlock_btlazy2,
115580 +          ZSTD_compressBlock_btopt,
115581 +          ZSTD_compressBlock_btultra,
115582 +          ZSTD_compressBlock_btultra2 },
115583 +        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
115584 +          ZSTD_compressBlock_fast_extDict,
115585 +          ZSTD_compressBlock_doubleFast_extDict,
115586 +          ZSTD_compressBlock_greedy_extDict,
115587 +          ZSTD_compressBlock_lazy_extDict,
115588 +          ZSTD_compressBlock_lazy2_extDict,
115589 +          ZSTD_compressBlock_btlazy2_extDict,
115590 +          ZSTD_compressBlock_btopt_extDict,
115591 +          ZSTD_compressBlock_btultra_extDict,
115592 +          ZSTD_compressBlock_btultra_extDict },
115593 +        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
115594 +          ZSTD_compressBlock_fast_dictMatchState,
115595 +          ZSTD_compressBlock_doubleFast_dictMatchState,
115596 +          ZSTD_compressBlock_greedy_dictMatchState,
115597 +          ZSTD_compressBlock_lazy_dictMatchState,
115598 +          ZSTD_compressBlock_lazy2_dictMatchState,
115599 +          ZSTD_compressBlock_btlazy2_dictMatchState,
115600 +          ZSTD_compressBlock_btopt_dictMatchState,
115601 +          ZSTD_compressBlock_btultra_dictMatchState,
115602 +          ZSTD_compressBlock_btultra_dictMatchState },
115603 +        { NULL  /* default for 0 */,
115604 +          NULL,
115605 +          NULL,
115606 +          ZSTD_compressBlock_greedy_dedicatedDictSearch,
115607 +          ZSTD_compressBlock_lazy_dedicatedDictSearch,
115608 +          ZSTD_compressBlock_lazy2_dedicatedDictSearch,
115609 +          NULL,
115610 +          NULL,
115611 +          NULL,
115612 +          NULL }
115613 +    };
115614 +    ZSTD_blockCompressor selectedCompressor;
115615 +    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
115617 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
115618 +    selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
115619 +    assert(selectedCompressor != NULL);
115620 +    return selectedCompressor;
115623 +static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
115624 +                                   const BYTE* anchor, size_t lastLLSize)
115626 +    ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
115627 +    seqStorePtr->lit += lastLLSize;
115630 +void ZSTD_resetSeqStore(seqStore_t* ssPtr)
115632 +    ssPtr->lit = ssPtr->litStart;
115633 +    ssPtr->sequences = ssPtr->sequencesStart;
115634 +    ssPtr->longLengthID = 0;
115637 +typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
115639 +static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
115641 +    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
115642 +    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
115643 +    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
115644 +    /* Assert that we have correctly flushed the ctx params into the ms's copy */
115645 +    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
115646 +    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
115647 +        if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
115648 +            ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
115649 +        } else {
115650 +            ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
115651 +        }
115652 +        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
115653 +    }
115654 +    ZSTD_resetSeqStore(&(zc->seqStore));
115655 +    /* required for optimal parser to read stats from dictionary */
115656 +    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
115657 +    /* tell the optimal parser how we expect to compress literals */
115658 +    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
115659 +    /* a gap between an attached dict and the current window is not safe,
115660 +     * they must remain adjacent,
115661 +     * and when that stops being the case, the dict must be unset */
115662 +    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
115664 +    /* limited update after a very long match */
115665 +    {   const BYTE* const base = ms->window.base;
115666 +        const BYTE* const istart = (const BYTE*)src;
115667 +        const U32 curr = (U32)(istart-base);
115668 +        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
115669 +        if (curr > ms->nextToUpdate + 384)
115670 +            ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
115671 +    }
115673 +    /* select and store sequences */
115674 +    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
115675 +        size_t lastLLSize;
115676 +        {   int i;
115677 +            for (i = 0; i < ZSTD_REP_NUM; ++i)
115678 +                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
115679 +        }
115680 +        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
115681 +            assert(!zc->appliedParams.ldmParams.enableLdm);
115682 +            /* Updates ldmSeqStore.pos */
115683 +            lastLLSize =
115684 +                ZSTD_ldm_blockCompress(&zc->externSeqStore,
115685 +                                       ms, &zc->seqStore,
115686 +                                       zc->blockState.nextCBlock->rep,
115687 +                                       src, srcSize);
115688 +            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
115689 +        } else if (zc->appliedParams.ldmParams.enableLdm) {
115690 +            rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
115692 +            ldmSeqStore.seq = zc->ldmSequences;
115693 +            ldmSeqStore.capacity = zc->maxNbLdmSequences;
115694 +            /* Updates ldmSeqStore.size */
115695 +            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
115696 +                                               &zc->appliedParams.ldmParams,
115697 +                                               src, srcSize), "");
115698 +            /* Updates ldmSeqStore.pos */
115699 +            lastLLSize =
115700 +                ZSTD_ldm_blockCompress(&ldmSeqStore,
115701 +                                       ms, &zc->seqStore,
115702 +                                       zc->blockState.nextCBlock->rep,
115703 +                                       src, srcSize);
115704 +            assert(ldmSeqStore.pos == ldmSeqStore.size);
115705 +        } else {   /* not long range mode */
115706 +            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
115707 +            ms->ldmSeqStore = NULL;
115708 +            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
115709 +        }
115710 +        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
115711 +            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
115712 +    }   }
115713 +    return ZSTDbss_compress;
115716 +static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
115718 +    const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
115719 +    const seqDef* seqStoreSeqs = seqStore->sequencesStart;
115720 +    size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
115721 +    size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
115722 +    size_t literalsRead = 0;
115723 +    size_t lastLLSize;
115725 +    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
115726 +    size_t i;
115727 +    repcodes_t updatedRepcodes;
115729 +    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
115730 +    /* Ensure we have enough space for last literals "sequence" */
115731 +    assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
115732 +    ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
115733 +    for (i = 0; i < seqStoreSeqSize; ++i) {
115734 +        U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;
115735 +        outSeqs[i].litLength = seqStoreSeqs[i].litLength;
115736 +        outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;
115737 +        outSeqs[i].rep = 0;
115739 +        if (i == seqStore->longLengthPos) {
115740 +            if (seqStore->longLengthID == 1) {
115741 +                outSeqs[i].litLength += 0x10000;
115742 +            } else if (seqStore->longLengthID == 2) {
115743 +                outSeqs[i].matchLength += 0x10000;
115744 +            }
115745 +        }
115747 +        if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {
115748 +            /* Derive the correct offset corresponding to a repcode */
115749 +            outSeqs[i].rep = seqStoreSeqs[i].offset;
115750 +            if (outSeqs[i].litLength != 0) {
115751 +                rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
115752 +            } else {
115753 +                if (outSeqs[i].rep == 3) {
115754 +                    rawOffset = updatedRepcodes.rep[0] - 1;
115755 +                } else {
115756 +                    rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
115757 +                }
115758 +            }
115759 +        }
115760 +        outSeqs[i].offset = rawOffset;
115761 +        /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
115762 +           so we provide seqStoreSeqs[i].offset - 1 */
115763 +        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,
115764 +                                         seqStoreSeqs[i].offset - 1,
115765 +                                         seqStoreSeqs[i].litLength == 0);
115766 +        literalsRead += outSeqs[i].litLength;
115767 +    }
115768 +    /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
115769 +     * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
115770 +     * for the block boundary, according to the API.
115771 +     */
115772 +    assert(seqStoreLiteralsSize >= literalsRead);
115773 +    lastLLSize = seqStoreLiteralsSize - literalsRead;
115774 +    outSeqs[i].litLength = (U32)lastLLSize;
115775 +    outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
115776 +    seqStoreSeqSize++;
115777 +    zc->seqCollector.seqIndex += seqStoreSeqSize;
115780 +size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
115781 +                              size_t outSeqsSize, const void* src, size_t srcSize)
115783 +    const size_t dstCapacity = ZSTD_compressBound(srcSize);
115784 +    void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
115785 +    SeqCollector seqCollector;
115787 +    RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
115789 +    seqCollector.collectSequences = 1;
115790 +    seqCollector.seqStart = outSeqs;
115791 +    seqCollector.seqIndex = 0;
115792 +    seqCollector.maxSequences = outSeqsSize;
115793 +    zc->seqCollector = seqCollector;
115795 +    ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
115796 +    ZSTD_customFree(dst, ZSTD_defaultCMem);
115797 +    return zc->seqCollector.seqIndex;
115800 +size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
115801 +    size_t in = 0;
115802 +    size_t out = 0;
115803 +    for (; in < seqsSize; ++in) {
115804 +        if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
115805 +            if (in != seqsSize - 1) {
115806 +                sequences[in+1].litLength += sequences[in].litLength;
115807 +            }
115808 +        } else {
115809 +            sequences[out] = sequences[in];
115810 +            ++out;
115811 +        }
115812 +    }
115813 +    return out;
115816 +/* Unrolled loop to read four size_ts of input at a time. Returns 1 if is RLE, 0 if not. */
115817 +static int ZSTD_isRLE(const BYTE* src, size_t length) {
115818 +    const BYTE* ip = src;
115819 +    const BYTE value = ip[0];
115820 +    const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
115821 +    const size_t unrollSize = sizeof(size_t) * 4;
115822 +    const size_t unrollMask = unrollSize - 1;
115823 +    const size_t prefixLength = length & unrollMask;
115824 +    size_t i;
115825 +    size_t u;
115826 +    if (length == 1) return 1;
115827 +    /* Check if prefix is RLE first before using unrolled loop */
115828 +    if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
115829 +        return 0;
115830 +    }
115831 +    for (i = prefixLength; i != length; i += unrollSize) {
115832 +        for (u = 0; u < unrollSize; u += sizeof(size_t)) {
115833 +            if (MEM_readST(ip + i + u) != valueST) {
115834 +                return 0;
115835 +            }
115836 +        }
115837 +    }
115838 +    return 1;
115841 +/* Returns true if the given block may be RLE.
115842 + * This is just a heuristic based on the compressibility.
115843 + * It may return both false positives and false negatives.
115844 + */
115845 +static int ZSTD_maybeRLE(seqStore_t const* seqStore)
115847 +    size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
115848 +    size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
115850 +    return nbSeqs < 4 && nbLits < 10;
115853 +static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
115855 +    ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
115856 +    zc->blockState.prevCBlock = zc->blockState.nextCBlock;
115857 +    zc->blockState.nextCBlock = tmp;
115860 +static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
115861 +                                        void* dst, size_t dstCapacity,
115862 +                                        const void* src, size_t srcSize, U32 frame)
115864 +    /* This is the upper bound for the length of an rle block.
115865 +     * This isn't the actual upper bound. Finding the real threshold
115866 +     * needs further investigation.
115867 +     */
115868 +    const U32 rleMaxLength = 25;
115869 +    size_t cSize;
115870 +    const BYTE* ip = (const BYTE*)src;
115871 +    BYTE* op = (BYTE*)dst;
115872 +    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
115873 +                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
115874 +                (unsigned)zc->blockState.matchState.nextToUpdate);
115876 +    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
115877 +        FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
115878 +        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
115879 +    }
115881 +    if (zc->seqCollector.collectSequences) {
115882 +        ZSTD_copyBlockSequences(zc);
115883 +        ZSTD_confirmRepcodesAndEntropyTables(zc);
115884 +        return 0;
115885 +    }
115887 +    /* encode sequences and literals */
115888 +    cSize = ZSTD_entropyCompressSequences(&zc->seqStore,
115889 +            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
115890 +            &zc->appliedParams,
115891 +            dst, dstCapacity,
115892 +            srcSize,
115893 +            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
115894 +            zc->bmi2);
115896 +    if (zc->seqCollector.collectSequences) {
115897 +        ZSTD_copyBlockSequences(zc);
115898 +        return 0;
115899 +    }
115902 +    if (frame &&
115903 +        /* We don't want to emit our first block as a RLE even if it qualifies because
115904 +         * doing so will cause the decoder (cli only) to throw a "should consume all input error."
115905 +         * This is only an issue for zstd <= v1.4.3
115906 +         */
115907 +        !zc->isFirstBlock &&
115908 +        cSize < rleMaxLength &&
115909 +        ZSTD_isRLE(ip, srcSize))
115910 +    {
115911 +        cSize = 1;
115912 +        op[0] = ip[0];
115913 +    }
115915 +out:
115916 +    if (!ZSTD_isError(cSize) && cSize > 1) {
115917 +        ZSTD_confirmRepcodesAndEntropyTables(zc);
115918 +    }
115919 +    /* We check that dictionaries have offset codes available for the first
115920 +     * block. After the first block, the offcode table might not have large
115921 +     * enough codes to represent the offsets in the data.
115922 +     */
115923 +    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
115924 +        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
115926 +    return cSize;
115929 +static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
115930 +                               void* dst, size_t dstCapacity,
115931 +                               const void* src, size_t srcSize,
115932 +                               const size_t bss, U32 lastBlock)
115934 +    DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
115935 +    if (bss == ZSTDbss_compress) {
115936 +        if (/* We don't want to emit our first block as a RLE even if it qualifies because
115937 +            * doing so will cause the decoder (cli only) to throw a "should consume all input error."
115938 +            * This is only an issue for zstd <= v1.4.3
115939 +            */
115940 +            !zc->isFirstBlock &&
115941 +            ZSTD_maybeRLE(&zc->seqStore) &&
115942 +            ZSTD_isRLE((BYTE const*)src, srcSize))
115943 +        {
115944 +            return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
115945 +        }
115946 +        /* Attempt superblock compression.
115947 +         *
115948 +         * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
115949 +         * standard ZSTD_compressBound(). This is a problem, because even if we have
115950 +         * space now, taking an extra byte now could cause us to run out of space later
115951 +         * and violate ZSTD_compressBound().
115952 +         *
115953 +         * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
115954 +         *
115955 +         * In order to respect ZSTD_compressBound() we must attempt to emit a raw
115956 +         * uncompressed block in these cases:
115957 +         *   * cSize == 0: Return code for an uncompressed block.
115958 +         *   * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
115959 +         *     ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
115960 +         *     output space.
115961 +         *   * cSize >= blockBound(srcSize): We have expanded the block too much so
115962 +         *     emit an uncompressed block.
115963 +         */
115964 +        {
115965 +            size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
115966 +            if (cSize != ERROR(dstSize_tooSmall)) {
115967 +                size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
115968 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
115969 +                if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
115970 +                    ZSTD_confirmRepcodesAndEntropyTables(zc);
115971 +                    return cSize;
115972 +                }
115973 +            }
115974 +        }
115975 +    }
115977 +    DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
115978 +    /* Superblock compression failed, attempt to emit a single no compress block.
115979 +     * The decoder will be able to stream this block since it is uncompressed.
115980 +     */
115981 +    return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
115984 +static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
115985 +                               void* dst, size_t dstCapacity,
115986 +                               const void* src, size_t srcSize,
115987 +                               U32 lastBlock)
115989 +    size_t cSize = 0;
115990 +    const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
115991 +    DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
115992 +                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
115993 +    FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
115995 +    cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
115996 +    FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
115998 +    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
115999 +        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
116001 +    return cSize;
116004 +static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
116005 +                                         ZSTD_cwksp* ws,
116006 +                                         ZSTD_CCtx_params const* params,
116007 +                                         void const* ip,
116008 +                                         void const* iend)
116010 +    if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
116011 +        U32 const maxDist = (U32)1 << params->cParams.windowLog;
116012 +        U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
116013 +        U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
116014 +        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
116015 +        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
116016 +        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
116017 +        ZSTD_cwksp_mark_tables_dirty(ws);
116018 +        ZSTD_reduceIndex(ms, params, correction);
116019 +        ZSTD_cwksp_mark_tables_clean(ws);
116020 +        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
116021 +        else ms->nextToUpdate -= correction;
116022 +        /* invalidate dictionaries on overflow correction */
116023 +        ms->loadedDictEnd = 0;
116024 +        ms->dictMatchState = NULL;
116025 +    }
116028 +/*! ZSTD_compress_frameChunk() :
116029 +*   Compress a chunk of data into one or multiple blocks.
116030 +*   All blocks will be terminated, all input will be consumed.
116031 +*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
116032 +*   Frame is supposed already started (header already produced)
116033 +*   @return : compressed size, or an error code
116035 +static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
116036 +                                     void* dst, size_t dstCapacity,
116037 +                               const void* src, size_t srcSize,
116038 +                                     U32 lastFrameChunk)
116040 +    size_t blockSize = cctx->blockSize;
116041 +    size_t remaining = srcSize;
116042 +    const BYTE* ip = (const BYTE*)src;
116043 +    BYTE* const ostart = (BYTE*)dst;
116044 +    BYTE* op = ostart;
116045 +    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
116047 +    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
116049 +    DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
116050 +    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
116051 +        xxh64_update(&cctx->xxhState, src, srcSize);
116053 +    while (remaining) {
116054 +        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
116055 +        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
116057 +        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
116058 +                        dstSize_tooSmall,
116059 +                        "not enough space to store compressed block");
116060 +        if (remaining < blockSize) blockSize = remaining;
116062 +        ZSTD_overflowCorrectIfNeeded(
116063 +            ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
116064 +        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
116066 +        /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
116067 +        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
116069 +        {   size_t cSize;
116070 +            if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
116071 +                cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
116072 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
116073 +                assert(cSize > 0);
116074 +                assert(cSize <= blockSize + ZSTD_blockHeaderSize);
116075 +            } else {
116076 +                cSize = ZSTD_compressBlock_internal(cctx,
116077 +                                        op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
116078 +                                        ip, blockSize, 1 /* frame */);
116079 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");
116081 +                if (cSize == 0) {  /* block is not compressible */
116082 +                    cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
116083 +                    FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
116084 +                } else {
116085 +                    U32 const cBlockHeader = cSize == 1 ?
116086 +                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
116087 +                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
116088 +                    MEM_writeLE24(op, cBlockHeader);
116089 +                    cSize += ZSTD_blockHeaderSize;
116090 +                }
116091 +            }
116094 +            ip += blockSize;
116095 +            assert(remaining >= blockSize);
116096 +            remaining -= blockSize;
116097 +            op += cSize;
116098 +            assert(dstCapacity >= cSize);
116099 +            dstCapacity -= cSize;
116100 +            cctx->isFirstBlock = 0;
116101 +            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
116102 +                        (unsigned)cSize);
116103 +    }   }
116105 +    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
116106 +    return (size_t)(op-ostart);
116110 +static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
116111 +                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
116112 +{   BYTE* const op = (BYTE*)dst;
116113 +    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
116114 +    U32   const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
116115 +    U32   const checksumFlag = params->fParams.checksumFlag>0;
116116 +    U32   const windowSize = (U32)1 << params->cParams.windowLog;
116117 +    U32   const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
116118 +    BYTE  const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
116119 +    U32   const fcsCode = params->fParams.contentSizeFlag ?
116120 +                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
116121 +    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
116122 +    size_t pos=0;
116124 +    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
116125 +    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
116126 +                    "dst buf is too small to fit worst-case frame header size.");
116127 +    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
116128 +                !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
116129 +    if (params->format == ZSTD_f_zstd1) {
116130 +        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
116131 +        pos = 4;
116132 +    }
116133 +    op[pos++] = frameHeaderDescriptionByte;
116134 +    if (!singleSegment) op[pos++] = windowLogByte;
116135 +    switch(dictIDSizeCode)
116136 +    {
116137 +        default:  assert(0); /* impossible */
116138 +        case 0 : break;
116139 +        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
116140 +        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
116141 +        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
116142 +    }
116143 +    switch(fcsCode)
116144 +    {
116145 +        default:  assert(0); /* impossible */
116146 +        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
116147 +        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
116148 +        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
116149 +        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
116150 +    }
116151 +    return pos;
116154 +/* ZSTD_writeSkippableFrame_advanced() :
116155 + * Writes out a skippable frame with the specified magic number variant (16 are supported),
116156 + * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
116158 + * Returns the total number of bytes written, or a ZSTD error code.
116159 + */
116160 +size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
116161 +                                const void* src, size_t srcSize, unsigned magicVariant) {
116162 +    BYTE* op = (BYTE*)dst;
116163 +    RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
116164 +                    dstSize_tooSmall, "Not enough room for skippable frame");
116165 +    RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
116166 +    RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
116168 +    MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
116169 +    MEM_writeLE32(op+4, (U32)srcSize);
116170 +    ZSTD_memcpy(op+8, src, srcSize);
116171 +    return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
116174 +/* ZSTD_writeLastEmptyBlock() :
116175 + * output an empty Block with end-of-frame mark to complete a frame
116176 + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
116177 + *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
116178 + */
116179 +size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
116181 +    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
116182 +                    "dst buf is too small to write frame trailer empty block.");
116183 +    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
116184 +        MEM_writeLE24(dst, cBlockHeader24);
116185 +        return ZSTD_blockHeaderSize;
116186 +    }
116189 +size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
116191 +    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
116192 +                    "wrong cctx stage");
116193 +    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
116194 +                    parameter_unsupported,
116195 +                    "incompatible with ldm");
116196 +    cctx->externSeqStore.seq = seq;
116197 +    cctx->externSeqStore.size = nbSeq;
116198 +    cctx->externSeqStore.capacity = nbSeq;
116199 +    cctx->externSeqStore.pos = 0;
116200 +    cctx->externSeqStore.posInSequence = 0;
116201 +    return 0;
116205 +static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
116206 +                              void* dst, size_t dstCapacity,
116207 +                        const void* src, size_t srcSize,
116208 +                               U32 frame, U32 lastFrameChunk)
116210 +    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
116211 +    size_t fhSize = 0;
116213 +    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
116214 +                cctx->stage, (unsigned)srcSize);
116215 +    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
116216 +                    "missing init (ZSTD_compressBegin)");
116218 +    if (frame && (cctx->stage==ZSTDcs_init)) {
116219 +        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
116220 +                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
116221 +        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
116222 +        assert(fhSize <= dstCapacity);
116223 +        dstCapacity -= fhSize;
116224 +        dst = (char*)dst + fhSize;
116225 +        cctx->stage = ZSTDcs_ongoing;
116226 +    }
116228 +    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */
116230 +    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
116231 +        ms->nextToUpdate = ms->window.dictLimit;
116232 +    }
116233 +    if (cctx->appliedParams.ldmParams.enableLdm) {
116234 +        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
116235 +    }
116237 +    if (!frame) {
116238 +        /* overflow check and correction for block mode */
116239 +        ZSTD_overflowCorrectIfNeeded(
116240 +            ms, &cctx->workspace, &cctx->appliedParams,
116241 +            src, (BYTE const*)src + srcSize);
116242 +    }
116244 +    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
116245 +    {   size_t const cSize = frame ?
116246 +                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
116247 +                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
116248 +        FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
116249 +        cctx->consumedSrcSize += srcSize;
116250 +        cctx->producedCSize += (cSize + fhSize);
116251 +        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
116252 +        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
116253 +            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
116254 +            RETURN_ERROR_IF(
116255 +                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
116256 +                srcSize_wrong,
116257 +                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
116258 +                (unsigned)cctx->pledgedSrcSizePlusOne-1,
116259 +                (unsigned)cctx->consumedSrcSize);
116260 +        }
116261 +        return cSize + fhSize;
116262 +    }
116265 +size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
116266 +                              void* dst, size_t dstCapacity,
116267 +                        const void* src, size_t srcSize)
116269 +    DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
116270 +    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
116274 +size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
116276 +    ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
116277 +    assert(!ZSTD_checkCParams(cParams));
116278 +    return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
116281 +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
116283 +    DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
116284 +    { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
116285 +      RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
116287 +    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
116290 +/*! ZSTD_loadDictionaryContent() :
116291 + *  @return : 0, or an error code
116292 + */
116293 +static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
116294 +                                         ldmState_t* ls,
116295 +                                         ZSTD_cwksp* ws,
116296 +                                         ZSTD_CCtx_params const* params,
116297 +                                         const void* src, size_t srcSize,
116298 +                                         ZSTD_dictTableLoadMethod_e dtlm)
116300 +    const BYTE* ip = (const BYTE*) src;
116301 +    const BYTE* const iend = ip + srcSize;
116303 +    ZSTD_window_update(&ms->window, src, srcSize);
116304 +    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
116306 +    if (params->ldmParams.enableLdm && ls != NULL) {
116307 +        ZSTD_window_update(&ls->window, src, srcSize);
116308 +        ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
116309 +    }
116311 +    /* Assert that the ms params match the params we're being given */
116312 +    ZSTD_assertEqualCParams(params->cParams, ms->cParams);
116314 +    if (srcSize <= HASH_READ_SIZE) return 0;
116316 +    while (iend - ip > HASH_READ_SIZE) {
116317 +        size_t const remaining = (size_t)(iend - ip);
116318 +        size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
116319 +        const BYTE* const ichunk = ip + chunk;
116321 +        ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
116323 +        if (params->ldmParams.enableLdm && ls != NULL)
116324 +            ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, &params->ldmParams);
116326 +        switch(params->cParams.strategy)
116327 +        {
116328 +        case ZSTD_fast:
116329 +            ZSTD_fillHashTable(ms, ichunk, dtlm);
116330 +            break;
116331 +        case ZSTD_dfast:
116332 +            ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
116333 +            break;
116335 +        case ZSTD_greedy:
116336 +        case ZSTD_lazy:
116337 +        case ZSTD_lazy2:
116338 +            if (chunk >= HASH_READ_SIZE && ms->dedicatedDictSearch) {
116339 +                assert(chunk == remaining); /* must load everything in one go */
116340 +                ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, ichunk-HASH_READ_SIZE);
116341 +            } else if (chunk >= HASH_READ_SIZE) {
116342 +                ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
116343 +            }
116344 +            break;
116346 +        case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
116347 +        case ZSTD_btopt:
116348 +        case ZSTD_btultra:
116349 +        case ZSTD_btultra2:
116350 +            if (chunk >= HASH_READ_SIZE)
116351 +                ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
116352 +            break;
116354 +        default:
116355 +            assert(0);  /* not possible : not a valid strategy id */
116356 +        }
116358 +        ip = ichunk;
116359 +    }
116361 +    ms->nextToUpdate = (U32)(iend - ms->window.base);
116362 +    return 0;
116366 +/* Dictionaries that assign zero probability to symbols that show up cause problems
116367 + * when FSE encoding. Mark dictionaries with zero probability symbols as FSE_repeat_check
116368 + * and only dictionaries with 100% valid symbols can be assumed valid.
116369 + */
116370 +static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
116372 +    U32 s;
116373 +    if (dictMaxSymbolValue < maxSymbolValue) {
116374 +        return FSE_repeat_check;
116375 +    }
116376 +    for (s = 0; s <= maxSymbolValue; ++s) {
116377 +        if (normalizedCounter[s] == 0) {
116378 +            return FSE_repeat_check;
116379 +        }
116380 +    }
116381 +    return FSE_repeat_valid;
116384 +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
116385 +                         const void* const dict, size_t dictSize)
116387 +    short offcodeNCount[MaxOff+1];
116388 +    unsigned offcodeMaxValue = MaxOff;
116389 +    const BYTE* dictPtr = (const BYTE*)dict;    /* skip magic num and dict ID */
116390 +    const BYTE* const dictEnd = dictPtr + dictSize;
116391 +    dictPtr += 8;
116392 +    bs->entropy.huf.repeatMode = HUF_repeat_check;
116394 +    {   unsigned maxSymbolValue = 255;
116395 +        unsigned hasZeroWeights = 1;
116396 +        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
116397 +            dictEnd-dictPtr, &hasZeroWeights);
116399 +        /* We only set the loaded table as valid if it contains all non-zero
116400 +         * weights. Otherwise, we set it to check */
116401 +        if (!hasZeroWeights)
116402 +            bs->entropy.huf.repeatMode = HUF_repeat_valid;
116404 +        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
116405 +        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
116406 +        dictPtr += hufHeaderSize;
116407 +    }
116409 +    {   unsigned offcodeLog;
116410 +        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
116411 +        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
116412 +        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
116413 +        /* fill all offset symbols to avoid garbage at end of table */
116414 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
116415 +                bs->entropy.fse.offcodeCTable,
116416 +                offcodeNCount, MaxOff, offcodeLog,
116417 +                workspace, HUF_WORKSPACE_SIZE)),
116418 +            dictionary_corrupted, "");
116419 +        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
116420 +        dictPtr += offcodeHeaderSize;
116421 +    }
116423 +    {   short matchlengthNCount[MaxML+1];
116424 +        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
116425 +        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
116426 +        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
116427 +        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
116428 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
116429 +                bs->entropy.fse.matchlengthCTable,
116430 +                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
116431 +                workspace, HUF_WORKSPACE_SIZE)),
116432 +            dictionary_corrupted, "");
116433 +        bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
116434 +        dictPtr += matchlengthHeaderSize;
116435 +    }
116437 +    {   short litlengthNCount[MaxLL+1];
116438 +        unsigned litlengthMaxValue = MaxLL, litlengthLog;
116439 +        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
116440 +        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
116441 +        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
116442 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
116443 +                bs->entropy.fse.litlengthCTable,
116444 +                litlengthNCount, litlengthMaxValue, litlengthLog,
116445 +                workspace, HUF_WORKSPACE_SIZE)),
116446 +            dictionary_corrupted, "");
116447 +        bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
116448 +        dictPtr += litlengthHeaderSize;
116449 +    }
116451 +    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
116452 +    bs->rep[0] = MEM_readLE32(dictPtr+0);
116453 +    bs->rep[1] = MEM_readLE32(dictPtr+4);
116454 +    bs->rep[2] = MEM_readLE32(dictPtr+8);
116455 +    dictPtr += 12;
116457 +    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
116458 +        U32 offcodeMax = MaxOff;
116459 +        if (dictContentSize <= ((U32)-1) - 128 KB) {
116460 +            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
116461 +            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
116462 +        }
116463 +        /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
116464 +        bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));
116466 +        /* All repCodes must be <= dictContentSize and != 0 */
116467 +        {   U32 u;
116468 +            for (u=0; u<3; u++) {
116469 +                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
116470 +                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
116471 +    }   }   }
116473 +    return dictPtr - (const BYTE*)dict;
116476 +/* Dictionary format :
116477 + * See :
116478 + * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
116479 + */
116480 +/*! ZSTD_loadZstdDictionary() :
116481 + * @return : dictID, or an error code
116482 + *  assumptions : magic number supposed already checked
116483 + *                dictSize supposed >= 8
116484 + */
116485 +static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
116486 +                                      ZSTD_matchState_t* ms,
116487 +                                      ZSTD_cwksp* ws,
116488 +                                      ZSTD_CCtx_params const* params,
116489 +                                      const void* dict, size_t dictSize,
116490 +                                      ZSTD_dictTableLoadMethod_e dtlm,
116491 +                                      void* workspace)
116493 +    const BYTE* dictPtr = (const BYTE*)dict;
116494 +    const BYTE* const dictEnd = dictPtr + dictSize;
116495 +    size_t dictID;
116496 +    size_t eSize;
116498 +    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
116499 +    assert(dictSize >= 8);
116500 +    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
116502 +    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr + 4 /* skip magic number */ );
116503 +    eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
116504 +    FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
116505 +    dictPtr += eSize;
116507 +    {
116508 +        size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
116509 +        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
116510 +            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
116511 +    }
116512 +    return dictID;
116515 +/** ZSTD_compress_insertDictionary() :
116516 +*   @return : dictID, or an error code */
116517 +static size_t
116518 +ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
116519 +                               ZSTD_matchState_t* ms,
116520 +                               ldmState_t* ls,
116521 +                               ZSTD_cwksp* ws,
116522 +                         const ZSTD_CCtx_params* params,
116523 +                         const void* dict, size_t dictSize,
116524 +                               ZSTD_dictContentType_e dictContentType,
116525 +                               ZSTD_dictTableLoadMethod_e dtlm,
116526 +                               void* workspace)
116528 +    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
116529 +    if ((dict==NULL) || (dictSize<8)) {
116530 +        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
116531 +        return 0;
116532 +    }
116534 +    ZSTD_reset_compressedBlockState(bs);
116536 +    /* dict restricted modes */
116537 +    if (dictContentType == ZSTD_dct_rawContent)
116538 +        return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
116540 +    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
116541 +        if (dictContentType == ZSTD_dct_auto) {
116542 +            DEBUGLOG(4, "raw content dictionary detected");
116543 +            return ZSTD_loadDictionaryContent(
116544 +                ms, ls, ws, params, dict, dictSize, dtlm);
116545 +        }
116546 +        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
116547 +        assert(0);   /* impossible */
116548 +    }
116550 +    /* dict as full zstd dictionary */
116551 +    return ZSTD_loadZstdDictionary(
116552 +        bs, ms, ws, params, dict, dictSize, dtlm, workspace);
116555 +#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
116556 +#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
116558 +/*! ZSTD_compressBegin_internal() :
116559 + * @return : 0, or an error code */
116560 +static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
116561 +                                    const void* dict, size_t dictSize,
116562 +                                    ZSTD_dictContentType_e dictContentType,
116563 +                                    ZSTD_dictTableLoadMethod_e dtlm,
116564 +                                    const ZSTD_CDict* cdict,
116565 +                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
116566 +                                    ZSTD_buffered_policy_e zbuff)
116568 +    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
116569 +    /* params are supposed to be fully validated at this point */
116570 +    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
116571 +    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
116572 +    if ( (cdict)
116573 +      && (cdict->dictContentSize > 0)
116574 +      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
116575 +        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
116576 +        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
116577 +        || cdict->compressionLevel == 0)
116578 +      && (params->attachDictPref != ZSTD_dictForceLoad) ) {
116579 +        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
116580 +    }
116582 +    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
116583 +                                     ZSTDcrp_makeClean, zbuff) , "");
116584 +    {   size_t const dictID = cdict ?
116585 +                ZSTD_compress_insertDictionary(
116586 +                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
116587 +                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
116588 +                        cdict->dictContentSize, cdict->dictContentType, dtlm,
116589 +                        cctx->entropyWorkspace)
116590 +              : ZSTD_compress_insertDictionary(
116591 +                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
116592 +                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
116593 +                        dictContentType, dtlm, cctx->entropyWorkspace);
116594 +        FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
116595 +        assert(dictID <= UINT_MAX);
116596 +        cctx->dictID = (U32)dictID;
116597 +        cctx->dictContentSize = cdict ? cdict->dictContentSize : dictSize;
116598 +    }
116599 +    return 0;
116602 +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
116603 +                                    const void* dict, size_t dictSize,
116604 +                                    ZSTD_dictContentType_e dictContentType,
116605 +                                    ZSTD_dictTableLoadMethod_e dtlm,
116606 +                                    const ZSTD_CDict* cdict,
116607 +                                    const ZSTD_CCtx_params* params,
116608 +                                    unsigned long long pledgedSrcSize)
116610 +    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
116611 +    /* compression parameters verification and optimization */
116612 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
116613 +    return ZSTD_compressBegin_internal(cctx,
116614 +                                       dict, dictSize, dictContentType, dtlm,
116615 +                                       cdict,
116616 +                                       params, pledgedSrcSize,
116617 +                                       ZSTDb_not_buffered);
116620 +/*! ZSTD_compressBegin_advanced() :
116621 +*   @return : 0, or an error code */
116622 +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
116623 +                             const void* dict, size_t dictSize,
116624 +                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
116626 +    ZSTD_CCtx_params cctxParams;
116627 +    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
116628 +    return ZSTD_compressBegin_advanced_internal(cctx,
116629 +                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
116630 +                                            NULL /*cdict*/,
116631 +                                            &cctxParams, pledgedSrcSize);
116634 +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
116636 +    ZSTD_CCtx_params cctxParams;
116637 +    {
116638 +        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
116639 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
116640 +    }
116641 +    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
116642 +    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
116643 +                                       &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
116646 +size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
116648 +    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
116652 +/*! ZSTD_writeEpilogue() :
116653 +*   Ends a frame.
116654 +*   @return : nb of bytes written into dst (or an error code) */
116655 +static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
116657 +    BYTE* const ostart = (BYTE*)dst;
116658 +    BYTE* op = ostart;
116659 +    size_t fhSize = 0;
116661 +    DEBUGLOG(4, "ZSTD_writeEpilogue");
116662 +    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
116664 +    /* special case : empty frame */
116665 +    if (cctx->stage == ZSTDcs_init) {
116666 +        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
116667 +        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
116668 +        dstCapacity -= fhSize;
116669 +        op += fhSize;
116670 +        cctx->stage = ZSTDcs_ongoing;
116671 +    }
116673 +    if (cctx->stage != ZSTDcs_ending) {
116674 +        /* write one last empty block, make it the "last" block */
116675 +        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
116676 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
116677 +        MEM_writeLE32(op, cBlockHeader24);
116678 +        op += ZSTD_blockHeaderSize;
116679 +        dstCapacity -= ZSTD_blockHeaderSize;
116680 +    }
116682 +    if (cctx->appliedParams.fParams.checksumFlag) {
116683 +        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
116684 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
116685 +        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
116686 +        MEM_writeLE32(op, checksum);
116687 +        op += 4;
116688 +    }
116690 +    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
116691 +    return op-ostart;
116694 +void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
116696 +    (void)cctx;
116697 +    (void)extraCSize;
116700 +size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
116701 +                         void* dst, size_t dstCapacity,
116702 +                   const void* src, size_t srcSize)
116704 +    size_t endResult;
116705 +    size_t const cSize = ZSTD_compressContinue_internal(cctx,
116706 +                                dst, dstCapacity, src, srcSize,
116707 +                                1 /* frame mode */, 1 /* last chunk */);
116708 +    FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
116709 +    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
116710 +    FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
116711 +    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
116712 +    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
116713 +        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
116714 +        DEBUGLOG(4, "end of frame : controlling src size");
116715 +        RETURN_ERROR_IF(
116716 +            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
116717 +            srcSize_wrong,
116718 +             "error : pledgedSrcSize = %u, while realSrcSize = %u",
116719 +            (unsigned)cctx->pledgedSrcSizePlusOne-1,
116720 +            (unsigned)cctx->consumedSrcSize);
116721 +    }
116722 +    ZSTD_CCtx_trace(cctx, endResult);
116723 +    return cSize + endResult;
116726 +size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
116727 +                               void* dst, size_t dstCapacity,
116728 +                         const void* src, size_t srcSize,
116729 +                         const void* dict,size_t dictSize,
116730 +                               ZSTD_parameters params)
116732 +    ZSTD_CCtx_params cctxParams;
116733 +    DEBUGLOG(4, "ZSTD_compress_advanced");
116734 +    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
116735 +    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
116736 +    return ZSTD_compress_advanced_internal(cctx,
116737 +                                           dst, dstCapacity,
116738 +                                           src, srcSize,
116739 +                                           dict, dictSize,
116740 +                                           &cctxParams);
116743 +/* Internal */
116744 +size_t ZSTD_compress_advanced_internal(
116745 +        ZSTD_CCtx* cctx,
116746 +        void* dst, size_t dstCapacity,
116747 +        const void* src, size_t srcSize,
116748 +        const void* dict,size_t dictSize,
116749 +        const ZSTD_CCtx_params* params)
116751 +    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
116752 +    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
116753 +                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
116754 +                         params, srcSize, ZSTDb_not_buffered) , "");
116755 +    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
116758 +size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
116759 +                               void* dst, size_t dstCapacity,
116760 +                         const void* src, size_t srcSize,
116761 +                         const void* dict, size_t dictSize,
116762 +                               int compressionLevel)
116764 +    ZSTD_CCtx_params cctxParams;
116765 +    {
116766 +        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
116767 +        assert(params.fParams.contentSizeFlag == 1);
116768 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
116769 +    }
116770 +    DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
116771 +    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
116774 +size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
116775 +                         void* dst, size_t dstCapacity,
116776 +                   const void* src, size_t srcSize,
116777 +                         int compressionLevel)
116779 +    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
116780 +    assert(cctx != NULL);
116781 +    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
116784 +size_t ZSTD_compress(void* dst, size_t dstCapacity,
116785 +               const void* src, size_t srcSize,
116786 +                     int compressionLevel)
116788 +    size_t result;
116789 +    ZSTD_CCtx* cctx = ZSTD_createCCtx();
116790 +    RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
116791 +    result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
116792 +    ZSTD_freeCCtx(cctx);
116793 +    return result;
116797 +/* =====  Dictionary API  ===== */
116799 +/*! ZSTD_estimateCDictSize_advanced() :
116800 + *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
116801 +size_t ZSTD_estimateCDictSize_advanced(
116802 +        size_t dictSize, ZSTD_compressionParameters cParams,
116803 +        ZSTD_dictLoadMethod_e dictLoadMethod)
116805 +    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
116806 +    return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
116807 +         + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
116808 +         + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
116809 +         + (dictLoadMethod == ZSTD_dlm_byRef ? 0
116810 +            : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
116813 +size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
116815 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
116816 +    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
116819 +size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
116821 +    if (cdict==NULL) return 0;   /* support sizeof on NULL */
116822 +    DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
116823 +    /* cdict may be in the workspace */
116824 +    return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
116825 +        + ZSTD_cwksp_sizeof(&cdict->workspace);
116828 +static size_t ZSTD_initCDict_internal(
116829 +                    ZSTD_CDict* cdict,
116830 +              const void* dictBuffer, size_t dictSize,
116831 +                    ZSTD_dictLoadMethod_e dictLoadMethod,
116832 +                    ZSTD_dictContentType_e dictContentType,
116833 +                    ZSTD_CCtx_params params)
116835 +    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
116836 +    assert(!ZSTD_checkCParams(params.cParams));
116837 +    cdict->matchState.cParams = params.cParams;
116838 +    cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
116839 +    if (cdict->matchState.dedicatedDictSearch && dictSize > ZSTD_CHUNKSIZE_MAX) {
116840 +        cdict->matchState.dedicatedDictSearch = 0;
116841 +    }
116842 +    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
116843 +        cdict->dictContent = dictBuffer;
116844 +    } else {
116845 +         void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
116846 +        RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
116847 +        cdict->dictContent = internalBuffer;
116848 +        ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
116849 +    }
116850 +    cdict->dictContentSize = dictSize;
116851 +    cdict->dictContentType = dictContentType;
116853 +    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
116856 +    /* Reset the state to no dictionary */
116857 +    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
116858 +    FORWARD_IF_ERROR(ZSTD_reset_matchState(
116859 +        &cdict->matchState,
116860 +        &cdict->workspace,
116861 +        &params.cParams,
116862 +        ZSTDcrp_makeClean,
116863 +        ZSTDirp_reset,
116864 +        ZSTD_resetTarget_CDict), "");
116865 +    /* (Maybe) load the dictionary
116866 +     * Skips loading the dictionary if it is < 8 bytes.
116867 +     */
116868 +    {   params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
116869 +        params.fParams.contentSizeFlag = 1;
116870 +        {   size_t const dictID = ZSTD_compress_insertDictionary(
116871 +                    &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
116872 +                    &params, cdict->dictContent, cdict->dictContentSize,
116873 +                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
116874 +            FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
116875 +            assert(dictID <= (size_t)(U32)-1);
116876 +            cdict->dictID = (U32)dictID;
116877 +        }
116878 +    }
116880 +    return 0;
116883 +static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
116884 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
116885 +                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
116887 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
116889 +    {   size_t const workspaceSize =
116890 +            ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
116891 +            ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
116892 +            ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
116893 +            (dictLoadMethod == ZSTD_dlm_byRef ? 0
116894 +             : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
116895 +        void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
116896 +        ZSTD_cwksp ws;
116897 +        ZSTD_CDict* cdict;
116899 +        if (!workspace) {
116900 +            ZSTD_customFree(workspace, customMem);
116901 +            return NULL;
116902 +        }
116904 +        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
116906 +        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
116907 +        assert(cdict != NULL);
116908 +        ZSTD_cwksp_move(&cdict->workspace, &ws);
116909 +        cdict->customMem = customMem;
116910 +        cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
116912 +        return cdict;
116913 +    }
116916 +ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
116917 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
116918 +                                      ZSTD_dictContentType_e dictContentType,
116919 +                                      ZSTD_compressionParameters cParams,
116920 +                                      ZSTD_customMem customMem)
116922 +    ZSTD_CCtx_params cctxParams;
116923 +    ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
116924 +    ZSTD_CCtxParams_init(&cctxParams, 0);
116925 +    cctxParams.cParams = cParams;
116926 +    cctxParams.customMem = customMem;
116927 +    return ZSTD_createCDict_advanced2(
116928 +        dictBuffer, dictSize,
116929 +        dictLoadMethod, dictContentType,
116930 +        &cctxParams, customMem);
116933 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
116934 +        const void* dict, size_t dictSize,
116935 +        ZSTD_dictLoadMethod_e dictLoadMethod,
116936 +        ZSTD_dictContentType_e dictContentType,
116937 +        const ZSTD_CCtx_params* originalCctxParams,
116938 +        ZSTD_customMem customMem)
116940 +    ZSTD_CCtx_params cctxParams = *originalCctxParams;
116941 +    ZSTD_compressionParameters cParams;
116942 +    ZSTD_CDict* cdict;
116944 +    DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
116945 +    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
116947 +    if (cctxParams.enableDedicatedDictSearch) {
116948 +        cParams = ZSTD_dedicatedDictSearch_getCParams(
116949 +            cctxParams.compressionLevel, dictSize);
116950 +        ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
116951 +    } else {
116952 +        cParams = ZSTD_getCParamsFromCCtxParams(
116953 +            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
116954 +    }
116956 +    if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
116957 +        /* Fall back to non-DDSS params */
116958 +        cctxParams.enableDedicatedDictSearch = 0;
116959 +        cParams = ZSTD_getCParamsFromCCtxParams(
116960 +            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
116961 +    }
116963 +    cctxParams.cParams = cParams;
116965 +    cdict = ZSTD_createCDict_advanced_internal(dictSize,
116966 +                        dictLoadMethod, cctxParams.cParams,
116967 +                        customMem);
116969 +    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
116970 +                                    dict, dictSize,
116971 +                                    dictLoadMethod, dictContentType,
116972 +                                    cctxParams) )) {
116973 +        ZSTD_freeCDict(cdict);
116974 +        return NULL;
116975 +    }
116977 +    return cdict;
116980 +ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
116982 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
116983 +    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
116984 +                                                  ZSTD_dlm_byCopy, ZSTD_dct_auto,
116985 +                                                  cParams, ZSTD_defaultCMem);
116986 +    if (cdict)
116987 +        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
116988 +    return cdict;
116991 +ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
116993 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
116994 +    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
116995 +                                     ZSTD_dlm_byRef, ZSTD_dct_auto,
116996 +                                     cParams, ZSTD_defaultCMem);
116997 +    if (cdict)
116998 +        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
116999 +    return cdict;
117002 +size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
117004 +    if (cdict==NULL) return 0;   /* support free on NULL */
117005 +    {   ZSTD_customMem const cMem = cdict->customMem;
117006 +        int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
117007 +        ZSTD_cwksp_free(&cdict->workspace, cMem);
117008 +        if (!cdictInWorkspace) {
117009 +            ZSTD_customFree(cdict, cMem);
117010 +        }
117011 +        return 0;
117012 +    }
117015 +/*! ZSTD_initStaticCDict_advanced() :
117016 + *  Generate a digested dictionary in provided memory area.
117017 + *  workspace: The memory area to emplace the dictionary into.
117018 + *             Provided pointer must 8-bytes aligned.
117019 + *             It must outlive dictionary usage.
117020 + *  workspaceSize: Use ZSTD_estimateCDictSize()
117021 + *                 to determine how large workspace must be.
117022 + *  cParams : use ZSTD_getCParams() to transform a compression level
117023 + *            into its relevants cParams.
117024 + * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
117025 + *  Note : there is no corresponding "free" function.
117026 + *         Since workspace was allocated externally, it must be freed externally.
117027 + */
117028 +const ZSTD_CDict* ZSTD_initStaticCDict(
117029 +                                 void* workspace, size_t workspaceSize,
117030 +                           const void* dict, size_t dictSize,
117031 +                                 ZSTD_dictLoadMethod_e dictLoadMethod,
117032 +                                 ZSTD_dictContentType_e dictContentType,
117033 +                                 ZSTD_compressionParameters cParams)
117035 +    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
117036 +    size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
117037 +                            + (dictLoadMethod == ZSTD_dlm_byRef ? 0
117038 +                               : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
117039 +                            + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
117040 +                            + matchStateSize;
117041 +    ZSTD_CDict* cdict;
117042 +    ZSTD_CCtx_params params;
117044 +    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
117046 +    {
117047 +        ZSTD_cwksp ws;
117048 +        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
117049 +        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
117050 +        if (cdict == NULL) return NULL;
117051 +        ZSTD_cwksp_move(&cdict->workspace, &ws);
117052 +    }
117054 +    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
117055 +        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
117056 +    if (workspaceSize < neededSize) return NULL;
117058 +    ZSTD_CCtxParams_init(&params, 0);
117059 +    params.cParams = cParams;
117061 +    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
117062 +                                              dict, dictSize,
117063 +                                              dictLoadMethod, dictContentType,
117064 +                                              params) ))
117065 +        return NULL;
117067 +    return cdict;
117070 +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
117072 +    assert(cdict != NULL);
117073 +    return cdict->matchState.cParams;
117076 +/*! ZSTD_getDictID_fromCDict() :
117077 + *  Provides the dictID of the dictionary loaded into `cdict`.
117078 + *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
117079 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
117080 +unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
117082 +    if (cdict==NULL) return 0;
117083 +    return cdict->dictID;
117087 +/* ZSTD_compressBegin_usingCDict_advanced() :
117088 + * cdict must be != NULL */
117089 +size_t ZSTD_compressBegin_usingCDict_advanced(
117090 +    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
117091 +    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
117093 +    ZSTD_CCtx_params cctxParams;
117094 +    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
117095 +    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
117096 +    /* Initialize the cctxParams from the cdict */
117097 +    {
117098 +        ZSTD_parameters params;
117099 +        params.fParams = fParams;
117100 +        params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
117101 +                        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
117102 +                        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
117103 +                        || cdict->compressionLevel == 0 ) ?
117104 +                ZSTD_getCParamsFromCDict(cdict)
117105 +              : ZSTD_getCParams(cdict->compressionLevel,
117106 +                                pledgedSrcSize,
117107 +                                cdict->dictContentSize);
117108 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
117109 +    }
117110 +    /* Increase window log to fit the entire dictionary and source if the
117111 +     * source size is known. Limit the increase to 19, which is the
117112 +     * window log for compression level 1 with the largest source size.
117113 +     */
117114 +    if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
117115 +        U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
117116 +        U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
117117 +        cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
117118 +    }
117119 +    return ZSTD_compressBegin_internal(cctx,
117120 +                                        NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
117121 +                                        cdict,
117122 +                                        &cctxParams, pledgedSrcSize,
117123 +                                        ZSTDb_not_buffered);
117126 +/* ZSTD_compressBegin_usingCDict() :
117127 + * pledgedSrcSize=0 means "unknown"
117128 + * if pledgedSrcSize>0, it will enable contentSizeFlag */
117129 +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
117131 +    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
117132 +    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
117133 +    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
117136 +size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
117137 +                                void* dst, size_t dstCapacity,
117138 +                                const void* src, size_t srcSize,
117139 +                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
117141 +    FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), "");   /* will check if cdict != NULL */
117142 +    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
117145 +/*! ZSTD_compress_usingCDict() :
117146 + *  Compression using a digested Dictionary.
117147 + *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
117148 + *  Note that compression parameters are decided at CDict creation time
117149 + *  while frame parameters are hardcoded */
117150 +size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
117151 +                                void* dst, size_t dstCapacity,
117152 +                                const void* src, size_t srcSize,
117153 +                                const ZSTD_CDict* cdict)
117155 +    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
117156 +    return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
117161 +/* ******************************************************************
117162 +*  Streaming
117163 +********************************************************************/
117165 +ZSTD_CStream* ZSTD_createCStream(void)
117167 +    DEBUGLOG(3, "ZSTD_createCStream");
117168 +    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
117171 +ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
117173 +    return ZSTD_initStaticCCtx(workspace, workspaceSize);
117176 +ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
117177 +{   /* CStream and CCtx are now same object */
117178 +    return ZSTD_createCCtx_advanced(customMem);
117181 +size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
117183 +    return ZSTD_freeCCtx(zcs);   /* same object */
117188 +/*======   Initialization   ======*/
117190 +size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }
117192 +size_t ZSTD_CStreamOutSize(void)
117194 +    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
117197 +static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
117199 +    if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
117200 +        return ZSTD_cpm_attachDict;
117201 +    else
117202 +        return ZSTD_cpm_noAttachDict;
117205 +/* ZSTD_resetCStream():
117206 + * pledgedSrcSize == 0 means "unknown" */
117207 +size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
117209 +    /* temporary : 0 interpreted as "unknown" during transition period.
117210 +     * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
117211 +     * 0 will be interpreted as "empty" in the future.
117212 +     */
117213 +    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
117214 +    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
117215 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
117216 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
117217 +    return 0;
117220 +/*! ZSTD_initCStream_internal() :
117221 + *  Note : for lib/compress only. Used by zstdmt_compress.c.
117222 + *  Assumption 1 : params are valid
117223 + *  Assumption 2 : either dict, or cdict, is defined, not both */
117224 +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
117225 +                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
117226 +                    const ZSTD_CCtx_params* params,
117227 +                    unsigned long long pledgedSrcSize)
117229 +    DEBUGLOG(4, "ZSTD_initCStream_internal");
117230 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
117231 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
117232 +    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
117233 +    zcs->requestedParams = *params;
117234 +    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
117235 +    if (dict) {
117236 +        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
117237 +    } else {
117238 +        /* Dictionary is cleared if !cdict */
117239 +        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
117240 +    }
117241 +    return 0;
117244 +/* ZSTD_initCStream_usingCDict_advanced() :
117245 + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
117246 +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
117247 +                                            const ZSTD_CDict* cdict,
117248 +                                            ZSTD_frameParameters fParams,
117249 +                                            unsigned long long pledgedSrcSize)
117251 +    DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
117252 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
117253 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
117254 +    zcs->requestedParams.fParams = fParams;
117255 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
117256 +    return 0;
117259 +/* note : cdict must outlive compression session */
117260 +size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
117262 +    DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
117263 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
117264 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
117265 +    return 0;
117269 +/* ZSTD_initCStream_advanced() :
117270 + * pledgedSrcSize must be exact.
117271 + * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
117272 + * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
117273 +size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
117274 +                                 const void* dict, size_t dictSize,
117275 +                                 ZSTD_parameters params, unsigned long long pss)
117277 +    /* for compatibility with older programs relying on this behavior.
117278 +     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
117279 +     * This line will be removed in the future.
117280 +     */
117281 +    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
117282 +    DEBUGLOG(4, "ZSTD_initCStream_advanced");
117283 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
117284 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
117285 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
117286 +    ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
117287 +    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
117288 +    return 0;
117291 +size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
117293 +    DEBUGLOG(4, "ZSTD_initCStream_usingDict");
117294 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
117295 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
117296 +    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
117297 +    return 0;
117300 +size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
117302 +    /* temporary : 0 interpreted as "unknown" during transition period.
117303 +     * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
117304 +     * 0 will be interpreted as "empty" in the future.
117305 +     */
117306 +    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
117307 +    DEBUGLOG(4, "ZSTD_initCStream_srcSize");
117308 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
117309 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
117310 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
117311 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
117312 +    return 0;
117315 +size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
117317 +    DEBUGLOG(4, "ZSTD_initCStream");
117318 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
117319 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
117320 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
117321 +    return 0;
117324 +/*======   Compression   ======*/
117326 +static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
117328 +    size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
117329 +    if (hintInSize==0) hintInSize = cctx->blockSize;
117330 +    return hintInSize;
117333 +/** ZSTD_compressStream_generic():
117334 + *  internal function for all *compressStream*() variants
117335 + *  non-static, because can be called from zstdmt_compress.c
117336 + * @return : hint size for next input */
117337 +static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
117338 +                                          ZSTD_outBuffer* output,
117339 +                                          ZSTD_inBuffer* input,
117340 +                                          ZSTD_EndDirective const flushMode)
117342 +    const char* const istart = (const char*)input->src;
117343 +    const char* const iend = input->size != 0 ? istart + input->size : istart;
117344 +    const char* ip = input->pos != 0 ? istart + input->pos : istart;
117345 +    char* const ostart = (char*)output->dst;
117346 +    char* const oend = output->size != 0 ? ostart + output->size : ostart;
117347 +    char* op = output->pos != 0 ? ostart + output->pos : ostart;
117348 +    U32 someMoreWork = 1;
117350 +    /* check expectations */
117351 +    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
117352 +    if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
117353 +        assert(zcs->inBuff != NULL);
117354 +        assert(zcs->inBuffSize > 0);
117355 +    }
117356 +    if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
117357 +        assert(zcs->outBuff !=  NULL);
117358 +        assert(zcs->outBuffSize > 0);
117359 +    }
117360 +    assert(output->pos <= output->size);
117361 +    assert(input->pos <= input->size);
117362 +    assert((U32)flushMode <= (U32)ZSTD_e_end);
117364 +    while (someMoreWork) {
117365 +        switch(zcs->streamStage)
117366 +        {
117367 +        case zcss_init:
117368 +            RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
117370 +        case zcss_load:
117371 +            if ( (flushMode == ZSTD_e_end)
117372 +              && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip)     /* Enough output space */
117373 +                || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)  /* OR we are allowed to return dstSizeTooSmall */
117374 +              && (zcs->inBuffPos == 0) ) {
117375 +                /* shortcut to compression pass directly into output buffer */
117376 +                size_t const cSize = ZSTD_compressEnd(zcs,
117377 +                                                op, oend-op, ip, iend-ip);
117378 +                DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
117379 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
117380 +                ip = iend;
117381 +                op += cSize;
117382 +                zcs->frameEnded = 1;
117383 +                ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
117384 +                someMoreWork = 0; break;
117385 +            }
117386 +            /* complete loading into inBuffer in buffered mode */
117387 +            if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
117388 +                size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
117389 +                size_t const loaded = ZSTD_limitCopy(
117390 +                                        zcs->inBuff + zcs->inBuffPos, toLoad,
117391 +                                        ip, iend-ip);
117392 +                zcs->inBuffPos += loaded;
117393 +                if (loaded != 0)
117394 +                    ip += loaded;
117395 +                if ( (flushMode == ZSTD_e_continue)
117396 +                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
117397 +                    /* not enough input to fill full block : stop here */
117398 +                    someMoreWork = 0; break;
117399 +                }
117400 +                if ( (flushMode == ZSTD_e_flush)
117401 +                  && (zcs->inBuffPos == zcs->inToCompress) ) {
117402 +                    /* empty */
117403 +                    someMoreWork = 0; break;
117404 +                }
117405 +            }
117406 +            /* compress current block (note : this stage cannot be stopped in the middle) */
117407 +            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
117408 +            {   int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
117409 +                void* cDst;
117410 +                size_t cSize;
117411 +                size_t oSize = oend-op;
117412 +                size_t const iSize = inputBuffered
117413 +                    ? zcs->inBuffPos - zcs->inToCompress
117414 +                    : MIN((size_t)(iend - ip), zcs->blockSize);
117415 +                if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
117416 +                    cDst = op;   /* compress into output buffer, to skip flush stage */
117417 +                else
117418 +                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
117419 +                if (inputBuffered) {
117420 +                    unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
117421 +                    cSize = lastBlock ?
117422 +                            ZSTD_compressEnd(zcs, cDst, oSize,
117423 +                                        zcs->inBuff + zcs->inToCompress, iSize) :
117424 +                            ZSTD_compressContinue(zcs, cDst, oSize,
117425 +                                        zcs->inBuff + zcs->inToCompress, iSize);
117426 +                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
117427 +                    zcs->frameEnded = lastBlock;
117428 +                    /* prepare next block */
117429 +                    zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
117430 +                    if (zcs->inBuffTarget > zcs->inBuffSize)
117431 +                        zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
117432 +                    DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
117433 +                            (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
117434 +                    if (!lastBlock)
117435 +                        assert(zcs->inBuffTarget <= zcs->inBuffSize);
117436 +                    zcs->inToCompress = zcs->inBuffPos;
117437 +                } else {
117438 +                    unsigned const lastBlock = (ip + iSize == iend);
117439 +                    assert(flushMode == ZSTD_e_end /* Already validated */);
117440 +                    cSize = lastBlock ?
117441 +                            ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
117442 +                            ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
117443 +                    /* Consume the input prior to error checking to mirror buffered mode. */
117444 +                    if (iSize > 0)
117445 +                        ip += iSize;
117446 +                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
117447 +                    zcs->frameEnded = lastBlock;
117448 +                    if (lastBlock)
117449 +                        assert(ip == iend);
117450 +                }
117451 +                if (cDst == op) {  /* no need to flush */
117452 +                    op += cSize;
117453 +                    if (zcs->frameEnded) {
117454 +                        DEBUGLOG(5, "Frame completed directly in outBuffer");
117455 +                        someMoreWork = 0;
117456 +                        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
117457 +                    }
117458 +                    break;
117459 +                }
117460 +                zcs->outBuffContentSize = cSize;
117461 +                zcs->outBuffFlushedSize = 0;
117462 +                zcs->streamStage = zcss_flush; /* pass-through to flush stage */
117463 +            }
117464 +           /* fall-through */
117465 +        case zcss_flush:
117466 +            DEBUGLOG(5, "flush stage");
117467 +            assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
117468 +            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
117469 +                size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
117470 +                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
117471 +                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
117472 +                            (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
117473 +                if (flushed)
117474 +                    op += flushed;
117475 +                zcs->outBuffFlushedSize += flushed;
117476 +                if (toFlush!=flushed) {
117477 +                    /* flush not fully completed, presumably because dst is too small */
117478 +                    assert(op==oend);
117479 +                    someMoreWork = 0;
117480 +                    break;
117481 +                }
117482 +                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
117483 +                if (zcs->frameEnded) {
117484 +                    DEBUGLOG(5, "Frame completed on flush");
117485 +                    someMoreWork = 0;
117486 +                    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
117487 +                    break;
117488 +                }
117489 +                zcs->streamStage = zcss_load;
117490 +                break;
117491 +            }
117493 +        default: /* impossible */
117494 +            assert(0);
117495 +        }
117496 +    }
117498 +    input->pos = ip - istart;
117499 +    output->pos = op - ostart;
117500 +    if (zcs->frameEnded) return 0;
117501 +    return ZSTD_nextInputSizeHint(zcs);
117504 +static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
117506 +    return ZSTD_nextInputSizeHint(cctx);
117510 +size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
117512 +    FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
117513 +    return ZSTD_nextInputSizeHint_MTorST(zcs);
117516 +/* After a compression call set the expected input/output buffer.
117517 + * This is validated at the start of the next compression call.
117518 + */
117519 +static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
117521 +    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
117522 +        cctx->expectedInBuffer = *input;
117523 +    }
117524 +    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
117525 +        cctx->expectedOutBufferSize = output->size - output->pos;
117526 +    }
117529 +/* Validate that the input/output buffers match the expectations set by
117530 + * ZSTD_setBufferExpectations.
117531 + */
117532 +static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
117533 +                                        ZSTD_outBuffer const* output,
117534 +                                        ZSTD_inBuffer const* input,
117535 +                                        ZSTD_EndDirective endOp)
117537 +    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
117538 +        ZSTD_inBuffer const expect = cctx->expectedInBuffer;
117539 +        if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
117540 +            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
117541 +        if (endOp != ZSTD_e_end)
117542 +            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
117543 +    }
117544 +    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
117545 +        size_t const outBufferSize = output->size - output->pos;
117546 +        if (cctx->expectedOutBufferSize != outBufferSize)
117547 +            RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
117548 +    }
117549 +    return 0;
117552 +static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
117553 +                                             ZSTD_EndDirective endOp,
117554 +                                             size_t inSize) {
117555 +    ZSTD_CCtx_params params = cctx->requestedParams;
117556 +    ZSTD_prefixDict const prefixDict = cctx->prefixDict;
117557 +    FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
117558 +    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
117559 +    assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
117560 +    if (cctx->cdict)
117561 +        params.compressionLevel = cctx->cdict->compressionLevel; /* let cdict take priority in terms of compression level */
117562 +    DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
117563 +    if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1;  /* auto-fix pledgedSrcSize */
117564 +    {
117565 +        size_t const dictSize = prefixDict.dict
117566 +                ? prefixDict.dictSize
117567 +                : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
117568 +        ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
117569 +        params.cParams = ZSTD_getCParamsFromCCtxParams(
117570 +                &params, cctx->pledgedSrcSizePlusOne-1,
117571 +                dictSize, mode);
117572 +    }
117574 +    if (ZSTD_CParams_shouldEnableLdm(&params.cParams)) {
117575 +        /* Enable LDM by default for optimal parser and window size >= 128MB */
117576 +        DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)");
117577 +        params.ldmParams.enableLdm = 1;
117578 +    }
117580 +    {   U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
117581 +        assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
117582 +        FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
117583 +                prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
117584 +                cctx->cdict,
117585 +                &params, pledgedSrcSize,
117586 +                ZSTDb_buffered) , "");
117587 +        assert(cctx->appliedParams.nbWorkers == 0);
117588 +        cctx->inToCompress = 0;
117589 +        cctx->inBuffPos = 0;
117590 +        if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
117591 +            /* for small input: avoid automatic flush on reaching end of block, since
117592 +            * it would require to add a 3-bytes null block to end frame
117593 +            */
117594 +            cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
117595 +        } else {
117596 +            cctx->inBuffTarget = 0;
117597 +        }
117598 +        cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
117599 +        cctx->streamStage = zcss_load;
117600 +        cctx->frameEnded = 0;
117601 +    }
117602 +    return 0;
117605 +size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
117606 +                             ZSTD_outBuffer* output,
117607 +                             ZSTD_inBuffer* input,
117608 +                             ZSTD_EndDirective endOp)
117610 +    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
117611 +    /* check conditions */
117612 +    RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
117613 +    RETURN_ERROR_IF(input->pos  > input->size, srcSize_wrong, "invalid input buffer");
117614 +    RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
117615 +    assert(cctx != NULL);
117617 +    /* transparent initialization stage */
117618 +    if (cctx->streamStage == zcss_init) {
117619 +        FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
117620 +        ZSTD_setBufferExpectations(cctx, output, input);    /* Set initial buffer expectations now that we've initialized */
117621 +    }
117622 +    /* end of transparent initialization stage */
117624 +    FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
117625 +    /* compression stage */
117626 +    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
117627 +    DEBUGLOG(5, "completed ZSTD_compressStream2");
117628 +    ZSTD_setBufferExpectations(cctx, output, input);
117629 +    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
117632 +size_t ZSTD_compressStream2_simpleArgs (
117633 +                            ZSTD_CCtx* cctx,
117634 +                            void* dst, size_t dstCapacity, size_t* dstPos,
117635 +                      const void* src, size_t srcSize, size_t* srcPos,
117636 +                            ZSTD_EndDirective endOp)
117638 +    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
117639 +    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
117640 +    /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
117641 +    size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
117642 +    *dstPos = output.pos;
117643 +    *srcPos = input.pos;
117644 +    return cErr;
117647 +size_t ZSTD_compress2(ZSTD_CCtx* cctx,
117648 +                      void* dst, size_t dstCapacity,
117649 +                      const void* src, size_t srcSize)
117651 +    ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
117652 +    ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
117653 +    DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
117654 +    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
117655 +    /* Enable stable input/output buffers. */
117656 +    cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
117657 +    cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
117658 +    {   size_t oPos = 0;
117659 +        size_t iPos = 0;
117660 +        size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
117661 +                                        dst, dstCapacity, &oPos,
117662 +                                        src, srcSize, &iPos,
117663 +                                        ZSTD_e_end);
117664 +        /* Reset to the original values. */
117665 +        cctx->requestedParams.inBufferMode = originalInBufferMode;
117666 +        cctx->requestedParams.outBufferMode = originalOutBufferMode;
117667 +        FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
117668 +        if (result != 0) {  /* compression not completed, due to lack of output space */
117669 +            assert(oPos == dstCapacity);
117670 +            RETURN_ERROR(dstSize_tooSmall, "");
117671 +        }
117672 +        assert(iPos == srcSize);   /* all input is expected consumed */
117673 +        return oPos;
117674 +    }
117677 +typedef struct {
117678 +    U32 idx;             /* Index in array of ZSTD_Sequence */
117679 +    U32 posInSequence;   /* Position within sequence at idx */
117680 +    size_t posInSrc;        /* Number of bytes given by sequences provided so far */
117681 +} ZSTD_sequencePosition;
117683 +/* Returns a ZSTD error code if sequence is not valid */
117684 +static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
117685 +                                    size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
117686 +    size_t offsetBound;
117687 +    U32 windowSize = 1 << windowLog;
117688 +    /* posInSrc represents the amount of data the the decoder would decode up to this point.
117689 +     * As long as the amount of data decoded is less than or equal to window size, offsets may be
117690 +     * larger than the total length of output decoded in order to reference the dict, even larger than
117691 +     * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
117692 +     */
117693 +    offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
117694 +    RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
117695 +    RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
117696 +    return 0;
117699 +/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
117700 +static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
117701 +    U32 offCode = rawOffset + ZSTD_REP_MOVE;
117702 +    U32 repCode = 0;
117704 +    if (!ll0 && rawOffset == rep[0]) {
117705 +        repCode = 1;
117706 +    } else if (rawOffset == rep[1]) {
117707 +        repCode = 2 - ll0;
117708 +    } else if (rawOffset == rep[2]) {
117709 +        repCode = 3 - ll0;
117710 +    } else if (ll0 && rawOffset == rep[0] - 1) {
117711 +        repCode = 3;
117712 +    }
117713 +    if (repCode) {
117714 +        /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
117715 +        offCode = repCode - 1;
117716 +    }
117717 +    return offCode;
117720 +/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
117721 + * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
117722 + */
117723 +static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
117724 +                                                             const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
117725 +                                                             const void* src, size_t blockSize) {
117726 +    U32 idx = seqPos->idx;
117727 +    BYTE const* ip = (BYTE const*)(src);
117728 +    const BYTE* const iend = ip + blockSize;
117729 +    repcodes_t updatedRepcodes;
117730 +    U32 dictSize;
117731 +    U32 litLength;
117732 +    U32 matchLength;
117733 +    U32 ll0;
117734 +    U32 offCode;
117736 +    if (cctx->cdict) {
117737 +        dictSize = (U32)cctx->cdict->dictContentSize;
117738 +    } else if (cctx->prefixDict.dict) {
117739 +        dictSize = (U32)cctx->prefixDict.dictSize;
117740 +    } else {
117741 +        dictSize = 0;
117742 +    }
117743 +    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
117744 +    for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
117745 +        litLength = inSeqs[idx].litLength;
117746 +        matchLength = inSeqs[idx].matchLength;
117747 +        ll0 = litLength == 0;
117748 +        offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
117749 +        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
117751 +        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
117752 +        if (cctx->appliedParams.validateSequences) {
117753 +            seqPos->posInSrc += litLength + matchLength;
117754 +            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
117755 +                                                cctx->appliedParams.cParams.windowLog, dictSize,
117756 +                                                cctx->appliedParams.cParams.minMatch),
117757 +                                                "Sequence validation failed");
117758 +        }
117759 +        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
117760 +                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
117761 +        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
117762 +        ip += matchLength + litLength;
117763 +    }
117764 +    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
117766 +    if (inSeqs[idx].litLength) {
117767 +        DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
117768 +        ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
117769 +        ip += inSeqs[idx].litLength;
117770 +        seqPos->posInSrc += inSeqs[idx].litLength;
117771 +    }
117772 +    RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
117773 +    seqPos->idx = idx+1;
117774 +    return 0;
117777 +/* Returns the number of bytes to move the current read position back by. Only non-zero
117778 + * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
117779 + * went wrong.
117781 + * This function will attempt to scan through blockSize bytes represented by the sequences
117782 + * in inSeqs, storing any (partial) sequences.
117784 + * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
117785 + * avoid splitting a match, or to avoid splitting a match such that it would produce a match
117786 + * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
117787 + */
117788 +static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
117789 +                                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
117790 +                                                       const void* src, size_t blockSize) {
117791 +    U32 idx = seqPos->idx;
117792 +    U32 startPosInSequence = seqPos->posInSequence;
117793 +    U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
117794 +    size_t dictSize;
117795 +    BYTE const* ip = (BYTE const*)(src);
117796 +    BYTE const* iend = ip + blockSize;  /* May be adjusted if we decide to process fewer than blockSize bytes */
117797 +    repcodes_t updatedRepcodes;
117798 +    U32 bytesAdjustment = 0;
117799 +    U32 finalMatchSplit = 0;
117800 +    U32 litLength;
117801 +    U32 matchLength;
117802 +    U32 rawOffset;
117803 +    U32 offCode;
117805 +    if (cctx->cdict) {
117806 +        dictSize = cctx->cdict->dictContentSize;
117807 +    } else if (cctx->prefixDict.dict) {
117808 +        dictSize = cctx->prefixDict.dictSize;
117809 +    } else {
117810 +        dictSize = 0;
117811 +    }
117812 +    DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
117813 +    DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
117814 +    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
117815 +    while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
117816 +        const ZSTD_Sequence currSeq = inSeqs[idx];
117817 +        litLength = currSeq.litLength;
117818 +        matchLength = currSeq.matchLength;
117819 +        rawOffset = currSeq.offset;
117821 +        /* Modify the sequence depending on where endPosInSequence lies */
117822 +        if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
117823 +            if (startPosInSequence >= litLength) {
117824 +                startPosInSequence -= litLength;
117825 +                litLength = 0;
117826 +                matchLength -= startPosInSequence;
117827 +            } else {
117828 +                litLength -= startPosInSequence;
117829 +            }
117830 +            /* Move to the next sequence */
117831 +            endPosInSequence -= currSeq.litLength + currSeq.matchLength;
117832 +            startPosInSequence = 0;
117833 +            idx++;
117834 +        } else {
117835 +            /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
117836 +               does not reach the end of the match. So, we have to split the sequence */
117837 +            DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
117838 +                     currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
117839 +            if (endPosInSequence > litLength) {
117840 +                U32 firstHalfMatchLength;
117841 +                litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
117842 +                firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
117843 +                if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
117844 +                    /* Only ever split the match if it is larger than the block size */
117845 +                    U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
117846 +                    if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
117847 +                        /* Move the endPosInSequence backward so that it creates match of minMatch length */
117848 +                        endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
117849 +                        bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
117850 +                        firstHalfMatchLength -= bytesAdjustment;
117851 +                    }
117852 +                    matchLength = firstHalfMatchLength;
117853 +                    /* Flag that we split the last match - after storing the sequence, exit the loop,
117854 +                       but keep the value of endPosInSequence */
117855 +                    finalMatchSplit = 1;
117856 +                } else {
117857 +                    /* Move the position in sequence backwards so that we don't split match, and break to store
117858 +                     * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
117859 +                     * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
117860 +                     * would cause the first half of the match to be too small
117861 +                     */
117862 +                    bytesAdjustment = endPosInSequence - currSeq.litLength;
117863 +                    endPosInSequence = currSeq.litLength;
117864 +                    break;
117865 +                }
117866 +            } else {
117867 +                /* This sequence ends inside the literals, break to store the last literals */
117868 +                break;
117869 +            }
117870 +        }
117871 +        /* Check if this offset can be represented with a repcode */
117872 +        {   U32 ll0 = (litLength == 0);
117873 +            offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
117874 +            updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
117875 +        }
117877 +        if (cctx->appliedParams.validateSequences) {
117878 +            seqPos->posInSrc += litLength + matchLength;
117879 +            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
117880 +                                                   cctx->appliedParams.cParams.windowLog, dictSize,
117881 +                                                   cctx->appliedParams.cParams.minMatch),
117882 +                                                   "Sequence validation failed");
117883 +        }
117884 +        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
117885 +        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
117886 +                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
117887 +        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
117888 +        ip += matchLength + litLength;
117889 +    }
117890 +    DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
117891 +    assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
117892 +    seqPos->idx = idx;
117893 +    seqPos->posInSequence = endPosInSequence;
117894 +    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
117896 +    iend -= bytesAdjustment;
117897 +    if (ip != iend) {
117898 +        /* Store any last literals */
117899 +        U32 lastLLSize = (U32)(iend - ip);
117900 +        assert(ip <= iend);
117901 +        DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
117902 +        ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
117903 +        seqPos->posInSrc += lastLLSize;
117904 +    }
117906 +    return bytesAdjustment;
117909 +typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
117910 +                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
117911 +                                       const void* src, size_t blockSize);
117912 +static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
117913 +    ZSTD_sequenceCopier sequenceCopier = NULL;
117914 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
117915 +    if (mode == ZSTD_sf_explicitBlockDelimiters) {
117916 +        return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
117917 +    } else if (mode == ZSTD_sf_noBlockDelimiters) {
117918 +        return ZSTD_copySequencesToSeqStoreNoBlockDelim;
117919 +    }
117920 +    assert(sequenceCopier != NULL);
117921 +    return sequenceCopier;
117924 +/* Compress, block-by-block, all of the sequences given.
117926 + * Returns the cumulative size of all compressed blocks (including their headers), otherwise a ZSTD error.
117927 + */
117928 +static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
117929 +                                              void* dst, size_t dstCapacity,
117930 +                                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
117931 +                                              const void* src, size_t srcSize) {
117932 +    size_t cSize = 0;
117933 +    U32 lastBlock;
117934 +    size_t blockSize;
117935 +    size_t compressedSeqsSize;
117936 +    size_t remaining = srcSize;
117937 +    ZSTD_sequencePosition seqPos = {0, 0, 0};
117939 +    BYTE const* ip = (BYTE const*)src;
117940 +    BYTE* op = (BYTE*)dst;
117941 +    ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
117943 +    DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
117944 +    /* Special case: empty frame */
117945 +    if (remaining == 0) {
117946 +        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
117947 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
117948 +        MEM_writeLE32(op, cBlockHeader24);
117949 +        op += ZSTD_blockHeaderSize;
117950 +        dstCapacity -= ZSTD_blockHeaderSize;
117951 +        cSize += ZSTD_blockHeaderSize;
117952 +    }
117954 +    while (remaining) {
117955 +        size_t cBlockSize;
117956 +        size_t additionalByteAdjustment;
117957 +        lastBlock = remaining <= cctx->blockSize;
117958 +        blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
117959 +        ZSTD_resetSeqStore(&cctx->seqStore);
117960 +        DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
117962 +        additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
117963 +        FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
117964 +        blockSize -= additionalByteAdjustment;
117966 +        /* If blocks are too small, emit as a nocompress block */
117967 +        if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
117968 +            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
117969 +            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
117970 +            DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
117971 +            cSize += cBlockSize;
117972 +            ip += blockSize;
117973 +            op += cBlockSize;
117974 +            remaining -= blockSize;
117975 +            dstCapacity -= cBlockSize;
117976 +            continue;
117977 +        }
117979 +        compressedSeqsSize = ZSTD_entropyCompressSequences(&cctx->seqStore,
117980 +                                &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
117981 +                                &cctx->appliedParams,
117982 +                                op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
117983 +                                blockSize,
117984 +                                cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
117985 +                                cctx->bmi2);
117986 +        FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
117987 +        DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
117989 +        if (!cctx->isFirstBlock &&
117990 +            ZSTD_maybeRLE(&cctx->seqStore) &&
117991 +            ZSTD_isRLE((BYTE const*)src, srcSize)) {
117992 +            /* We don't want to emit our first block as a RLE even if it qualifies because
117993 +            * doing so will cause the decoder (cli only) to throw a "should consume all input error."
117994 +            * This is only an issue for zstd <= v1.4.3
117995 +            */
117996 +            compressedSeqsSize = 1;
117997 +        }
117999 +        if (compressedSeqsSize == 0) {
118000 +            /* ZSTD_noCompressBlock writes the block header as well */
118001 +            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
118002 +            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
118003 +            DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
118004 +        } else if (compressedSeqsSize == 1) {
118005 +            cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
118006 +            FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
118007 +            DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
118008 +        } else {
118009 +            U32 cBlockHeader;
118010 +            /* Error checking and repcodes update */
118011 +            ZSTD_confirmRepcodesAndEntropyTables(cctx);
118012 +            if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
118013 +                cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
118015 +            /* Write block header into beginning of block*/
118016 +            cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
118017 +            MEM_writeLE24(op, cBlockHeader);
118018 +            cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
118019 +            DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
118020 +        }
118022 +        cSize += cBlockSize;
118023 +        DEBUGLOG(4, "cSize running total: %zu", cSize);
118025 +        if (lastBlock) {
118026 +            break;
118027 +        } else {
118028 +            ip += blockSize;
118029 +            op += cBlockSize;
118030 +            remaining -= blockSize;
118031 +            dstCapacity -= cBlockSize;
118032 +            cctx->isFirstBlock = 0;
118033 +        }
118034 +    }
118036 +    return cSize;
118039 +size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
118040 +                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
118041 +                              const void* src, size_t srcSize) {
118042 +    BYTE* op = (BYTE*)dst;
118043 +    size_t cSize = 0;
118044 +    size_t compressedBlocksSize = 0;
118045 +    size_t frameHeaderSize = 0;
118047 +    /* Transparent initialization stage, same as compressStream2() */
118048 +    DEBUGLOG(3, "ZSTD_compressSequences()");
118049 +    assert(cctx != NULL);
118050 +    FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
118051 +    /* Begin writing output, starting with frame header */
118052 +    frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
118053 +    op += frameHeaderSize;
118054 +    dstCapacity -= frameHeaderSize;
118055 +    cSize += frameHeaderSize;
118056 +    if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
118057 +        xxh64_update(&cctx->xxhState, src, srcSize);
118058 +    }
118059 +    /* cSize includes block header size and compressed sequences size */
118060 +    compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
118061 +                                                           op, dstCapacity,
118062 +                                                           inSeqs, inSeqsSize,
118063 +                                                           src, srcSize);
118064 +    FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
118065 +    cSize += compressedBlocksSize;
118066 +    dstCapacity -= compressedBlocksSize;
118068 +    if (cctx->appliedParams.fParams.checksumFlag) {
118069 +        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
118070 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
118071 +        DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
118072 +        MEM_writeLE32((char*)dst + cSize, checksum);
118073 +        cSize += 4;
118074 +    }
118076 +    DEBUGLOG(3, "Final compressed size: %zu", cSize);
118077 +    return cSize;
118080 +/*======   Finalize   ======*/
118082 +/*! ZSTD_flushStream() :
118083 + * @return : amount of data remaining to flush */
118084 +size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
118086 +    ZSTD_inBuffer input = { NULL, 0, 0 };
118087 +    return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
118091 +size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
118093 +    ZSTD_inBuffer input = { NULL, 0, 0 };
118094 +    size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
118095 +    FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
118096 +    if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */
118097 +    /* single thread mode : attempt to calculate remaining to flush more precisely */
118098 +    {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
118099 +        size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
118100 +        size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
118101 +        DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
118102 +        return toFlush;
118103 +    }
118107 +/*-=====  Pre-defined compression levels  =====-*/
118109 +#define ZSTD_MAX_CLEVEL     22
118110 +int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
118111 +int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
118113 +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
118114 +{   /* "default" - for any srcSize > 256 KB */
118115 +    /* W,  C,  H,  S,  L, TL, strat */
118116 +    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
118117 +    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
118118 +    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
118119 +    { 21, 16, 17,  1,  5,  0, ZSTD_dfast   },  /* level  3 */
118120 +    { 21, 18, 18,  1,  5,  0, ZSTD_dfast   },  /* level  4 */
118121 +    { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
118122 +    { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */
118123 +    { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */
118124 +    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
118125 +    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
118126 +    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
118127 +    { 22, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
118128 +    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
118129 +    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 13 */
118130 +    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
118131 +    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
118132 +    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
118133 +    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
118134 +    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
118135 +    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
118136 +    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
118137 +    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
118138 +    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
118140 +{   /* for srcSize <= 256 KB */
118141 +    /* W,  C,  H,  S,  L,  T, strat */
118142 +    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
118143 +    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
118144 +    { 18, 14, 14,  1,  5,  0, ZSTD_dfast   },  /* level  2 */
118145 +    { 18, 16, 16,  1,  4,  0, ZSTD_dfast   },  /* level  3 */
118146 +    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
118147 +    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
118148 +    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
118149 +    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
118150 +    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
118151 +    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
118152 +    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
118153 +    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
118154 +    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
118155 +    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
118156 +    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
118157 +    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
118158 +    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
118159 +    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
118160 +    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
118161 +    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
118162 +    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
118163 +    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
118164 +    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
118166 +{   /* for srcSize <= 128 KB */
118167 +    /* W,  C,  H,  S,  L,  T, strat */
118168 +    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
118169 +    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
118170 +    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
118171 +    { 17, 15, 16,  2,  5,  0, ZSTD_dfast   },  /* level  3 */
118172 +    { 17, 17, 17,  2,  4,  0, ZSTD_dfast   },  /* level  4 */
118173 +    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
118174 +    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
118175 +    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
118176 +    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
118177 +    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
118178 +    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
118179 +    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
118180 +    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
118181 +    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
118182 +    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
118183 +    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
118184 +    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
118185 +    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
118186 +    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
118187 +    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
118188 +    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
118189 +    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
118190 +    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
118192 +{   /* for srcSize <= 16 KB */
118193 +    /* W,  C,  H,  S,  L,  T, strat */
118194 +    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
118195 +    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
118196 +    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
118197 +    { 14, 14, 15,  2,  4,  0, ZSTD_dfast   },  /* level  3 */
118198 +    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
118199 +    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
118200 +    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
118201 +    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
118202 +    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
118203 +    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
118204 +    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
118205 +    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
118206 +    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
118207 +    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
118208 +    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
118209 +    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
118210 +    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
118211 +    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
118212 +    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
118213 +    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
118214 +    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
118215 +    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
118216 +    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
118220 +static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
118222 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
118223 +    switch (cParams.strategy) {
118224 +        case ZSTD_fast:
118225 +        case ZSTD_dfast:
118226 +            break;
118227 +        case ZSTD_greedy:
118228 +        case ZSTD_lazy:
118229 +        case ZSTD_lazy2:
118230 +            cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
118231 +            break;
118232 +        case ZSTD_btlazy2:
118233 +        case ZSTD_btopt:
118234 +        case ZSTD_btultra:
118235 +        case ZSTD_btultra2:
118236 +            break;
118237 +    }
118238 +    return cParams;
118241 +static int ZSTD_dedicatedDictSearch_isSupported(
118242 +        ZSTD_compressionParameters const* cParams)
118244 +    return (cParams->strategy >= ZSTD_greedy)
118245 +        && (cParams->strategy <= ZSTD_lazy2)
118246 +        && (cParams->hashLog >= cParams->chainLog)
118247 +        && (cParams->chainLog <= 24);
118251 + * Reverses the adjustment applied to cparams when enabling dedicated dict
118252 + * search. This is used to recover the params set to be used in the working
118253 + * context. (Otherwise, those tables would also grow.)
118254 + */
118255 +static void ZSTD_dedicatedDictSearch_revertCParams(
118256 +        ZSTD_compressionParameters* cParams) {
118257 +    switch (cParams->strategy) {
118258 +        case ZSTD_fast:
118259 +        case ZSTD_dfast:
118260 +            break;
118261 +        case ZSTD_greedy:
118262 +        case ZSTD_lazy:
118263 +        case ZSTD_lazy2:
118264 +            cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
118265 +            break;
118266 +        case ZSTD_btlazy2:
118267 +        case ZSTD_btopt:
118268 +        case ZSTD_btultra:
118269 +        case ZSTD_btultra2:
118270 +            break;
118271 +    }
118274 +static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
118276 +    switch (mode) {
118277 +    case ZSTD_cpm_unknown:
118278 +    case ZSTD_cpm_noAttachDict:
118279 +    case ZSTD_cpm_createCDict:
118280 +        break;
118281 +    case ZSTD_cpm_attachDict:
118282 +        dictSize = 0;
118283 +        break;
118284 +    default:
118285 +        assert(0);
118286 +        break;
118287 +    }
118288 +    {   int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
118289 +        size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
118290 +        return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
118291 +    }
118294 +/*! ZSTD_getCParams_internal() :
118295 + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
118296 + *  Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
118297 + *        Use dictSize == 0 for unknown or unused.
118298 + *  Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
118299 +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
118301 +    U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
118302 +    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
118303 +    int row;
118304 +    DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
118306 +    /* row */
118307 +    if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
118308 +    else if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
118309 +    else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
118310 +    else row = compressionLevel;
118312 +    {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
118313 +        /* acceleration factor */
118314 +        if (compressionLevel < 0) {
118315 +            int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
118316 +            cp.targetLength = (unsigned)(-clampedCompressionLevel);
118317 +        }
118318 +        /* refine parameters based on srcSize & dictSize */
118319 +        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
118320 +    }
118323 +/*! ZSTD_getCParams() :
118324 + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
118325 + *  Size values are optional, provide 0 if not known or unused */
118326 +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
118328 +    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
118329 +    return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
118332 +/*! ZSTD_getParams() :
118333 + *  same idea as ZSTD_getCParams()
118334 + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
118335 + *  Fields of `ZSTD_frameParameters` are set to default values */
118336 +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
118337 +    ZSTD_parameters params;
118338 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
118339 +    DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
118340 +    ZSTD_memset(&params, 0, sizeof(params));
118341 +    params.cParams = cParams;
118342 +    params.fParams.contentSizeFlag = 1;
118343 +    return params;
118346 +/*! ZSTD_getParams() :
118347 + *  same idea as ZSTD_getCParams()
118348 + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
118349 + *  Fields of `ZSTD_frameParameters` are set to default values */
118350 +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
118351 +    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
118352 +    return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
118354 diff --git a/lib/zstd/compress/zstd_compress_internal.h b/lib/zstd/compress/zstd_compress_internal.h
118355 new file mode 100644
118356 index 000000000000..b56c482322ba
118357 --- /dev/null
118358 +++ b/lib/zstd/compress/zstd_compress_internal.h
118359 @@ -0,0 +1,1188 @@
118361 + * Copyright (c) Yann Collet, Facebook, Inc.
118362 + * All rights reserved.
118364 + * This source code is licensed under both the BSD-style license (found in the
118365 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
118366 + * in the COPYING file in the root directory of this source tree).
118367 + * You may select, at your option, one of the above-listed licenses.
118368 + */
118370 +/* This header contains definitions
118371 + * that shall **only** be used by modules within lib/compress.
118372 + */
118374 +#ifndef ZSTD_COMPRESS_H
118375 +#define ZSTD_COMPRESS_H
118377 +/*-*************************************
118378 +*  Dependencies
118379 +***************************************/
118380 +#include "../common/zstd_internal.h"
118381 +#include "zstd_cwksp.h"
118384 +/*-*************************************
118385 +*  Constants
118386 +***************************************/
118387 +#define kSearchStrength      8
118388 +#define HASH_READ_SIZE       8
118389 +#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
118390 +                                       It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
118391 +                                       It's not a big deal though : candidate will just be sorted again.
118392 +                                       Additionally, candidate position 1 will be lost.
118393 +                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
118394 +                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
118395 +                                       This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
118398 +/*-*************************************
118399 +*  Context memory management
118400 +***************************************/
118401 +typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
118402 +typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
118404 +typedef struct ZSTD_prefixDict_s {
118405 +    const void* dict;
118406 +    size_t dictSize;
118407 +    ZSTD_dictContentType_e dictContentType;
118408 +} ZSTD_prefixDict;
118410 +typedef struct {
118411 +    void* dictBuffer;
118412 +    void const* dict;
118413 +    size_t dictSize;
118414 +    ZSTD_dictContentType_e dictContentType;
118415 +    ZSTD_CDict* cdict;
118416 +} ZSTD_localDict;
118418 +typedef struct {
118419 +    HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)];
118420 +    HUF_repeat repeatMode;
118421 +} ZSTD_hufCTables_t;
118423 +typedef struct {
118424 +    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
118425 +    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
118426 +    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
118427 +    FSE_repeat offcode_repeatMode;
118428 +    FSE_repeat matchlength_repeatMode;
118429 +    FSE_repeat litlength_repeatMode;
118430 +} ZSTD_fseCTables_t;
118432 +typedef struct {
118433 +    ZSTD_hufCTables_t huf;
118434 +    ZSTD_fseCTables_t fse;
118435 +} ZSTD_entropyCTables_t;
118437 +typedef struct {
118438 +    U32 off;            /* Offset code (offset + ZSTD_REP_MOVE) for the match */
118439 +    U32 len;            /* Raw length of match */
118440 +} ZSTD_match_t;
118442 +typedef struct {
118443 +    U32 offset;         /* Offset of sequence */
118444 +    U32 litLength;      /* Length of literals prior to match */
118445 +    U32 matchLength;    /* Raw length of match */
118446 +} rawSeq;
118448 +typedef struct {
118449 +  rawSeq* seq;          /* The start of the sequences */
118450 +  size_t pos;           /* The index in seq where reading stopped. pos <= size. */
118451 +  size_t posInSequence; /* The position within the sequence at seq[pos] where reading
118452 +                           stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
118453 +  size_t size;          /* The number of sequences. <= capacity. */
118454 +  size_t capacity;      /* The capacity starting from `seq` pointer */
118455 +} rawSeqStore_t;
118457 +UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
118459 +typedef struct {
118460 +    int price;
118461 +    U32 off;
118462 +    U32 mlen;
118463 +    U32 litlen;
118464 +    U32 rep[ZSTD_REP_NUM];
118465 +} ZSTD_optimal_t;
118467 +typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
118469 +typedef struct {
118470 +    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
118471 +    unsigned* litFreq;           /* table of literals statistics, of size 256 */
118472 +    unsigned* litLengthFreq;     /* table of litLength statistics, of size (MaxLL+1) */
118473 +    unsigned* matchLengthFreq;   /* table of matchLength statistics, of size (MaxML+1) */
118474 +    unsigned* offCodeFreq;       /* table of offCode statistics, of size (MaxOff+1) */
118475 +    ZSTD_match_t* matchTable;    /* list of found matches, of size ZSTD_OPT_NUM+1 */
118476 +    ZSTD_optimal_t* priceTable;  /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
118478 +    U32  litSum;                 /* nb of literals */
118479 +    U32  litLengthSum;           /* nb of litLength codes */
118480 +    U32  matchLengthSum;         /* nb of matchLength codes */
118481 +    U32  offCodeSum;             /* nb of offset codes */
118482 +    U32  litSumBasePrice;        /* to compare to log2(litfreq) */
118483 +    U32  litLengthSumBasePrice;  /* to compare to log2(llfreq)  */
118484 +    U32  matchLengthSumBasePrice;/* to compare to log2(mlfreq)  */
118485 +    U32  offCodeSumBasePrice;    /* to compare to log2(offreq)  */
118486 +    ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
118487 +    const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
118488 +    ZSTD_literalCompressionMode_e literalCompressionMode;
118489 +} optState_t;
118491 +typedef struct {
118492 +  ZSTD_entropyCTables_t entropy;
118493 +  U32 rep[ZSTD_REP_NUM];
118494 +} ZSTD_compressedBlockState_t;
118496 +typedef struct {
118497 +    BYTE const* nextSrc;    /* next block here to continue on current prefix */
118498 +    BYTE const* base;       /* All regular indexes relative to this position */
118499 +    BYTE const* dictBase;   /* extDict indexes relative to this position */
118500 +    U32 dictLimit;          /* below that point, need extDict */
118501 +    U32 lowLimit;           /* below that point, no more valid data */
118502 +} ZSTD_window_t;
118504 +typedef struct ZSTD_matchState_t ZSTD_matchState_t;
118505 +struct ZSTD_matchState_t {
118506 +    ZSTD_window_t window;   /* State for window round buffer management */
118507 +    U32 loadedDictEnd;      /* index of end of dictionary, within context's referential.
118508 +                             * When loadedDictEnd != 0, a dictionary is in use, and still valid.
118509 +                             * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
118510 +                             * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
118511 +                             * When dict referential is copied into active context (i.e. not attached),
118512 +                             * loadedDictEnd == dictSize, since referential starts from zero.
118513 +                             */
118514 +    U32 nextToUpdate;       /* index from which to continue table update */
118515 +    U32 hashLog3;           /* dispatch table for matches of len==3 : larger == faster, more memory */
118516 +    U32* hashTable;
118517 +    U32* hashTable3;
118518 +    U32* chainTable;
118519 +    int dedicatedDictSearch;  /* Indicates whether this matchState is using the
118520 +                               * dedicated dictionary search structure.
118521 +                               */
118522 +    optState_t opt;         /* optimal parser state */
118523 +    const ZSTD_matchState_t* dictMatchState;
118524 +    ZSTD_compressionParameters cParams;
118525 +    const rawSeqStore_t* ldmSeqStore;
118528 +typedef struct {
118529 +    ZSTD_compressedBlockState_t* prevCBlock;
118530 +    ZSTD_compressedBlockState_t* nextCBlock;
118531 +    ZSTD_matchState_t matchState;
118532 +} ZSTD_blockState_t;
118534 +typedef struct {
118535 +    U32 offset;
118536 +    U32 checksum;
118537 +} ldmEntry_t;
118539 +typedef struct {
118540 +    BYTE const* split;
118541 +    U32 hash;
118542 +    U32 checksum;
118543 +    ldmEntry_t* bucket;
118544 +} ldmMatchCandidate_t;
118546 +#define LDM_BATCH_SIZE 64
118548 +typedef struct {
118549 +    ZSTD_window_t window;   /* State for the window round buffer management */
118550 +    ldmEntry_t* hashTable;
118551 +    U32 loadedDictEnd;
118552 +    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
118553 +    size_t splitIndices[LDM_BATCH_SIZE];
118554 +    ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
118555 +} ldmState_t;
118557 +typedef struct {
118558 +    U32 enableLdm;          /* 1 if enable long distance matching */
118559 +    U32 hashLog;            /* Log size of hashTable */
118560 +    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
118561 +    U32 minMatchLength;     /* Minimum match length */
118562 +    U32 hashRateLog;       /* Log number of entries to skip */
118563 +    U32 windowLog;          /* Window log for the LDM */
118564 +} ldmParams_t;
118566 +typedef struct {
118567 +    int collectSequences;
118568 +    ZSTD_Sequence* seqStart;
118569 +    size_t seqIndex;
118570 +    size_t maxSequences;
118571 +} SeqCollector;
118573 +struct ZSTD_CCtx_params_s {
118574 +    ZSTD_format_e format;
118575 +    ZSTD_compressionParameters cParams;
118576 +    ZSTD_frameParameters fParams;
118578 +    int compressionLevel;
118579 +    int forceWindow;           /* force back-references to respect limit of
118580 +                                * 1<<wLog, even for dictionary */
118581 +    size_t targetCBlockSize;   /* Tries to fit compressed block size to be around targetCBlockSize.
118582 +                                * No target when targetCBlockSize == 0.
118583 +                                * There is no guarantee on compressed block size */
118584 +    int srcSizeHint;           /* User's best guess of source size.
118585 +                                * Hint is not valid when srcSizeHint == 0.
118586 +                                * There is no guarantee that hint is close to actual source size */
118588 +    ZSTD_dictAttachPref_e attachDictPref;
118589 +    ZSTD_literalCompressionMode_e literalCompressionMode;
118591 +    /* Multithreading: used to pass parameters to mtctx */
118592 +    int nbWorkers;
118593 +    size_t jobSize;
118594 +    int overlapLog;
118595 +    int rsyncable;
118597 +    /* Long distance matching parameters */
118598 +    ldmParams_t ldmParams;
118600 +    /* Dedicated dict search algorithm trigger */
118601 +    int enableDedicatedDictSearch;
118603 +    /* Input/output buffer modes */
118604 +    ZSTD_bufferMode_e inBufferMode;
118605 +    ZSTD_bufferMode_e outBufferMode;
118607 +    /* Sequence compression API */
118608 +    ZSTD_sequenceFormat_e blockDelimiters;
118609 +    int validateSequences;
118611 +    /* Internal use, for createCCtxParams() and freeCCtxParams() only */
118612 +    ZSTD_customMem customMem;
118613 +};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
118615 +#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
118616 +#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
118619 + * Indicates whether this compression proceeds directly from user-provided
118620 + * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
118621 + * whether the context needs to buffer the input/output (ZSTDb_buffered).
118622 + */
118623 +typedef enum {
118624 +    ZSTDb_not_buffered,
118625 +    ZSTDb_buffered
118626 +} ZSTD_buffered_policy_e;
118628 +struct ZSTD_CCtx_s {
118629 +    ZSTD_compressionStage_e stage;
118630 +    int cParamsChanged;                  /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
118631 +    int bmi2;                            /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
118632 +    ZSTD_CCtx_params requestedParams;
118633 +    ZSTD_CCtx_params appliedParams;
118634 +    U32   dictID;
118635 +    size_t dictContentSize;
118637 +    ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
118638 +    size_t blockSize;
118639 +    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
118640 +    unsigned long long consumedSrcSize;
118641 +    unsigned long long producedCSize;
118642 +    struct xxh64_state xxhState;
118643 +    ZSTD_customMem customMem;
118644 +    ZSTD_threadPool* pool;
118645 +    size_t staticSize;
118646 +    SeqCollector seqCollector;
118647 +    int isFirstBlock;
118648 +    int initialized;
118650 +    seqStore_t seqStore;      /* sequences storage ptrs */
118651 +    ldmState_t ldmState;      /* long distance matching state */
118652 +    rawSeq* ldmSequences;     /* Storage for the ldm output sequences */
118653 +    size_t maxNbLdmSequences;
118654 +    rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
118655 +    ZSTD_blockState_t blockState;
118656 +    U32* entropyWorkspace;  /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
118658 +    /* Wether we are streaming or not */
118659 +    ZSTD_buffered_policy_e bufferedPolicy;
118661 +    /* streaming */
118662 +    char*  inBuff;
118663 +    size_t inBuffSize;
118664 +    size_t inToCompress;
118665 +    size_t inBuffPos;
118666 +    size_t inBuffTarget;
118667 +    char*  outBuff;
118668 +    size_t outBuffSize;
118669 +    size_t outBuffContentSize;
118670 +    size_t outBuffFlushedSize;
118671 +    ZSTD_cStreamStage streamStage;
118672 +    U32    frameEnded;
118674 +    /* Stable in/out buffer verification */
118675 +    ZSTD_inBuffer expectedInBuffer;
118676 +    size_t expectedOutBufferSize;
118678 +    /* Dictionary */
118679 +    ZSTD_localDict localDict;
118680 +    const ZSTD_CDict* cdict;
118681 +    ZSTD_prefixDict prefixDict;   /* single-usage dictionary */
118683 +    /* Multi-threading */
118685 +    /* Tracing */
118688 +typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
118690 +typedef enum {
118691 +    ZSTD_noDict = 0,
118692 +    ZSTD_extDict = 1,
118693 +    ZSTD_dictMatchState = 2,
118694 +    ZSTD_dedicatedDictSearch = 3
118695 +} ZSTD_dictMode_e;
118697 +typedef enum {
118698 +    ZSTD_cpm_noAttachDict = 0,  /* Compression with ZSTD_noDict or ZSTD_extDict.
118699 +                                 * In this mode we use both the srcSize and the dictSize
118700 +                                 * when selecting and adjusting parameters.
118701 +                                 */
118702 +    ZSTD_cpm_attachDict = 1,    /* Compression with ZSTD_dictMatchState or ZSTD_dedicatedDictSearch.
118703 +                                 * In this mode we only take the srcSize into account when selecting
118704 +                                 * and adjusting parameters.
118705 +                                 */
118706 +    ZSTD_cpm_createCDict = 2,   /* Creating a CDict.
118707 +                                 * In this mode we take both the source size and the dictionary size
118708 +                                 * into account when selecting and adjusting the parameters.
118709 +                                 */
118710 +    ZSTD_cpm_unknown = 3,       /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
118711 +                                 * We don't know what these parameters are for. We default to the legacy
118712 +                                 * behavior of taking both the source size and the dict size into account
118713 +                                 * when selecting and adjusting parameters.
118714 +                                 */
118715 +} ZSTD_cParamMode_e;
118717 +typedef size_t (*ZSTD_blockCompressor) (
118718 +        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
118719 +        void const* src, size_t srcSize);
118720 +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
118723 +MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
118725 +    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
118726 +                                       8,  9, 10, 11, 12, 13, 14, 15,
118727 +                                      16, 16, 17, 17, 18, 18, 19, 19,
118728 +                                      20, 20, 20, 20, 21, 21, 21, 21,
118729 +                                      22, 22, 22, 22, 22, 22, 22, 22,
118730 +                                      23, 23, 23, 23, 23, 23, 23, 23,
118731 +                                      24, 24, 24, 24, 24, 24, 24, 24,
118732 +                                      24, 24, 24, 24, 24, 24, 24, 24 };
118733 +    static const U32 LL_deltaCode = 19;
118734 +    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
118737 +/* ZSTD_MLcode() :
118738 + * note : mlBase = matchLength - MINMATCH;
118739 + *        because it's the format it's stored in seqStore->sequences */
118740 +MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
118742 +    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
118743 +                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
118744 +                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
118745 +                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
118746 +                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
118747 +                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
118748 +                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
118749 +                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
118750 +    static const U32 ML_deltaCode = 36;
118751 +    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
118754 +typedef struct repcodes_s {
118755 +    U32 rep[3];
118756 +} repcodes_t;
118758 +MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
118760 +    repcodes_t newReps;
118761 +    if (offset >= ZSTD_REP_NUM) {  /* full offset */
118762 +        newReps.rep[2] = rep[1];
118763 +        newReps.rep[1] = rep[0];
118764 +        newReps.rep[0] = offset - ZSTD_REP_MOVE;
118765 +    } else {   /* repcode */
118766 +        U32 const repCode = offset + ll0;
118767 +        if (repCode > 0) {  /* note : if repCode==0, no change */
118768 +            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
118769 +            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
118770 +            newReps.rep[1] = rep[0];
118771 +            newReps.rep[0] = currentOffset;
118772 +        } else {   /* repCode == 0 */
118773 +            ZSTD_memcpy(&newReps, rep, sizeof(newReps));
118774 +        }
118775 +    }
118776 +    return newReps;
118779 +/* ZSTD_cParam_withinBounds:
118780 + * @return 1 if value is within cParam bounds,
118781 + * 0 otherwise */
118782 +MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
118784 +    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
118785 +    if (ZSTD_isError(bounds.error)) return 0;
118786 +    if (value < bounds.lowerBound) return 0;
118787 +    if (value > bounds.upperBound) return 0;
118788 +    return 1;
118791 +/* ZSTD_noCompressBlock() :
118792 + * Writes uncompressed block to dst buffer from given src.
118793 + * Returns the size of the block */
118794 +MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
118796 +    U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
118797 +    RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
118798 +                    dstSize_tooSmall, "dst buf too small for uncompressed block");
118799 +    MEM_writeLE24(dst, cBlockHeader24);
118800 +    ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
118801 +    return ZSTD_blockHeaderSize + srcSize;
118804 +MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
118806 +    BYTE* const op = (BYTE*)dst;
118807 +    U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
118808 +    RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "");
118809 +    MEM_writeLE24(op, cBlockHeader);
118810 +    op[3] = src;
118811 +    return 4;
118815 +/* ZSTD_minGain() :
118816 + * minimum compression required
118817 + * to generate a compress block or a compressed literals section.
118818 + * note : use same formula for both situations */
118819 +MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
118821 +    U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
118822 +    ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
118823 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
118824 +    return (srcSize >> minlog) + 2;
118827 +MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
118829 +    switch (cctxParams->literalCompressionMode) {
118830 +    case ZSTD_lcm_huffman:
118831 +        return 0;
118832 +    case ZSTD_lcm_uncompressed:
118833 +        return 1;
118834 +    default:
118835 +        assert(0 /* impossible: pre-validated */);
118836 +        /* fall-through */
118837 +    case ZSTD_lcm_auto:
118838 +        return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
118839 +    }
118842 +/*! ZSTD_safecopyLiterals() :
118843 + *  memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
118844 + *  Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
118845 + *  large copies.
118846 + */
118847 +static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
118848 +    assert(iend > ilimit_w);
118849 +    if (ip <= ilimit_w) {
118850 +        ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
118851 +        op += ilimit_w - ip;
118852 +        ip = ilimit_w;
118853 +    }
118854 +    while (ip < iend) *op++ = *ip++;
118857 +/*! ZSTD_storeSeq() :
118858 + *  Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
118859 + *  `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
118860 + *  `mlBase` : matchLength - MINMATCH
118861 + *  Allowed to overread literals up to litLimit.
118863 +HINT_INLINE UNUSED_ATTR
118864 +void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
118866 +    BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
118867 +    BYTE const* const litEnd = literals + litLength;
118868 +#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
118869 +    static const BYTE* g_start = NULL;
118870 +    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
118871 +    {   U32 const pos = (U32)((const BYTE*)literals - g_start);
118872 +        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
118873 +               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
118874 +    }
118875 +#endif
118876 +    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
118877 +    /* copy Literals */
118878 +    assert(seqStorePtr->maxNbLit <= 128 KB);
118879 +    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
118880 +    assert(literals + litLength <= litLimit);
118881 +    if (litEnd <= litLimit_w) {
118882 +        /* Common case we can use wildcopy.
118883 +        * First copy 16 bytes, because literals are likely short.
118884 +        */
118885 +        assert(WILDCOPY_OVERLENGTH >= 16);
118886 +        ZSTD_copy16(seqStorePtr->lit, literals);
118887 +        if (litLength > 16) {
118888 +            ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
118889 +        }
118890 +    } else {
118891 +        ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
118892 +    }
118893 +    seqStorePtr->lit += litLength;
118895 +    /* literal Length */
118896 +    if (litLength>0xFFFF) {
118897 +        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
118898 +        seqStorePtr->longLengthID = 1;
118899 +        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
118900 +    }
118901 +    seqStorePtr->sequences[0].litLength = (U16)litLength;
118903 +    /* match offset */
118904 +    seqStorePtr->sequences[0].offset = offCode + 1;
118906 +    /* match Length */
118907 +    if (mlBase>0xFFFF) {
118908 +        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
118909 +        seqStorePtr->longLengthID = 2;
118910 +        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
118911 +    }
118912 +    seqStorePtr->sequences[0].matchLength = (U16)mlBase;
118914 +    seqStorePtr->sequences++;
118918 +/*-*************************************
118919 +*  Match length counter
118920 +***************************************/
118921 +static unsigned ZSTD_NbCommonBytes (size_t val)
118923 +    if (MEM_isLittleEndian()) {
118924 +        if (MEM_64bits()) {
118925 +#       if (__GNUC__ >= 4)
118926 +            return (__builtin_ctzll((U64)val) >> 3);
118927 +#       else
118928 +            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
118929 +                                                     0, 3, 1, 3, 1, 4, 2, 7,
118930 +                                                     0, 2, 3, 6, 1, 5, 3, 5,
118931 +                                                     1, 3, 4, 4, 2, 5, 6, 7,
118932 +                                                     7, 0, 1, 2, 3, 3, 4, 6,
118933 +                                                     2, 6, 5, 5, 3, 4, 5, 6,
118934 +                                                     7, 1, 2, 4, 6, 4, 4, 5,
118935 +                                                     7, 2, 6, 5, 7, 6, 7, 7 };
118936 +            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
118937 +#       endif
118938 +        } else { /* 32 bits */
118939 +#       if (__GNUC__ >= 3)
118940 +            return (__builtin_ctz((U32)val) >> 3);
118941 +#       else
118942 +            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
118943 +                                                     3, 2, 2, 1, 3, 2, 0, 1,
118944 +                                                     3, 3, 1, 2, 2, 2, 2, 0,
118945 +                                                     3, 1, 2, 0, 1, 0, 1, 1 };
118946 +            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
118947 +#       endif
118948 +        }
118949 +    } else {  /* Big Endian CPU */
118950 +        if (MEM_64bits()) {
118951 +#       if (__GNUC__ >= 4)
118952 +            return (__builtin_clzll(val) >> 3);
118953 +#       else
118954 +            unsigned r;
118955 +            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
118956 +            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
118957 +            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
118958 +            r += (!val);
118959 +            return r;
118960 +#       endif
118961 +        } else { /* 32 bits */
118962 +#       if (__GNUC__ >= 3)
118963 +            return (__builtin_clz((U32)val) >> 3);
118964 +#       else
118965 +            unsigned r;
118966 +            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
118967 +            r += (!val);
118968 +            return r;
118969 +#       endif
118970 +    }   }
118974 +MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
118976 +    const BYTE* const pStart = pIn;
118977 +    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
118979 +    if (pIn < pInLoopLimit) {
118980 +        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
118981 +          if (diff) return ZSTD_NbCommonBytes(diff); }
118982 +        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
118983 +        while (pIn < pInLoopLimit) {
118984 +            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
118985 +            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
118986 +            pIn += ZSTD_NbCommonBytes(diff);
118987 +            return (size_t)(pIn - pStart);
118988 +    }   }
118989 +    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
118990 +    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
118991 +    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
118992 +    return (size_t)(pIn - pStart);
118995 +/** ZSTD_count_2segments() :
118996 + *  can count match length with `ip` & `match` in 2 different segments.
118997 + *  convention : on reaching mEnd, match count continue starting from iStart
118998 + */
118999 +MEM_STATIC size_t
119000 +ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
119001 +                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
119003 +    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
119004 +    size_t const matchLength = ZSTD_count(ip, match, vEnd);
119005 +    if (match + matchLength != mEnd) return matchLength;
119006 +    DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
119007 +    DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
119008 +    DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
119009 +    DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
119010 +    DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
119011 +    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
119015 +/*-*************************************
119016 + *  Hashes
119017 + ***************************************/
119018 +static const U32 prime3bytes = 506832829U;
119019 +static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
119020 +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
119022 +static const U32 prime4bytes = 2654435761U;
119023 +static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
119024 +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
119026 +static const U64 prime5bytes = 889523592379ULL;
119027 +static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
119028 +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
119030 +static const U64 prime6bytes = 227718039650203ULL;
119031 +static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
119032 +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
119034 +static const U64 prime7bytes = 58295818150454627ULL;
119035 +static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
119036 +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
119038 +static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
119039 +static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
119040 +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
119042 +MEM_STATIC FORCE_INLINE_ATTR
119043 +size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
119045 +    switch(mls)
119046 +    {
119047 +    default:
119048 +    case 4: return ZSTD_hash4Ptr(p, hBits);
119049 +    case 5: return ZSTD_hash5Ptr(p, hBits);
119050 +    case 6: return ZSTD_hash6Ptr(p, hBits);
119051 +    case 7: return ZSTD_hash7Ptr(p, hBits);
119052 +    case 8: return ZSTD_hash8Ptr(p, hBits);
119053 +    }
119056 +/** ZSTD_ipow() :
119057 + * Return base^exponent.
119058 + */
119059 +static U64 ZSTD_ipow(U64 base, U64 exponent)
119061 +    U64 power = 1;
119062 +    while (exponent) {
119063 +      if (exponent & 1) power *= base;
119064 +      exponent >>= 1;
119065 +      base *= base;
119066 +    }
119067 +    return power;
119070 +#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
119072 +/** ZSTD_rollingHash_append() :
119073 + * Add the buffer to the hash value.
119074 + */
119075 +static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
119077 +    BYTE const* istart = (BYTE const*)buf;
119078 +    size_t pos;
119079 +    for (pos = 0; pos < size; ++pos) {
119080 +        hash *= prime8bytes;
119081 +        hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
119082 +    }
119083 +    return hash;
119086 +/** ZSTD_rollingHash_compute() :
119087 + * Compute the rolling hash value of the buffer.
119088 + */
119089 +MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
119091 +    return ZSTD_rollingHash_append(0, buf, size);
119094 +/** ZSTD_rollingHash_primePower() :
119095 + * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
119096 + * over a window of length bytes.
119097 + */
119098 +MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
119100 +    return ZSTD_ipow(prime8bytes, length - 1);
119103 +/** ZSTD_rollingHash_rotate() :
119104 + * Rotate the rolling hash by one byte.
119105 + */
119106 +MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
119108 +    hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
119109 +    hash *= prime8bytes;
119110 +    hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
119111 +    return hash;
119114 +/*-*************************************
119115 +*  Round buffer management
119116 +***************************************/
119117 +#if (ZSTD_WINDOWLOG_MAX_64 > 31)
119118 +# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
119119 +#endif
119120 +/* Max current allowed */
119121 +#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
119122 +/* Maximum chunk size before overflow correction needs to be called again */
119123 +#define ZSTD_CHUNKSIZE_MAX                                                     \
119124 +    ( ((U32)-1)                  /* Maximum ending current index */            \
119125 +    - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */
119128 + * ZSTD_window_clear():
119129 + * Clears the window containing the history by simply setting it to empty.
119130 + */
119131 +MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
119133 +    size_t const endT = (size_t)(window->nextSrc - window->base);
119134 +    U32 const end = (U32)endT;
119136 +    window->lowLimit = end;
119137 +    window->dictLimit = end;
119141 + * ZSTD_window_hasExtDict():
119142 + * Returns non-zero if the window has a non-empty extDict.
119143 + */
119144 +MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
119146 +    return window.lowLimit < window.dictLimit;
119150 + * ZSTD_matchState_dictMode():
119151 + * Inspects the provided matchState and figures out what dictMode should be
119152 + * passed to the compressor.
119153 + */
119154 +MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
119156 +    return ZSTD_window_hasExtDict(ms->window) ?
119157 +        ZSTD_extDict :
119158 +        ms->dictMatchState != NULL ?
119159 +            (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
119160 +            ZSTD_noDict;
119164 + * ZSTD_window_needOverflowCorrection():
119165 + * Returns non-zero if the indices are getting too large and need overflow
119166 + * protection.
119167 + */
119168 +MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
119169 +                                                  void const* srcEnd)
119171 +    U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
119172 +    return curr > ZSTD_CURRENT_MAX;
119176 + * ZSTD_window_correctOverflow():
119177 + * Reduces the indices to protect from index overflow.
119178 + * Returns the correction made to the indices, which must be applied to every
119179 + * stored index.
119181 + * The least significant cycleLog bits of the indices must remain the same,
119182 + * which may be 0. Every index up to maxDist in the past must be valid.
119183 + * NOTE: (maxDist & cycleMask) must be zero.
119184 + */
119185 +MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
119186 +                                           U32 maxDist, void const* src)
119188 +    /* preemptive overflow correction:
119189 +     * 1. correction is large enough:
119190 +     *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
119191 +     *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
119192 +     *
119193 +     *    current - newCurrent
119194 +     *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
119195 +     *    > (3<<29) - (1<<chainLog)
119196 +     *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)
119197 +     *    > 1<<29
119198 +     *
119199 +     * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
119200 +     *    After correction, current is less than (1<<chainLog + 1<<windowLog).
119201 +     *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
119202 +     *    In 32-bit mode we are safe, because (chainLog <= 29), so
119203 +     *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
119204 +     * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
119205 +     *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
119206 +     */
119207 +    U32 const cycleMask = (1U << cycleLog) - 1;
119208 +    U32 const curr = (U32)((BYTE const*)src - window->base);
119209 +    U32 const currentCycle0 = curr & cycleMask;
119210 +    /* Exclude zero so that newCurrent - maxDist >= 1. */
119211 +    U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
119212 +    U32 const newCurrent = currentCycle1 + maxDist;
119213 +    U32 const correction = curr - newCurrent;
119214 +    assert((maxDist & cycleMask) == 0);
119215 +    assert(curr > newCurrent);
119216 +    /* Loose bound, should be around 1<<29 (see above) */
119217 +    assert(correction > 1<<28);
119219 +    window->base += correction;
119220 +    window->dictBase += correction;
119221 +    if (window->lowLimit <= correction) window->lowLimit = 1;
119222 +    else window->lowLimit -= correction;
119223 +    if (window->dictLimit <= correction) window->dictLimit = 1;
119224 +    else window->dictLimit -= correction;
119226 +    /* Ensure we can still reference the full window. */
119227 +    assert(newCurrent >= maxDist);
119228 +    assert(newCurrent - maxDist >= 1);
119229 +    /* Ensure that lowLimit and dictLimit didn't underflow. */
119230 +    assert(window->lowLimit <= newCurrent);
119231 +    assert(window->dictLimit <= newCurrent);
119233 +    DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
119234 +             window->lowLimit);
119235 +    return correction;
119239 + * ZSTD_window_enforceMaxDist():
119240 + * Updates lowLimit so that:
119241 + *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
119243 + * It ensures index is valid as long as index >= lowLimit.
119244 + * This must be called before a block compression call.
119246 + * loadedDictEnd is only defined if a dictionary is in use for current compression.
119247 + * As the name implies, loadedDictEnd represents the index at end of dictionary.
119248 + * The value lies within context's referential, it can be directly compared to blockEndIdx.
119250 + * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
119251 + * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
119252 + * This is because dictionaries are allowed to be referenced fully
119253 + * as long as the last byte of the dictionary is in the window.
119254 + * Once input has progressed beyond window size, dictionary cannot be referenced anymore.
119256 + * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
119257 + * In dictMatchState mode, lowLimit and dictLimit are the same,
119258 + * and the dictionary is below them.
119259 + * forceWindow and dictMatchState are therefore incompatible.
119260 + */
119261 +MEM_STATIC void
119262 +ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
119263 +                     const void* blockEnd,
119264 +                           U32   maxDist,
119265 +                           U32*  loadedDictEndPtr,
119266 +                     const ZSTD_matchState_t** dictMatchStatePtr)
119268 +    U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
119269 +    U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
119270 +    DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
119271 +                (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
119273 +    /* - When there is no dictionary : loadedDictEnd == 0.
119274 +         In which case, the test (blockEndIdx > maxDist) is merely to avoid
119275 +         overflowing next operation `newLowLimit = blockEndIdx - maxDist`.
119276 +       - When there is a standard dictionary :
119277 +         Index referential is copied from the dictionary,
119278 +         which means it starts from 0.
119279 +         In which case, loadedDictEnd == dictSize,
119280 +         and it makes sense to compare `blockEndIdx > maxDist + dictSize`
119281 +         since `blockEndIdx` also starts from zero.
119282 +       - When there is an attached dictionary :
119283 +         loadedDictEnd is expressed within the referential of the context,
119284 +         so it can be directly compared against blockEndIdx.
119285 +    */
119286 +    if (blockEndIdx > maxDist + loadedDictEnd) {
119287 +        U32 const newLowLimit = blockEndIdx - maxDist;
119288 +        if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
119289 +        if (window->dictLimit < window->lowLimit) {
119290 +            DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
119291 +                        (unsigned)window->dictLimit, (unsigned)window->lowLimit);
119292 +            window->dictLimit = window->lowLimit;
119293 +        }
119294 +        /* On reaching window size, dictionaries are invalidated */
119295 +        if (loadedDictEndPtr) *loadedDictEndPtr = 0;
119296 +        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
119297 +    }
119300 +/* Similar to ZSTD_window_enforceMaxDist(),
119301 + * but only invalidates dictionary
119302 + * when input progresses beyond window size.
119303 + * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
119304 + *              loadedDictEnd uses same referential as window->base
119305 + *              maxDist is the window size */
119306 +MEM_STATIC void
119307 +ZSTD_checkDictValidity(const ZSTD_window_t* window,
119308 +                       const void* blockEnd,
119309 +                             U32   maxDist,
119310 +                             U32*  loadedDictEndPtr,
119311 +                       const ZSTD_matchState_t** dictMatchStatePtr)
119313 +    assert(loadedDictEndPtr != NULL);
119314 +    assert(dictMatchStatePtr != NULL);
119315 +    {   U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
119316 +        U32 const loadedDictEnd = *loadedDictEndPtr;
119317 +        DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
119318 +                    (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
119319 +        assert(blockEndIdx >= loadedDictEnd);
119321 +        if (blockEndIdx > loadedDictEnd + maxDist) {
119322 +            /* On reaching window size, dictionaries are invalidated.
119323 +             * For simplification, if window size is reached anywhere within next block,
119324 +             * the dictionary is invalidated for the full block.
119325 +             */
119326 +            DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
119327 +            *loadedDictEndPtr = 0;
119328 +            *dictMatchStatePtr = NULL;
119329 +        } else {
119330 +            if (*loadedDictEndPtr != 0) {
119331 +                DEBUGLOG(6, "dictionary considered valid for current block");
119332 +    }   }   }
119335 +MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
119336 +    ZSTD_memset(window, 0, sizeof(*window));
119337 +    window->base = (BYTE const*)"";
119338 +    window->dictBase = (BYTE const*)"";
119339 +    window->dictLimit = 1;    /* start from 1, so that 1st position is valid */
119340 +    window->lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
119341 +    window->nextSrc = window->base + 1;   /* see issue #1241 */
119345 + * ZSTD_window_update():
119346 + * Updates the window by appending [src, src + srcSize) to the window.
119347 + * If it is not contiguous, the current prefix becomes the extDict, and we
119348 + * forget about the extDict. Handles overlap of the prefix and extDict.
119349 + * Returns non-zero if the segment is contiguous.
119350 + */
119351 +MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
119352 +                                  void const* src, size_t srcSize)
119354 +    BYTE const* const ip = (BYTE const*)src;
119355 +    U32 contiguous = 1;
119356 +    DEBUGLOG(5, "ZSTD_window_update");
119357 +    if (srcSize == 0)
119358 +        return contiguous;
119359 +    assert(window->base != NULL);
119360 +    assert(window->dictBase != NULL);
119361 +    /* Check if blocks follow each other */
119362 +    if (src != window->nextSrc) {
119363 +        /* not contiguous */
119364 +        size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
119365 +        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
119366 +        window->lowLimit = window->dictLimit;
119367 +        assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
119368 +        window->dictLimit = (U32)distanceFromBase;
119369 +        window->dictBase = window->base;
119370 +        window->base = ip - distanceFromBase;
119371 +        /* ms->nextToUpdate = window->dictLimit; */
119372 +        if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit;   /* too small extDict */
119373 +        contiguous = 0;
119374 +    }
119375 +    window->nextSrc = ip + srcSize;
119376 +    /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
119377 +    if ( (ip+srcSize > window->dictBase + window->lowLimit)
119378 +       & (ip < window->dictBase + window->dictLimit)) {
119379 +        ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
119380 +        U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
119381 +        window->lowLimit = lowLimitMax;
119382 +        DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
119383 +    }
119384 +    return contiguous;
119388 + * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
119389 + */
119390 +MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
119392 +    U32    const maxDistance = 1U << windowLog;
119393 +    U32    const lowestValid = ms->window.lowLimit;
119394 +    U32    const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
119395 +    U32    const isDictionary = (ms->loadedDictEnd != 0);
119396 +    /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
119397 +     * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
119398 +     * valid for the entire block. So this check is sufficient to find the lowest valid match index.
119399 +     */
119400 +    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
119401 +    return matchLowest;
119405 + * Returns the lowest allowed match index in the prefix.
119406 + */
119407 +MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
119409 +    U32    const maxDistance = 1U << windowLog;
119410 +    U32    const lowestValid = ms->window.dictLimit;
119411 +    U32    const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
119412 +    U32    const isDictionary = (ms->loadedDictEnd != 0);
119413 +    /* When computing the lowest prefix index we need to take the dictionary into account to handle
119414 +     * the edge case where the dictionary and the source are contiguous in memory.
119415 +     */
119416 +    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
119417 +    return matchLowest;
119422 +/* debug functions */
119423 +#if (DEBUGLEVEL>=2)
119425 +MEM_STATIC double ZSTD_fWeight(U32 rawStat)
119427 +    U32 const fp_accuracy = 8;
119428 +    U32 const fp_multiplier = (1 << fp_accuracy);
119429 +    U32 const newStat = rawStat + 1;
119430 +    U32 const hb = ZSTD_highbit32(newStat);
119431 +    U32 const BWeight = hb * fp_multiplier;
119432 +    U32 const FWeight = (newStat << fp_accuracy) >> hb;
119433 +    U32 const weight = BWeight + FWeight;
119434 +    assert(hb + fp_accuracy < 31);
119435 +    return (double)weight / fp_multiplier;
119438 +/* display a table content,
119439 + * listing each element, its frequency, and its predicted bit cost */
119440 +MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
119442 +    unsigned u, sum;
119443 +    for (u=0, sum=0; u<=max; u++) sum += table[u];
119444 +    DEBUGLOG(2, "total nb elts: %u", sum);
119445 +    for (u=0; u<=max; u++) {
119446 +        DEBUGLOG(2, "%2u: %5u  (%.2f)",
119447 +                u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
119448 +    }
119451 +#endif
119455 +/* ===============================================================
119456 + * Shared internal declarations
119457 + * These prototypes may be called from sources not in lib/compress
119458 + * =============================================================== */
119460 +/* ZSTD_loadCEntropy() :
119461 + * dict : must point at beginning of a valid zstd dictionary.
119462 + * return : size of dictionary header (size of magic number + dict ID + entropy tables)
119463 + * assumptions : magic number supposed already checked
119464 + *               and dictSize >= 8 */
119465 +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
119466 +                         const void* const dict, size_t dictSize);
119468 +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
119470 +/* ==============================================================
119471 + * Private declarations
119472 + * These prototypes shall only be called from within lib/compress
119473 + * ============================================================== */
119475 +/* ZSTD_getCParamsFromCCtxParams() :
119476 + * cParams are built depending on compressionLevel, src size hints,
119477 + * LDM and manually set compression parameters.
119478 + * Note: srcSizeHint == 0 means 0!
119479 + */
119480 +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
119481 +        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
119483 +/*! ZSTD_initCStream_internal() :
119484 + *  Private use only. Init streaming operation.
119485 + *  expects params to be valid.
119486 + *  must receive dict, or cdict, or none, but not both.
119487 + *  @return : 0, or an error code */
119488 +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
119489 +                     const void* dict, size_t dictSize,
119490 +                     const ZSTD_CDict* cdict,
119491 +                     const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
119493 +void ZSTD_resetSeqStore(seqStore_t* ssPtr);
119495 +/*! ZSTD_getCParamsFromCDict() :
119496 + *  as the name implies */
119497 +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
119499 +/* ZSTD_compressBegin_advanced_internal() :
119500 + * Private use only. To be called from zstdmt_compress.c. */
119501 +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
119502 +                                    const void* dict, size_t dictSize,
119503 +                                    ZSTD_dictContentType_e dictContentType,
119504 +                                    ZSTD_dictTableLoadMethod_e dtlm,
119505 +                                    const ZSTD_CDict* cdict,
119506 +                                    const ZSTD_CCtx_params* params,
119507 +                                    unsigned long long pledgedSrcSize);
119509 +/* ZSTD_compress_advanced_internal() :
119510 + * Private use only. To be called from zstdmt_compress.c. */
119511 +size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
119512 +                                       void* dst, size_t dstCapacity,
119513 +                                 const void* src, size_t srcSize,
119514 +                                 const void* dict,size_t dictSize,
119515 +                                 const ZSTD_CCtx_params* params);
119518 +/* ZSTD_writeLastEmptyBlock() :
119519 + * output an empty Block with end-of-frame mark to complete a frame
119520 + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
119521 + *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
119522 + */
119523 +size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
119526 +/* ZSTD_referenceExternalSequences() :
119527 + * Must be called before starting a compression operation.
119528 + * seqs must parse a prefix of the source.
119529 + * This cannot be used when long range matching is enabled.
119530 + * Zstd will use these sequences, and pass the literals to a secondary block
119531 + * compressor.
119532 + * @return : An error code on failure.
119533 + * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
119534 + * access and data corruption.
119535 + */
119536 +size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
119538 +/** ZSTD_cycleLog() :
119539 + *  condition for correct operation : hashLog > 1 */
119540 +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
119542 +/** ZSTD_CCtx_trace() :
119543 + *  Trace the end of a compression call.
119544 + */
119545 +void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
119547 +#endif /* ZSTD_COMPRESS_H */
119548 diff --git a/lib/zstd/compress/zstd_compress_literals.c b/lib/zstd/compress/zstd_compress_literals.c
119549 new file mode 100644
119550 index 000000000000..655bcda4d1f1
119551 --- /dev/null
119552 +++ b/lib/zstd/compress/zstd_compress_literals.c
119553 @@ -0,0 +1,158 @@
119555 + * Copyright (c) Yann Collet, Facebook, Inc.
119556 + * All rights reserved.
119558 + * This source code is licensed under both the BSD-style license (found in the
119559 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
119560 + * in the COPYING file in the root directory of this source tree).
119561 + * You may select, at your option, one of the above-listed licenses.
119562 + */
119564 + /*-*************************************
119565 + *  Dependencies
119566 + ***************************************/
119567 +#include "zstd_compress_literals.h"
119569 +size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
119571 +    BYTE* const ostart = (BYTE*)dst;
119572 +    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
119574 +    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
119576 +    switch(flSize)
119577 +    {
119578 +        case 1: /* 2 - 1 - 5 */
119579 +            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
119580 +            break;
119581 +        case 2: /* 2 - 2 - 12 */
119582 +            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
119583 +            break;
119584 +        case 3: /* 2 - 2 - 20 */
119585 +            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
119586 +            break;
119587 +        default:   /* not necessary : flSize is {1,2,3} */
119588 +            assert(0);
119589 +    }
119591 +    ZSTD_memcpy(ostart + flSize, src, srcSize);
119592 +    DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
119593 +    return srcSize + flSize;
119596 +size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
119598 +    BYTE* const ostart = (BYTE*)dst;
119599 +    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
119601 +    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
119603 +    switch(flSize)
119604 +    {
119605 +        case 1: /* 2 - 1 - 5 */
119606 +            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
119607 +            break;
119608 +        case 2: /* 2 - 2 - 12 */
119609 +            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
119610 +            break;
119611 +        case 3: /* 2 - 2 - 20 */
119612 +            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
119613 +            break;
119614 +        default:   /* not necessary : flSize is {1,2,3} */
119615 +            assert(0);
119616 +    }
119618 +    ostart[flSize] = *(const BYTE*)src;
119619 +    DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
119620 +    return flSize+1;
119623 +size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
119624 +                              ZSTD_hufCTables_t* nextHuf,
119625 +                              ZSTD_strategy strategy, int disableLiteralCompression,
119626 +                              void* dst, size_t dstCapacity,
119627 +                        const void* src, size_t srcSize,
119628 +                              void* entropyWorkspace, size_t entropyWorkspaceSize,
119629 +                        const int bmi2)
119631 +    size_t const minGain = ZSTD_minGain(srcSize, strategy);
119632 +    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
119633 +    BYTE*  const ostart = (BYTE*)dst;
119634 +    U32 singleStream = srcSize < 256;
119635 +    symbolEncodingType_e hType = set_compressed;
119636 +    size_t cLitSize;
119638 +    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
119639 +                disableLiteralCompression, (U32)srcSize);
119641 +    /* Prepare nextEntropy assuming reusing the existing table */
119642 +    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
119644 +    if (disableLiteralCompression)
119645 +        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
119647 +    /* small ? don't even attempt compression (speed opt) */
119648 +#   define COMPRESS_LITERALS_SIZE_MIN 63
119649 +    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
119650 +        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
119651 +    }
119653 +    RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
119654 +    {   HUF_repeat repeat = prevHuf->repeatMode;
119655 +        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
119656 +        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
119657 +        cLitSize = singleStream ?
119658 +            HUF_compress1X_repeat(
119659 +                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
119660 +                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
119661 +                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
119662 +            HUF_compress4X_repeat(
119663 +                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
119664 +                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
119665 +                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
119666 +        if (repeat != HUF_repeat_none) {
119667 +            /* reused the existing table */
119668 +            DEBUGLOG(5, "Reusing previous huffman table");
119669 +            hType = set_repeat;
119670 +        }
119671 +    }
119673 +    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
119674 +        ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
119675 +        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
119676 +    }
119677 +    if (cLitSize==1) {
119678 +        ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
119679 +        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
119680 +    }
119682 +    if (hType == set_compressed) {
119683 +        /* using a newly constructed table */
119684 +        nextHuf->repeatMode = HUF_repeat_check;
119685 +    }
119687 +    /* Build header */
119688 +    switch(lhSize)
119689 +    {
119690 +    case 3: /* 2 - 2 - 10 - 10 */
119691 +        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
119692 +            MEM_writeLE24(ostart, lhc);
119693 +            break;
119694 +        }
119695 +    case 4: /* 2 - 2 - 14 - 14 */
119696 +        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
119697 +            MEM_writeLE32(ostart, lhc);
119698 +            break;
119699 +        }
119700 +    case 5: /* 2 - 2 - 18 - 18 */
119701 +        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
119702 +            MEM_writeLE32(ostart, lhc);
119703 +            ostart[4] = (BYTE)(cLitSize >> 10);
119704 +            break;
119705 +        }
119706 +    default:  /* not possible : lhSize is {3,4,5} */
119707 +        assert(0);
119708 +    }
119709 +    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
119710 +    return lhSize+cLitSize;
119712 diff --git a/lib/zstd/compress/zstd_compress_literals.h b/lib/zstd/compress/zstd_compress_literals.h
119713 new file mode 100644
119714 index 000000000000..9904c0cd30a0
119715 --- /dev/null
119716 +++ b/lib/zstd/compress/zstd_compress_literals.h
119717 @@ -0,0 +1,29 @@
119719 + * Copyright (c) Yann Collet, Facebook, Inc.
119720 + * All rights reserved.
119722 + * This source code is licensed under both the BSD-style license (found in the
119723 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
119724 + * in the COPYING file in the root directory of this source tree).
119725 + * You may select, at your option, one of the above-listed licenses.
119726 + */
119728 +#ifndef ZSTD_COMPRESS_LITERALS_H
119729 +#define ZSTD_COMPRESS_LITERALS_H
119731 +#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */
119734 +size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
119736 +size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
119738 +size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
119739 +                              ZSTD_hufCTables_t* nextHuf,
119740 +                              ZSTD_strategy strategy, int disableLiteralCompression,
119741 +                              void* dst, size_t dstCapacity,
119742 +                        const void* src, size_t srcSize,
119743 +                              void* entropyWorkspace, size_t entropyWorkspaceSize,
119744 +                        const int bmi2);
119746 +#endif /* ZSTD_COMPRESS_LITERALS_H */
119747 diff --git a/lib/zstd/compress/zstd_compress_sequences.c b/lib/zstd/compress/zstd_compress_sequences.c
119748 new file mode 100644
119749 index 000000000000..08a5b89019dd
119750 --- /dev/null
119751 +++ b/lib/zstd/compress/zstd_compress_sequences.c
119752 @@ -0,0 +1,439 @@
119754 + * Copyright (c) Yann Collet, Facebook, Inc.
119755 + * All rights reserved.
119757 + * This source code is licensed under both the BSD-style license (found in the
119758 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
119759 + * in the COPYING file in the root directory of this source tree).
119760 + * You may select, at your option, one of the above-listed licenses.
119761 + */
119763 + /*-*************************************
119764 + *  Dependencies
119765 + ***************************************/
119766 +#include "zstd_compress_sequences.h"
119769 + * -log2(x / 256) lookup table for x in [0, 256).
119770 + * If x == 0: Return 0
119771 + * Else: Return floor(-log2(x / 256) * 256)
119772 + */
119773 +static unsigned const kInverseProbabilityLog256[256] = {
119774 +    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
119775 +    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
119776 +    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
119777 +    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
119778 +    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
119779 +    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
119780 +    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
119781 +    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
119782 +    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
119783 +    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
119784 +    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
119785 +    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
119786 +    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
119787 +    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
119788 +    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
119789 +    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
119790 +    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
119791 +    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
119792 +    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
119793 +    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
119794 +    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
119795 +    5,    4,    2,    1,
119798 +static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
119799 +  void const* ptr = ctable;
119800 +  U16 const* u16ptr = (U16 const*)ptr;
119801 +  U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
119802 +  return maxSymbolValue;
119806 + * Returns true if we should use ncount=-1 else we should
119807 + * use ncount=1 for low probability symbols instead.
119808 + */
119809 +static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
119811 +    /* Heuristic: This should cover most blocks <= 16K and
119812 +     * start to fade out after 16K to about 32K depending on
119813 +     * comprssibility.
119814 +     */
119815 +    return nbSeq >= 2048;
119819 + * Returns the cost in bytes of encoding the normalized count header.
119820 + * Returns an error if any of the helper functions return an error.
119821 + */
119822 +static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
119823 +                              size_t const nbSeq, unsigned const FSELog)
119825 +    BYTE wksp[FSE_NCOUNTBOUND];
119826 +    S16 norm[MaxSeq + 1];
119827 +    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
119828 +    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
119829 +    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
119833 + * Returns the cost in bits of encoding the distribution described by count
119834 + * using the entropy bound.
119835 + */
119836 +static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
119838 +    unsigned cost = 0;
119839 +    unsigned s;
119840 +    for (s = 0; s <= max; ++s) {
119841 +        unsigned norm = (unsigned)((256 * count[s]) / total);
119842 +        if (count[s] != 0 && norm == 0)
119843 +            norm = 1;
119844 +        assert(count[s] < total);
119845 +        cost += count[s] * kInverseProbabilityLog256[norm];
119846 +    }
119847 +    return cost >> 8;
119851 + * Returns the cost in bits of encoding the distribution in count using ctable.
119852 + * Returns an error if ctable cannot represent all the symbols in count.
119853 + */
119854 +size_t ZSTD_fseBitCost(
119855 +    FSE_CTable const* ctable,
119856 +    unsigned const* count,
119857 +    unsigned const max)
119859 +    unsigned const kAccuracyLog = 8;
119860 +    size_t cost = 0;
119861 +    unsigned s;
119862 +    FSE_CState_t cstate;
119863 +    FSE_initCState(&cstate, ctable);
119864 +    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
119865 +        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
119866 +                    ZSTD_getFSEMaxSymbolValue(ctable), max);
119867 +        return ERROR(GENERIC);
119868 +    }
119869 +    for (s = 0; s <= max; ++s) {
119870 +        unsigned const tableLog = cstate.stateLog;
119871 +        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
119872 +        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
119873 +        if (count[s] == 0)
119874 +            continue;
119875 +        if (bitCost >= badCost) {
119876 +            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
119877 +            return ERROR(GENERIC);
119878 +        }
119879 +        cost += (size_t)count[s] * bitCost;
119880 +    }
119881 +    return cost >> kAccuracyLog;
119885 + * Returns the cost in bits of encoding the distribution in count using the
119886 + * table described by norm. The max symbol support by norm is assumed >= max.
119887 + * norm must be valid for every symbol with non-zero probability in count.
119888 + */
119889 +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
119890 +                             unsigned const* count, unsigned const max)
119892 +    unsigned const shift = 8 - accuracyLog;
119893 +    size_t cost = 0;
119894 +    unsigned s;
119895 +    assert(accuracyLog <= 8);
119896 +    for (s = 0; s <= max; ++s) {
119897 +        unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
119898 +        unsigned const norm256 = normAcc << shift;
119899 +        assert(norm256 > 0);
119900 +        assert(norm256 < 256);
119901 +        cost += count[s] * kInverseProbabilityLog256[norm256];
119902 +    }
119903 +    return cost >> 8;
119906 +symbolEncodingType_e
119907 +ZSTD_selectEncodingType(
119908 +        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
119909 +        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
119910 +        FSE_CTable const* prevCTable,
119911 +        short const* defaultNorm, U32 defaultNormLog,
119912 +        ZSTD_defaultPolicy_e const isDefaultAllowed,
119913 +        ZSTD_strategy const strategy)
119915 +    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
119916 +    if (mostFrequent == nbSeq) {
119917 +        *repeatMode = FSE_repeat_none;
119918 +        if (isDefaultAllowed && nbSeq <= 2) {
119919 +            /* Prefer set_basic over set_rle when there are 2 or less symbols,
119920 +             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
119921 +             * If basic encoding isn't possible, always choose RLE.
119922 +             */
119923 +            DEBUGLOG(5, "Selected set_basic");
119924 +            return set_basic;
119925 +        }
119926 +        DEBUGLOG(5, "Selected set_rle");
119927 +        return set_rle;
119928 +    }
119929 +    if (strategy < ZSTD_lazy) {
119930 +        if (isDefaultAllowed) {
119931 +            size_t const staticFse_nbSeq_max = 1000;
119932 +            size_t const mult = 10 - strategy;
119933 +            size_t const baseLog = 3;
119934 +            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
119935 +            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
119936 +            assert(mult <= 9 && mult >= 7);
119937 +            if ( (*repeatMode == FSE_repeat_valid)
119938 +              && (nbSeq < staticFse_nbSeq_max) ) {
119939 +                DEBUGLOG(5, "Selected set_repeat");
119940 +                return set_repeat;
119941 +            }
119942 +            if ( (nbSeq < dynamicFse_nbSeq_min)
119943 +              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
119944 +                DEBUGLOG(5, "Selected set_basic");
119945 +                /* The format allows default tables to be repeated, but it isn't useful.
119946 +                 * When using simple heuristics to select encoding type, we don't want
119947 +                 * to confuse these tables with dictionaries. When running more careful
119948 +                 * analysis, we don't need to waste time checking both repeating tables
119949 +                 * and default tables.
119950 +                 */
119951 +                *repeatMode = FSE_repeat_none;
119952 +                return set_basic;
119953 +            }
119954 +        }
119955 +    } else {
119956 +        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
119957 +        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
119958 +        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
119959 +        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
119961 +        if (isDefaultAllowed) {
119962 +            assert(!ZSTD_isError(basicCost));
119963 +            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
119964 +        }
119965 +        assert(!ZSTD_isError(NCountCost));
119966 +        assert(compressedCost < ERROR(maxCode));
119967 +        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
119968 +                    (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
119969 +        if (basicCost <= repeatCost && basicCost <= compressedCost) {
119970 +            DEBUGLOG(5, "Selected set_basic");
119971 +            assert(isDefaultAllowed);
119972 +            *repeatMode = FSE_repeat_none;
119973 +            return set_basic;
119974 +        }
119975 +        if (repeatCost <= compressedCost) {
119976 +            DEBUGLOG(5, "Selected set_repeat");
119977 +            assert(!ZSTD_isError(repeatCost));
119978 +            return set_repeat;
119979 +        }
119980 +        assert(compressedCost < basicCost && compressedCost < repeatCost);
119981 +    }
119982 +    DEBUGLOG(5, "Selected set_compressed");
119983 +    *repeatMode = FSE_repeat_check;
119984 +    return set_compressed;
119987 +typedef struct {
119988 +    S16 norm[MaxSeq + 1];
119989 +    U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
119990 +} ZSTD_BuildCTableWksp;
119992 +size_t
119993 +ZSTD_buildCTable(void* dst, size_t dstCapacity,
119994 +                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
119995 +                unsigned* count, U32 max,
119996 +                const BYTE* codeTable, size_t nbSeq,
119997 +                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
119998 +                const FSE_CTable* prevCTable, size_t prevCTableSize,
119999 +                void* entropyWorkspace, size_t entropyWorkspaceSize)
120001 +    BYTE* op = (BYTE*)dst;
120002 +    const BYTE* const oend = op + dstCapacity;
120003 +    DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
120005 +    switch (type) {
120006 +    case set_rle:
120007 +        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
120008 +        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
120009 +        *op = codeTable[0];
120010 +        return 1;
120011 +    case set_repeat:
120012 +        ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
120013 +        return 0;
120014 +    case set_basic:
120015 +        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), "");  /* note : could be pre-calculated */
120016 +        return 0;
120017 +    case set_compressed: {
120018 +        ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
120019 +        size_t nbSeq_1 = nbSeq;
120020 +        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
120021 +        if (count[codeTable[nbSeq-1]] > 1) {
120022 +            count[codeTable[nbSeq-1]]--;
120023 +            nbSeq_1--;
120024 +        }
120025 +        assert(nbSeq_1 > 1);
120026 +        assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
120027 +        (void)entropyWorkspaceSize;
120028 +        FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
120029 +        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog);   /* overflow protected */
120030 +            FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
120031 +            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "");
120032 +            return NCountSize;
120033 +        }
120034 +    }
120035 +    default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
120036 +    }
120039 +FORCE_INLINE_TEMPLATE size_t
120040 +ZSTD_encodeSequences_body(
120041 +            void* dst, size_t dstCapacity,
120042 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
120043 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
120044 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
120045 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
120047 +    BIT_CStream_t blockStream;
120048 +    FSE_CState_t  stateMatchLength;
120049 +    FSE_CState_t  stateOffsetBits;
120050 +    FSE_CState_t  stateLitLength;
120052 +    RETURN_ERROR_IF(
120053 +        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
120054 +        dstSize_tooSmall, "not enough space remaining");
120055 +    DEBUGLOG(6, "available space for bitstream : %i  (dstCapacity=%u)",
120056 +                (int)(blockStream.endPtr - blockStream.startPtr),
120057 +                (unsigned)dstCapacity);
120059 +    /* first symbols */
120060 +    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
120061 +    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
120062 +    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
120063 +    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
120064 +    if (MEM_32bits()) BIT_flushBits(&blockStream);
120065 +    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
120066 +    if (MEM_32bits()) BIT_flushBits(&blockStream);
120067 +    if (longOffsets) {
120068 +        U32 const ofBits = ofCodeTable[nbSeq-1];
120069 +        unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
120070 +        if (extraBits) {
120071 +            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
120072 +            BIT_flushBits(&blockStream);
120073 +        }
120074 +        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
120075 +                    ofBits - extraBits);
120076 +    } else {
120077 +        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
120078 +    }
120079 +    BIT_flushBits(&blockStream);
120081 +    {   size_t n;
120082 +        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
120083 +            BYTE const llCode = llCodeTable[n];
120084 +            BYTE const ofCode = ofCodeTable[n];
120085 +            BYTE const mlCode = mlCodeTable[n];
120086 +            U32  const llBits = LL_bits[llCode];
120087 +            U32  const ofBits = ofCode;
120088 +            U32  const mlBits = ML_bits[mlCode];
120089 +            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
120090 +                        (unsigned)sequences[n].litLength,
120091 +                        (unsigned)sequences[n].matchLength + MINMATCH,
120092 +                        (unsigned)sequences[n].offset);
120093 +                                                                            /* 32b*/  /* 64b*/
120094 +                                                                            /* (7)*/  /* (7)*/
120095 +            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
120096 +            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
120097 +            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
120098 +            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
120099 +            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
120100 +                BIT_flushBits(&blockStream);                                /* (7)*/
120101 +            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
120102 +            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
120103 +            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
120104 +            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
120105 +            if (longOffsets) {
120106 +                unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
120107 +                if (extraBits) {
120108 +                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
120109 +                    BIT_flushBits(&blockStream);                            /* (7)*/
120110 +                }
120111 +                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
120112 +                            ofBits - extraBits);                            /* 31 */
120113 +            } else {
120114 +                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
120115 +            }
120116 +            BIT_flushBits(&blockStream);                                    /* (7)*/
120117 +            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
120118 +    }   }
120120 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
120121 +    FSE_flushCState(&blockStream, &stateMatchLength);
120122 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
120123 +    FSE_flushCState(&blockStream, &stateOffsetBits);
120124 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
120125 +    FSE_flushCState(&blockStream, &stateLitLength);
120127 +    {   size_t const streamSize = BIT_closeCStream(&blockStream);
120128 +        RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
120129 +        return streamSize;
120130 +    }
120133 +static size_t
120134 +ZSTD_encodeSequences_default(
120135 +            void* dst, size_t dstCapacity,
120136 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
120137 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
120138 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
120139 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
120141 +    return ZSTD_encodeSequences_body(dst, dstCapacity,
120142 +                                    CTable_MatchLength, mlCodeTable,
120143 +                                    CTable_OffsetBits, ofCodeTable,
120144 +                                    CTable_LitLength, llCodeTable,
120145 +                                    sequences, nbSeq, longOffsets);
120149 +#if DYNAMIC_BMI2
120151 +static TARGET_ATTRIBUTE("bmi2") size_t
120152 +ZSTD_encodeSequences_bmi2(
120153 +            void* dst, size_t dstCapacity,
120154 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
120155 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
120156 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
120157 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
120159 +    return ZSTD_encodeSequences_body(dst, dstCapacity,
120160 +                                    CTable_MatchLength, mlCodeTable,
120161 +                                    CTable_OffsetBits, ofCodeTable,
120162 +                                    CTable_LitLength, llCodeTable,
120163 +                                    sequences, nbSeq, longOffsets);
120166 +#endif
120168 +size_t ZSTD_encodeSequences(
120169 +            void* dst, size_t dstCapacity,
120170 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
120171 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
120172 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
120173 +            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
120175 +    DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
120176 +#if DYNAMIC_BMI2
120177 +    if (bmi2) {
120178 +        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
120179 +                                         CTable_MatchLength, mlCodeTable,
120180 +                                         CTable_OffsetBits, ofCodeTable,
120181 +                                         CTable_LitLength, llCodeTable,
120182 +                                         sequences, nbSeq, longOffsets);
120183 +    }
120184 +#endif
120185 +    (void)bmi2;
120186 +    return ZSTD_encodeSequences_default(dst, dstCapacity,
120187 +                                        CTable_MatchLength, mlCodeTable,
120188 +                                        CTable_OffsetBits, ofCodeTable,
120189 +                                        CTable_LitLength, llCodeTable,
120190 +                                        sequences, nbSeq, longOffsets);
120192 diff --git a/lib/zstd/compress/zstd_compress_sequences.h b/lib/zstd/compress/zstd_compress_sequences.h
120193 new file mode 100644
120194 index 000000000000..7991364c2f71
120195 --- /dev/null
120196 +++ b/lib/zstd/compress/zstd_compress_sequences.h
120197 @@ -0,0 +1,54 @@
120199 + * Copyright (c) Yann Collet, Facebook, Inc.
120200 + * All rights reserved.
120202 + * This source code is licensed under both the BSD-style license (found in the
120203 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
120204 + * in the COPYING file in the root directory of this source tree).
120205 + * You may select, at your option, one of the above-listed licenses.
120206 + */
120208 +#ifndef ZSTD_COMPRESS_SEQUENCES_H
120209 +#define ZSTD_COMPRESS_SEQUENCES_H
120211 +#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
120212 +#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
120214 +typedef enum {
120215 +    ZSTD_defaultDisallowed = 0,
120216 +    ZSTD_defaultAllowed = 1
120217 +} ZSTD_defaultPolicy_e;
120219 +symbolEncodingType_e
120220 +ZSTD_selectEncodingType(
120221 +        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
120222 +        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
120223 +        FSE_CTable const* prevCTable,
120224 +        short const* defaultNorm, U32 defaultNormLog,
120225 +        ZSTD_defaultPolicy_e const isDefaultAllowed,
120226 +        ZSTD_strategy const strategy);
120228 +size_t
120229 +ZSTD_buildCTable(void* dst, size_t dstCapacity,
120230 +                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
120231 +                unsigned* count, U32 max,
120232 +                const BYTE* codeTable, size_t nbSeq,
120233 +                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
120234 +                const FSE_CTable* prevCTable, size_t prevCTableSize,
120235 +                void* entropyWorkspace, size_t entropyWorkspaceSize);
120237 +size_t ZSTD_encodeSequences(
120238 +            void* dst, size_t dstCapacity,
120239 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
120240 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
120241 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
120242 +            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
120244 +size_t ZSTD_fseBitCost(
120245 +    FSE_CTable const* ctable,
120246 +    unsigned const* count,
120247 +    unsigned const max);
120249 +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
120250 +                             unsigned const* count, unsigned const max);
120251 +#endif /* ZSTD_COMPRESS_SEQUENCES_H */
120252 diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c
120253 new file mode 100644
120254 index 000000000000..767f73f5bf3d
120255 --- /dev/null
120256 +++ b/lib/zstd/compress/zstd_compress_superblock.c
120257 @@ -0,0 +1,850 @@
120259 + * Copyright (c) Yann Collet, Facebook, Inc.
120260 + * All rights reserved.
120262 + * This source code is licensed under both the BSD-style license (found in the
120263 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
120264 + * in the COPYING file in the root directory of this source tree).
120265 + * You may select, at your option, one of the above-listed licenses.
120266 + */
120268 + /*-*************************************
120269 + *  Dependencies
120270 + ***************************************/
120271 +#include "zstd_compress_superblock.h"
120273 +#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
120274 +#include "hist.h"                     /* HIST_countFast_wksp */
120275 +#include "zstd_compress_internal.h"
120276 +#include "zstd_compress_sequences.h"
120277 +#include "zstd_compress_literals.h"
120279 +/*-*************************************
120280 +*  Superblock entropy buffer structs
120281 +***************************************/
120282 +/** ZSTD_hufCTablesMetadata_t :
120283 + *  Stores Literals Block Type for a super-block in hType, and
120284 + *  huffman tree description in hufDesBuffer.
120285 + *  hufDesSize refers to the size of huffman tree description in bytes.
120286 + *  This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
120287 +typedef struct {
120288 +    symbolEncodingType_e hType;
120289 +    BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
120290 +    size_t hufDesSize;
120291 +} ZSTD_hufCTablesMetadata_t;
120293 +/** ZSTD_fseCTablesMetadata_t :
120294 + *  Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
120295 + *  fse tables in fseTablesBuffer.
120296 + *  fseTablesSize refers to the size of fse tables in bytes.
120297 + *  This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */
120298 +typedef struct {
120299 +    symbolEncodingType_e llType;
120300 +    symbolEncodingType_e ofType;
120301 +    symbolEncodingType_e mlType;
120302 +    BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
120303 +    size_t fseTablesSize;
120304 +    size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
120305 +} ZSTD_fseCTablesMetadata_t;
120307 +typedef struct {
120308 +    ZSTD_hufCTablesMetadata_t hufMetadata;
120309 +    ZSTD_fseCTablesMetadata_t fseMetadata;
120310 +} ZSTD_entropyCTablesMetadata_t;
120313 +/** ZSTD_buildSuperBlockEntropy_literal() :
120314 + *  Builds entropy for the super-block literals.
120315 + *  Stores literals block type (raw, rle, compressed, repeat) and
120316 + *  huffman description table to hufMetadata.
120317 + *  @return : size of huffman description table or error code */
120318 +static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,
120319 +                                            const ZSTD_hufCTables_t* prevHuf,
120320 +                                                  ZSTD_hufCTables_t* nextHuf,
120321 +                                                  ZSTD_hufCTablesMetadata_t* hufMetadata,
120322 +                                                  const int disableLiteralsCompression,
120323 +                                                  void* workspace, size_t wkspSize)
120325 +    BYTE* const wkspStart = (BYTE*)workspace;
120326 +    BYTE* const wkspEnd = wkspStart + wkspSize;
120327 +    BYTE* const countWkspStart = wkspStart;
120328 +    unsigned* const countWksp = (unsigned*)workspace;
120329 +    const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
120330 +    BYTE* const nodeWksp = countWkspStart + countWkspSize;
120331 +    const size_t nodeWkspSize = wkspEnd-nodeWksp;
120332 +    unsigned maxSymbolValue = 255;
120333 +    unsigned huffLog = HUF_TABLELOG_DEFAULT;
120334 +    HUF_repeat repeat = prevHuf->repeatMode;
120336 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize);
120338 +    /* Prepare nextEntropy assuming reusing the existing table */
120339 +    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
120341 +    if (disableLiteralsCompression) {
120342 +        DEBUGLOG(5, "set_basic - disabled");
120343 +        hufMetadata->hType = set_basic;
120344 +        return 0;
120345 +    }
120347 +    /* small ? don't even attempt compression (speed opt) */
120348 +#   define COMPRESS_LITERALS_SIZE_MIN 63
120349 +    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
120350 +        if (srcSize <= minLitSize) {
120351 +            DEBUGLOG(5, "set_basic - too small");
120352 +            hufMetadata->hType = set_basic;
120353 +            return 0;
120354 +        }
120355 +    }
120357 +    /* Scan input and build symbol stats */
120358 +    {   size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
120359 +        FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
120360 +        if (largest == srcSize) {
120361 +            DEBUGLOG(5, "set_rle");
120362 +            hufMetadata->hType = set_rle;
120363 +            return 0;
120364 +        }
120365 +        if (largest <= (srcSize >> 7)+4) {
120366 +            DEBUGLOG(5, "set_basic - no gain");
120367 +            hufMetadata->hType = set_basic;
120368 +            return 0;
120369 +        }
120370 +    }
120372 +    /* Validate the previous Huffman table */
120373 +    if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
120374 +        repeat = HUF_repeat_none;
120375 +    }
120377 +    /* Build Huffman Tree */
120378 +    ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
120379 +    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
120380 +    {   size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
120381 +                                                    maxSymbolValue, huffLog,
120382 +                                                    nodeWksp, nodeWkspSize);
120383 +        FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
120384 +        huffLog = (U32)maxBits;
120385 +        {   /* Build and write the CTable */
120386 +            size_t const newCSize = HUF_estimateCompressedSize(
120387 +                    (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
120388 +            size_t const hSize = HUF_writeCTable_wksp(
120389 +                    hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
120390 +                    (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
120391 +                    nodeWksp, nodeWkspSize);
120392 +            /* Check against repeating the previous CTable */
120393 +            if (repeat != HUF_repeat_none) {
120394 +                size_t const oldCSize = HUF_estimateCompressedSize(
120395 +                        (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
120396 +                if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
120397 +                    DEBUGLOG(5, "set_repeat - smaller");
120398 +                    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
120399 +                    hufMetadata->hType = set_repeat;
120400 +                    return 0;
120401 +                }
120402 +            }
120403 +            if (newCSize + hSize >= srcSize) {
120404 +                DEBUGLOG(5, "set_basic - no gains");
120405 +                ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
120406 +                hufMetadata->hType = set_basic;
120407 +                return 0;
120408 +            }
120409 +            DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
120410 +            hufMetadata->hType = set_compressed;
120411 +            nextHuf->repeatMode = HUF_repeat_check;
120412 +            return hSize;
120413 +        }
120414 +    }
120417 +/** ZSTD_buildSuperBlockEntropy_sequences() :
120418 + *  Builds entropy for the super-block sequences.
120419 + *  Stores symbol compression modes and fse table to fseMetadata.
120420 + *  @return : size of fse tables or error code */
120421 +static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr,
120422 +                                              const ZSTD_fseCTables_t* prevEntropy,
120423 +                                                    ZSTD_fseCTables_t* nextEntropy,
120424 +                                              const ZSTD_CCtx_params* cctxParams,
120425 +                                                    ZSTD_fseCTablesMetadata_t* fseMetadata,
120426 +                                                    void* workspace, size_t wkspSize)
120428 +    BYTE* const wkspStart = (BYTE*)workspace;
120429 +    BYTE* const wkspEnd = wkspStart + wkspSize;
120430 +    BYTE* const countWkspStart = wkspStart;
120431 +    unsigned* const countWksp = (unsigned*)workspace;
120432 +    const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned);
120433 +    BYTE* const cTableWksp = countWkspStart + countWkspSize;
120434 +    const size_t cTableWkspSize = wkspEnd-cTableWksp;
120435 +    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
120436 +    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
120437 +    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
120438 +    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
120439 +    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
120440 +    const BYTE* const llCodeTable = seqStorePtr->llCode;
120441 +    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
120442 +    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
120443 +    BYTE* const ostart = fseMetadata->fseTablesBuffer;
120444 +    BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
120445 +    BYTE* op = ostart;
120447 +    assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE));
120448 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq);
120449 +    ZSTD_memset(workspace, 0, wkspSize);
120451 +    fseMetadata->lastCountSize = 0;
120452 +    /* convert length/distances into codes */
120453 +    ZSTD_seqToCodes(seqStorePtr);
120454 +    /* build CTable for Literal Lengths */
120455 +    {   U32 LLtype;
120456 +        unsigned max = MaxLL;
120457 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
120458 +        DEBUGLOG(5, "Building LL table");
120459 +        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
120460 +        LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
120461 +                                        countWksp, max, mostFrequent, nbSeq,
120462 +                                        LLFSELog, prevEntropy->litlengthCTable,
120463 +                                        LL_defaultNorm, LL_defaultNormLog,
120464 +                                        ZSTD_defaultAllowed, strategy);
120465 +        assert(set_basic < set_compressed && set_rle < set_compressed);
120466 +        assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
120467 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
120468 +                                                    countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
120469 +                                                    prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
120470 +                                                    cTableWksp, cTableWkspSize);
120471 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
120472 +            if (LLtype == set_compressed)
120473 +                fseMetadata->lastCountSize = countSize;
120474 +            op += countSize;
120475 +            fseMetadata->llType = (symbolEncodingType_e) LLtype;
120476 +    }   }
120477 +    /* build CTable for Offsets */
120478 +    {   U32 Offtype;
120479 +        unsigned max = MaxOff;
120480 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
120481 +        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
120482 +        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
120483 +        DEBUGLOG(5, "Building OF table");
120484 +        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
120485 +        Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
120486 +                                        countWksp, max, mostFrequent, nbSeq,
120487 +                                        OffFSELog, prevEntropy->offcodeCTable,
120488 +                                        OF_defaultNorm, OF_defaultNormLog,
120489 +                                        defaultPolicy, strategy);
120490 +        assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
120491 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
120492 +                                                    countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
120493 +                                                    prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
120494 +                                                    cTableWksp, cTableWkspSize);
120495 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
120496 +            if (Offtype == set_compressed)
120497 +                fseMetadata->lastCountSize = countSize;
120498 +            op += countSize;
120499 +            fseMetadata->ofType = (symbolEncodingType_e) Offtype;
120500 +    }   }
120501 +    /* build CTable for MatchLengths */
120502 +    {   U32 MLtype;
120503 +        unsigned max = MaxML;
120504 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
120505 +        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
120506 +        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
120507 +        MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
120508 +                                        countWksp, max, mostFrequent, nbSeq,
120509 +                                        MLFSELog, prevEntropy->matchlengthCTable,
120510 +                                        ML_defaultNorm, ML_defaultNormLog,
120511 +                                        ZSTD_defaultAllowed, strategy);
120512 +        assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
120513 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
120514 +                                                    countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
120515 +                                                    prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
120516 +                                                    cTableWksp, cTableWkspSize);
120517 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
120518 +            if (MLtype == set_compressed)
120519 +                fseMetadata->lastCountSize = countSize;
120520 +            op += countSize;
120521 +            fseMetadata->mlType = (symbolEncodingType_e) MLtype;
120522 +    }   }
120523 +    assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer));
120524 +    return op-ostart;
120528 +/** ZSTD_buildSuperBlockEntropy() :
120529 + *  Builds entropy for the super-block.
120530 + *  @return : 0 on success or error code */
120531 +static size_t
120532 +ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,
120533 +                      const ZSTD_entropyCTables_t* prevEntropy,
120534 +                            ZSTD_entropyCTables_t* nextEntropy,
120535 +                      const ZSTD_CCtx_params* cctxParams,
120536 +                            ZSTD_entropyCTablesMetadata_t* entropyMetadata,
120537 +                            void* workspace, size_t wkspSize)
120539 +    size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
120540 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy");
120541 +    entropyMetadata->hufMetadata.hufDesSize =
120542 +        ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize,
120543 +                                            &prevEntropy->huf, &nextEntropy->huf,
120544 +                                            &entropyMetadata->hufMetadata,
120545 +                                            ZSTD_disableLiteralsCompression(cctxParams),
120546 +                                            workspace, wkspSize);
120547 +    FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed");
120548 +    entropyMetadata->fseMetadata.fseTablesSize =
120549 +        ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr,
120550 +                                              &prevEntropy->fse, &nextEntropy->fse,
120551 +                                              cctxParams,
120552 +                                              &entropyMetadata->fseMetadata,
120553 +                                              workspace, wkspSize);
120554 +    FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed");
120555 +    return 0;
120558 +/** ZSTD_compressSubBlock_literal() :
120559 + *  Compresses literals section for a sub-block.
120560 + *  When we have to write the Huffman table we will sometimes choose a header
120561 + *  size larger than necessary. This is because we have to pick the header size
120562 + *  before we know the table size + compressed size, so we have a bound on the
120563 + *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
120565 + *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
120566 + *  in writing the header, otherwise it is set to 0.
120568 + *  hufMetadata->hType has literals block type info.
120569 + *      If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
120570 + *      If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
120571 + *      If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block
120572 + *      If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block
120573 + *      and the following sub-blocks' literals sections will be Treeless_Literals_Block.
120574 + *  @return : compressed size of literals section of a sub-block
120575 + *            Or 0 if it unable to compress.
120576 + *            Or error code */
120577 +static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
120578 +                                    const ZSTD_hufCTablesMetadata_t* hufMetadata,
120579 +                                    const BYTE* literals, size_t litSize,
120580 +                                    void* dst, size_t dstSize,
120581 +                                    const int bmi2, int writeEntropy, int* entropyWritten)
120583 +    size_t const header = writeEntropy ? 200 : 0;
120584 +    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
120585 +    BYTE* const ostart = (BYTE*)dst;
120586 +    BYTE* const oend = ostart + dstSize;
120587 +    BYTE* op = ostart + lhSize;
120588 +    U32 const singleStream = lhSize == 3;
120589 +    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
120590 +    size_t cLitSize = 0;
120592 +    (void)bmi2; /* TODO bmi2... */
120594 +    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
120596 +    *entropyWritten = 0;
120597 +    if (litSize == 0 || hufMetadata->hType == set_basic) {
120598 +      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
120599 +      return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
120600 +    } else if (hufMetadata->hType == set_rle) {
120601 +      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
120602 +      return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
120603 +    }
120605 +    assert(litSize > 0);
120606 +    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
120608 +    if (writeEntropy && hufMetadata->hType == set_compressed) {
120609 +        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
120610 +        op += hufMetadata->hufDesSize;
120611 +        cLitSize += hufMetadata->hufDesSize;
120612 +        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
120613 +    }
120615 +    /* TODO bmi2 */
120616 +    {   const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
120617 +                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
120618 +        op += cSize;
120619 +        cLitSize += cSize;
120620 +        if (cSize == 0 || ERR_isError(cSize)) {
120621 +            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
120622 +            return 0;
120623 +        }
120624 +        /* If we expand and we aren't writing a header then emit uncompressed */
120625 +        if (!writeEntropy && cLitSize >= litSize) {
120626 +            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
120627 +            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
120628 +        }
120629 +        /* If we are writing headers then allow expansion that doesn't change our header size. */
120630 +        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
120631 +            assert(cLitSize > litSize);
120632 +            DEBUGLOG(5, "Literals expanded beyond allowed header size");
120633 +            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
120634 +        }
120635 +        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
120636 +    }
120638 +    /* Build header */
120639 +    switch(lhSize)
120640 +    {
120641 +    case 3: /* 2 - 2 - 10 - 10 */
120642 +        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
120643 +            MEM_writeLE24(ostart, lhc);
120644 +            break;
120645 +        }
120646 +    case 4: /* 2 - 2 - 14 - 14 */
120647 +        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
120648 +            MEM_writeLE32(ostart, lhc);
120649 +            break;
120650 +        }
120651 +    case 5: /* 2 - 2 - 18 - 18 */
120652 +        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
120653 +            MEM_writeLE32(ostart, lhc);
120654 +            ostart[4] = (BYTE)(cLitSize >> 10);
120655 +            break;
120656 +        }
120657 +    default:  /* not possible : lhSize is {3,4,5} */
120658 +        assert(0);
120659 +    }
120660 +    *entropyWritten = 1;
120661 +    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
120662 +    return op-ostart;
120665 +static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
120666 +    const seqDef* const sstart = sequences;
120667 +    const seqDef* const send = sequences + nbSeq;
120668 +    const seqDef* sp = sstart;
120669 +    size_t matchLengthSum = 0;
120670 +    size_t litLengthSum = 0;
120671 +    while (send-sp > 0) {
120672 +        ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
120673 +        litLengthSum += seqLen.litLength;
120674 +        matchLengthSum += seqLen.matchLength;
120675 +        sp++;
120676 +    }
120677 +    assert(litLengthSum <= litSize);
120678 +    if (!lastSequence) {
120679 +        assert(litLengthSum == litSize);
120680 +    }
120681 +    return matchLengthSum + litSize;
120684 +/** ZSTD_compressSubBlock_sequences() :
120685 + *  Compresses sequences section for a sub-block.
120686 + *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
120687 + *  symbol compression modes for the super-block.
120688 + *  The first successfully compressed block will have these in its header.
120689 + *  We set entropyWritten=1 when we succeed in compressing the sequences.
120690 + *  The following sub-blocks will always have repeat mode.
120691 + *  @return : compressed size of sequences section of a sub-block
120692 + *            Or 0 if it is unable to compress
120693 + *            Or error code. */
120694 +static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
120695 +                                              const ZSTD_fseCTablesMetadata_t* fseMetadata,
120696 +                                              const seqDef* sequences, size_t nbSeq,
120697 +                                              const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
120698 +                                              const ZSTD_CCtx_params* cctxParams,
120699 +                                              void* dst, size_t dstCapacity,
120700 +                                              const int bmi2, int writeEntropy, int* entropyWritten)
120702 +    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
120703 +    BYTE* const ostart = (BYTE*)dst;
120704 +    BYTE* const oend = ostart + dstCapacity;
120705 +    BYTE* op = ostart;
120706 +    BYTE* seqHead;
120708 +    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
120710 +    *entropyWritten = 0;
120711 +    /* Sequences Header */
120712 +    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
120713 +                    dstSize_tooSmall, "");
120714 +    if (nbSeq < 0x7F)
120715 +        *op++ = (BYTE)nbSeq;
120716 +    else if (nbSeq < LONGNBSEQ)
120717 +        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
120718 +    else
120719 +        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
120720 +    if (nbSeq==0) {
120721 +        return op - ostart;
120722 +    }
120724 +    /* seqHead : flags for FSE encoding type */
120725 +    seqHead = op++;
120727 +    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
120729 +    if (writeEntropy) {
120730 +        const U32 LLtype = fseMetadata->llType;
120731 +        const U32 Offtype = fseMetadata->ofType;
120732 +        const U32 MLtype = fseMetadata->mlType;
120733 +        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
120734 +        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
120735 +        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
120736 +        op += fseMetadata->fseTablesSize;
120737 +    } else {
120738 +        const U32 repeat = set_repeat;
120739 +        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
120740 +    }
120742 +    {   size_t const bitstreamSize = ZSTD_encodeSequences(
120743 +                                        op, oend - op,
120744 +                                        fseTables->matchlengthCTable, mlCode,
120745 +                                        fseTables->offcodeCTable, ofCode,
120746 +                                        fseTables->litlengthCTable, llCode,
120747 +                                        sequences, nbSeq,
120748 +                                        longOffsets, bmi2);
120749 +        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
120750 +        op += bitstreamSize;
120751 +        /* zstd versions <= 1.3.4 mistakenly report corruption when
120752 +         * FSE_readNCount() receives a buffer < 4 bytes.
120753 +         * Fixed by https://github.com/facebook/zstd/pull/1146.
120754 +         * This can happen when the last set_compressed table present is 2
120755 +         * bytes and the bitstream is only one byte.
120756 +         * In this exceedingly rare case, we will simply emit an uncompressed
120757 +         * block, since it isn't worth optimizing.
120758 +         */
120759 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
120760 +        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
120761 +            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
120762 +            assert(fseMetadata->lastCountSize + bitstreamSize == 3);
120763 +            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
120764 +                        "emitting an uncompressed block.");
120765 +            return 0;
120766 +        }
120767 +#endif
120768 +        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
120769 +    }
120771 +    /* zstd versions <= 1.4.0 mistakenly report error when
120772 +     * sequences section body size is less than 3 bytes.
120773 +     * Fixed by https://github.com/facebook/zstd/pull/1664.
120774 +     * This can happen when the previous sequences section block is compressed
120775 +     * with rle mode and the current block's sequences section is compressed
120776 +     * with repeat mode where sequences section body size can be 1 byte.
120777 +     */
120778 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
120779 +    if (op-seqHead < 4) {
120780 +        DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
120781 +                    "an uncompressed block when sequences are < 4 bytes");
120782 +        return 0;
120783 +    }
120784 +#endif
120786 +    *entropyWritten = 1;
120787 +    return op - ostart;
120790 +/** ZSTD_compressSubBlock() :
120791 + *  Compresses a single sub-block.
120792 + *  @return : compressed size of the sub-block
120793 + *            Or 0 if it failed to compress. */
120794 +static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
120795 +                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
120796 +                                    const seqDef* sequences, size_t nbSeq,
120797 +                                    const BYTE* literals, size_t litSize,
120798 +                                    const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
120799 +                                    const ZSTD_CCtx_params* cctxParams,
120800 +                                    void* dst, size_t dstCapacity,
120801 +                                    const int bmi2,
120802 +                                    int writeLitEntropy, int writeSeqEntropy,
120803 +                                    int* litEntropyWritten, int* seqEntropyWritten,
120804 +                                    U32 lastBlock)
120806 +    BYTE* const ostart = (BYTE*)dst;
120807 +    BYTE* const oend = ostart + dstCapacity;
120808 +    BYTE* op = ostart + ZSTD_blockHeaderSize;
120809 +    DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
120810 +                litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
120811 +    {   size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
120812 +                                                        &entropyMetadata->hufMetadata, literals, litSize,
120813 +                                                        op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
120814 +        FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
120815 +        if (cLitSize == 0) return 0;
120816 +        op += cLitSize;
120817 +    }
120818 +    {   size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
120819 +                                                  &entropyMetadata->fseMetadata,
120820 +                                                  sequences, nbSeq,
120821 +                                                  llCode, mlCode, ofCode,
120822 +                                                  cctxParams,
120823 +                                                  op, oend-op,
120824 +                                                  bmi2, writeSeqEntropy, seqEntropyWritten);
120825 +        FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
120826 +        if (cSeqSize == 0) return 0;
120827 +        op += cSeqSize;
120828 +    }
120829 +    /* Write block header */
120830 +    {   size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
120831 +        U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
120832 +        MEM_writeLE24(ostart, cBlockHeader24);
120833 +    }
120834 +    return op-ostart;
120837 +static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
120838 +                                                const ZSTD_hufCTables_t* huf,
120839 +                                                const ZSTD_hufCTablesMetadata_t* hufMetadata,
120840 +                                                void* workspace, size_t wkspSize,
120841 +                                                int writeEntropy)
120843 +    unsigned* const countWksp = (unsigned*)workspace;
120844 +    unsigned maxSymbolValue = 255;
120845 +    size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
120847 +    if (hufMetadata->hType == set_basic) return litSize;
120848 +    else if (hufMetadata->hType == set_rle) return 1;
120849 +    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
120850 +        size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
120851 +        if (ZSTD_isError(largest)) return litSize;
120852 +        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
120853 +            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
120854 +            return cLitSizeEstimate + literalSectionHeaderSize;
120855 +    }   }
120856 +    assert(0); /* impossible */
120857 +    return 0;
120860 +static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
120861 +                        const BYTE* codeTable, unsigned maxCode,
120862 +                        size_t nbSeq, const FSE_CTable* fseCTable,
120863 +                        const U32* additionalBits,
120864 +                        short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
120865 +                        void* workspace, size_t wkspSize)
120867 +    unsigned* const countWksp = (unsigned*)workspace;
120868 +    const BYTE* ctp = codeTable;
120869 +    const BYTE* const ctStart = ctp;
120870 +    const BYTE* const ctEnd = ctStart + nbSeq;
120871 +    size_t cSymbolTypeSizeEstimateInBits = 0;
120872 +    unsigned max = maxCode;
120874 +    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */
120875 +    if (type == set_basic) {
120876 +        /* We selected this encoding type, so it must be valid. */
120877 +        assert(max <= defaultMax);
120878 +        cSymbolTypeSizeEstimateInBits = max <= defaultMax
120879 +                ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
120880 +                : ERROR(GENERIC);
120881 +    } else if (type == set_rle) {
120882 +        cSymbolTypeSizeEstimateInBits = 0;
120883 +    } else if (type == set_compressed || type == set_repeat) {
120884 +        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
120885 +    }
120886 +    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
120887 +    while (ctp < ctEnd) {
120888 +        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
120889 +        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
120890 +        ctp++;
120891 +    }
120892 +    return cSymbolTypeSizeEstimateInBits / 8;
120895 +static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
120896 +                                                  const BYTE* llCodeTable,
120897 +                                                  const BYTE* mlCodeTable,
120898 +                                                  size_t nbSeq,
120899 +                                                  const ZSTD_fseCTables_t* fseTables,
120900 +                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,
120901 +                                                  void* workspace, size_t wkspSize,
120902 +                                                  int writeEntropy)
120904 +    size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
120905 +    size_t cSeqSizeEstimate = 0;
120906 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
120907 +                                         nbSeq, fseTables->offcodeCTable, NULL,
120908 +                                         OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
120909 +                                         workspace, wkspSize);
120910 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
120911 +                                         nbSeq, fseTables->litlengthCTable, LL_bits,
120912 +                                         LL_defaultNorm, LL_defaultNormLog, MaxLL,
120913 +                                         workspace, wkspSize);
120914 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
120915 +                                         nbSeq, fseTables->matchlengthCTable, ML_bits,
120916 +                                         ML_defaultNorm, ML_defaultNormLog, MaxML,
120917 +                                         workspace, wkspSize);
120918 +    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
120919 +    return cSeqSizeEstimate + sequencesSectionHeaderSize;
120922 +static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
120923 +                                        const BYTE* ofCodeTable,
120924 +                                        const BYTE* llCodeTable,
120925 +                                        const BYTE* mlCodeTable,
120926 +                                        size_t nbSeq,
120927 +                                        const ZSTD_entropyCTables_t* entropy,
120928 +                                        const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
120929 +                                        void* workspace, size_t wkspSize,
120930 +                                        int writeLitEntropy, int writeSeqEntropy) {
120931 +    size_t cSizeEstimate = 0;
120932 +    cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
120933 +                                                         &entropy->huf, &entropyMetadata->hufMetadata,
120934 +                                                         workspace, wkspSize, writeLitEntropy);
120935 +    cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
120936 +                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
120937 +                                                         workspace, wkspSize, writeSeqEntropy);
120938 +    return cSizeEstimate + ZSTD_blockHeaderSize;
120941 +static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
120943 +    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
120944 +        return 1;
120945 +    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
120946 +        return 1;
120947 +    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
120948 +        return 1;
120949 +    return 0;
120952 +/** ZSTD_compressSubBlock_multi() :
120953 + *  Breaks super-block into multiple sub-blocks and compresses them.
120954 + *  Entropy will be written to the first block.
120955 + *  The following blocks will use repeat mode to compress.
120956 + *  All sub-blocks are compressed blocks (no raw or rle blocks).
120957 + *  @return : compressed size of the super block (which is multiple ZSTD blocks)
120958 + *            Or 0 if it failed to compress. */
120959 +static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
120960 +                            const ZSTD_compressedBlockState_t* prevCBlock,
120961 +                            ZSTD_compressedBlockState_t* nextCBlock,
120962 +                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
120963 +                            const ZSTD_CCtx_params* cctxParams,
120964 +                                  void* dst, size_t dstCapacity,
120965 +                            const void* src, size_t srcSize,
120966 +                            const int bmi2, U32 lastBlock,
120967 +                            void* workspace, size_t wkspSize)
120969 +    const seqDef* const sstart = seqStorePtr->sequencesStart;
120970 +    const seqDef* const send = seqStorePtr->sequences;
120971 +    const seqDef* sp = sstart;
120972 +    const BYTE* const lstart = seqStorePtr->litStart;
120973 +    const BYTE* const lend = seqStorePtr->lit;
120974 +    const BYTE* lp = lstart;
120975 +    BYTE const* ip = (BYTE const*)src;
120976 +    BYTE const* const iend = ip + srcSize;
120977 +    BYTE* const ostart = (BYTE*)dst;
120978 +    BYTE* const oend = ostart + dstCapacity;
120979 +    BYTE* op = ostart;
120980 +    const BYTE* llCodePtr = seqStorePtr->llCode;
120981 +    const BYTE* mlCodePtr = seqStorePtr->mlCode;
120982 +    const BYTE* ofCodePtr = seqStorePtr->ofCode;
120983 +    size_t targetCBlockSize = cctxParams->targetCBlockSize;
120984 +    size_t litSize, seqCount;
120985 +    int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
120986 +    int writeSeqEntropy = 1;
120987 +    int lastSequence = 0;
120989 +    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
120990 +                (unsigned)(lend-lp), (unsigned)(send-sstart));
120992 +    litSize = 0;
120993 +    seqCount = 0;
120994 +    do {
120995 +        size_t cBlockSizeEstimate = 0;
120996 +        if (sstart == send) {
120997 +            lastSequence = 1;
120998 +        } else {
120999 +            const seqDef* const sequence = sp + seqCount;
121000 +            lastSequence = sequence == send - 1;
121001 +            litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
121002 +            seqCount++;
121003 +        }
121004 +        if (lastSequence) {
121005 +            assert(lp <= lend);
121006 +            assert(litSize <= (size_t)(lend - lp));
121007 +            litSize = (size_t)(lend - lp);
121008 +        }
121009 +        /* I think there is an optimization opportunity here.
121010 +         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
121011 +         * since it recalculates estimate from scratch.
121012 +         * For example, it would recount literal distribution and symbol codes everytime.
121013 +         */
121014 +        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
121015 +                                                       &nextCBlock->entropy, entropyMetadata,
121016 +                                                       workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
121017 +        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
121018 +            int litEntropyWritten = 0;
121019 +            int seqEntropyWritten = 0;
121020 +            const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
121021 +            const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
121022 +                                                       sp, seqCount,
121023 +                                                       lp, litSize,
121024 +                                                       llCodePtr, mlCodePtr, ofCodePtr,
121025 +                                                       cctxParams,
121026 +                                                       op, oend-op,
121027 +                                                       bmi2, writeLitEntropy, writeSeqEntropy,
121028 +                                                       &litEntropyWritten, &seqEntropyWritten,
121029 +                                                       lastBlock && lastSequence);
121030 +            FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
121031 +            if (cSize > 0 && cSize < decompressedSize) {
121032 +                DEBUGLOG(5, "Committed the sub-block");
121033 +                assert(ip + decompressedSize <= iend);
121034 +                ip += decompressedSize;
121035 +                sp += seqCount;
121036 +                lp += litSize;
121037 +                op += cSize;
121038 +                llCodePtr += seqCount;
121039 +                mlCodePtr += seqCount;
121040 +                ofCodePtr += seqCount;
121041 +                litSize = 0;
121042 +                seqCount = 0;
121043 +                /* Entropy only needs to be written once */
121044 +                if (litEntropyWritten) {
121045 +                    writeLitEntropy = 0;
121046 +                }
121047 +                if (seqEntropyWritten) {
121048 +                    writeSeqEntropy = 0;
121049 +                }
121050 +            }
121051 +        }
121052 +    } while (!lastSequence);
121053 +    if (writeLitEntropy) {
121054 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
121055 +        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
121056 +    }
121057 +    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
121058 +        /* If we haven't written our entropy tables, then we've violated our contract and
121059 +         * must emit an uncompressed block.
121060 +         */
121061 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
121062 +        return 0;
121063 +    }
121064 +    if (ip < iend) {
121065 +        size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
121066 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
121067 +        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
121068 +        assert(cSize != 0);
121069 +        op += cSize;
121070 +        /* We have to regenerate the repcodes because we've skipped some sequences */
121071 +        if (sp < send) {
121072 +            seqDef const* seq;
121073 +            repcodes_t rep;
121074 +            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
121075 +            for (seq = sstart; seq < sp; ++seq) {
121076 +                rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
121077 +            }
121078 +            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
121079 +        }
121080 +    }
121081 +    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
121082 +    return op-ostart;
121085 +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
121086 +                               void* dst, size_t dstCapacity,
121087 +                               void const* src, size_t srcSize,
121088 +                               unsigned lastBlock) {
121089 +    ZSTD_entropyCTablesMetadata_t entropyMetadata;
121091 +    FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore,
121092 +          &zc->blockState.prevCBlock->entropy,
121093 +          &zc->blockState.nextCBlock->entropy,
121094 +          &zc->appliedParams,
121095 +          &entropyMetadata,
121096 +          zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
121098 +    return ZSTD_compressSubBlock_multi(&zc->seqStore,
121099 +            zc->blockState.prevCBlock,
121100 +            zc->blockState.nextCBlock,
121101 +            &entropyMetadata,
121102 +            &zc->appliedParams,
121103 +            dst, dstCapacity,
121104 +            src, srcSize,
121105 +            zc->bmi2, lastBlock,
121106 +            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
121108 diff --git a/lib/zstd/compress/zstd_compress_superblock.h b/lib/zstd/compress/zstd_compress_superblock.h
121109 new file mode 100644
121110 index 000000000000..224ece79546e
121111 --- /dev/null
121112 +++ b/lib/zstd/compress/zstd_compress_superblock.h
121113 @@ -0,0 +1,32 @@
121115 + * Copyright (c) Yann Collet, Facebook, Inc.
121116 + * All rights reserved.
121118 + * This source code is licensed under both the BSD-style license (found in the
121119 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
121120 + * in the COPYING file in the root directory of this source tree).
121121 + * You may select, at your option, one of the above-listed licenses.
121122 + */
121124 +#ifndef ZSTD_COMPRESS_ADVANCED_H
121125 +#define ZSTD_COMPRESS_ADVANCED_H
121127 +/*-*************************************
121128 +*  Dependencies
121129 +***************************************/
121131 +#include <linux/zstd.h> /* ZSTD_CCtx */
121133 +/*-*************************************
121134 +*  Target Compressed Block Size
121135 +***************************************/
121137 +/* ZSTD_compressSuperBlock() :
121138 + * Used to compress a super block when targetCBlockSize is being used.
121139 + * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */
121140 +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
121141 +                               void* dst, size_t dstCapacity,
121142 +                               void const* src, size_t srcSize,
121143 +                               unsigned lastBlock);
121145 +#endif /* ZSTD_COMPRESS_ADVANCED_H */
121146 diff --git a/lib/zstd/compress/zstd_cwksp.h b/lib/zstd/compress/zstd_cwksp.h
121147 new file mode 100644
121148 index 000000000000..c231cc500ef5
121149 --- /dev/null
121150 +++ b/lib/zstd/compress/zstd_cwksp.h
121151 @@ -0,0 +1,482 @@
121153 + * Copyright (c) Yann Collet, Facebook, Inc.
121154 + * All rights reserved.
121156 + * This source code is licensed under both the BSD-style license (found in the
121157 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
121158 + * in the COPYING file in the root directory of this source tree).
121159 + * You may select, at your option, one of the above-listed licenses.
121160 + */
121162 +#ifndef ZSTD_CWKSP_H
121163 +#define ZSTD_CWKSP_H
121165 +/*-*************************************
121166 +*  Dependencies
121167 +***************************************/
121168 +#include "../common/zstd_internal.h"
121171 +/*-*************************************
121172 +*  Constants
121173 +***************************************/
121175 +/* Since the workspace is effectively its own little malloc implementation /
121176 + * arena, when we run under ASAN, we should similarly insert redzones between
121177 + * each internal element of the workspace, so ASAN will catch overruns that
121178 + * reach outside an object but that stay inside the workspace.
121180 + * This defines the size of that redzone.
121181 + */
121182 +#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
121183 +#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
121184 +#endif
121186 +/*-*************************************
121187 +*  Structures
121188 +***************************************/
121189 +typedef enum {
121190 +    ZSTD_cwksp_alloc_objects,
121191 +    ZSTD_cwksp_alloc_buffers,
121192 +    ZSTD_cwksp_alloc_aligned
121193 +} ZSTD_cwksp_alloc_phase_e;
121196 + * Used to describe whether the workspace is statically allocated (and will not
121197 + * necessarily ever be freed), or if it's dynamically allocated and we can
121198 + * expect a well-formed caller to free this.
121199 + */
121200 +typedef enum {
121201 +    ZSTD_cwksp_dynamic_alloc,
121202 +    ZSTD_cwksp_static_alloc
121203 +} ZSTD_cwksp_static_alloc_e;
121206 + * Zstd fits all its internal datastructures into a single continuous buffer,
121207 + * so that it only needs to perform a single OS allocation (or so that a buffer
121208 + * can be provided to it and it can perform no allocations at all). This buffer
121209 + * is called the workspace.
121211 + * Several optimizations complicate that process of allocating memory ranges
121212 + * from this workspace for each internal datastructure:
121214 + * - These different internal datastructures have different setup requirements:
121216 + *   - The static objects need to be cleared once and can then be trivially
121217 + *     reused for each compression.
121219 + *   - Various buffers don't need to be initialized at all--they are always
121220 + *     written into before they're read.
121222 + *   - The matchstate tables have a unique requirement that they don't need
121223 + *     their memory to be totally cleared, but they do need the memory to have
121224 + *     some bound, i.e., a guarantee that all values in the memory they've been
121225 + *     allocated is less than some maximum value (which is the starting value
121226 + *     for the indices that they will then use for compression). When this
121227 + *     guarantee is provided to them, they can use the memory without any setup
121228 + *     work. When it can't, they have to clear the area.
121230 + * - These buffers also have different alignment requirements.
121232 + * - We would like to reuse the objects in the workspace for multiple
121233 + *   compressions without having to perform any expensive reallocation or
121234 + *   reinitialization work.
121236 + * - We would like to be able to efficiently reuse the workspace across
121237 + *   multiple compressions **even when the compression parameters change** and
121238 + *   we need to resize some of the objects (where possible).
121240 + * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
121241 + * abstraction was created. It works as follows:
121243 + * Workspace Layout:
121245 + * [                        ... workspace ...                         ]
121246 + * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
121248 + * The various objects that live in the workspace are divided into the
121249 + * following categories, and are allocated separately:
121251 + * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
121252 + *   so that literally everything fits in a single buffer. Note: if present,
121253 + *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
121254 + *   CDict}() rely on a pointer comparison to see whether one or two frees are
121255 + *   required.
121257 + * - Fixed size objects: these are fixed-size, fixed-count objects that are
121258 + *   nonetheless "dynamically" allocated in the workspace so that we can
121259 + *   control how they're initialized separately from the broader ZSTD_CCtx.
121260 + *   Examples:
121261 + *   - Entropy Workspace
121262 + *   - 2 x ZSTD_compressedBlockState_t
121263 + *   - CDict dictionary contents
121265 + * - Tables: these are any of several different datastructures (hash tables,
121266 + *   chain tables, binary trees) that all respect a common format: they are
121267 + *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
121268 + *   Their sizes depend on the cparams.
121270 + * - Aligned: these buffers are used for various purposes that require 4 byte
121271 + *   alignment, but don't require any initialization before they're used.
121273 + * - Buffers: these buffers are used for various purposes that don't require
121274 + *   any alignment or initialization before they're used. This means they can
121275 + *   be moved around at no cost for a new compression.
121277 + * Allocating Memory:
121279 + * The various types of objects must be allocated in order, so they can be
121280 + * correctly packed into the workspace buffer. That order is:
121282 + * 1. Objects
121283 + * 2. Buffers
121284 + * 3. Aligned
121285 + * 4. Tables
121287 + * Attempts to reserve objects of different types out of order will fail.
121288 + */
121289 +typedef struct {
121290 +    void* workspace;
121291 +    void* workspaceEnd;
121293 +    void* objectEnd;
121294 +    void* tableEnd;
121295 +    void* tableValidEnd;
121296 +    void* allocStart;
121298 +    BYTE allocFailed;
121299 +    int workspaceOversizedDuration;
121300 +    ZSTD_cwksp_alloc_phase_e phase;
121301 +    ZSTD_cwksp_static_alloc_e isStatic;
121302 +} ZSTD_cwksp;
121304 +/*-*************************************
121305 +*  Functions
121306 +***************************************/
121308 +MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
121310 +MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
121311 +    (void)ws;
121312 +    assert(ws->workspace <= ws->objectEnd);
121313 +    assert(ws->objectEnd <= ws->tableEnd);
121314 +    assert(ws->objectEnd <= ws->tableValidEnd);
121315 +    assert(ws->tableEnd <= ws->allocStart);
121316 +    assert(ws->tableValidEnd <= ws->allocStart);
121317 +    assert(ws->allocStart <= ws->workspaceEnd);
121321 + * Align must be a power of 2.
121322 + */
121323 +MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
121324 +    size_t const mask = align - 1;
121325 +    assert((align & mask) == 0);
121326 +    return (size + mask) & ~mask;
121330 + * Use this to determine how much space in the workspace we will consume to
121331 + * allocate this object. (Normally it should be exactly the size of the object,
121332 + * but under special conditions, like ASAN, where we pad each object, it might
121333 + * be larger.)
121335 + * Since tables aren't currently redzoned, you don't need to call through this
121336 + * to figure out how much space you need for the matchState tables. Everything
121337 + * else is though.
121338 + */
121339 +MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
121340 +    if (size == 0)
121341 +        return 0;
121342 +    return size;
121345 +MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
121346 +        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
121347 +    assert(phase >= ws->phase);
121348 +    if (phase > ws->phase) {
121349 +        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
121350 +                phase >= ZSTD_cwksp_alloc_buffers) {
121351 +            ws->tableValidEnd = ws->objectEnd;
121352 +        }
121353 +        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
121354 +                phase >= ZSTD_cwksp_alloc_aligned) {
121355 +            /* If unaligned allocations down from a too-large top have left us
121356 +             * unaligned, we need to realign our alloc ptr. Technically, this
121357 +             * can consume space that is unaccounted for in the neededSpace
121358 +             * calculation. However, I believe this can only happen when the
121359 +             * workspace is too large, and specifically when it is too large
121360 +             * by a larger margin than the space that will be consumed. */
121361 +            /* TODO: cleaner, compiler warning friendly way to do this??? */
121362 +            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
121363 +            if (ws->allocStart < ws->tableValidEnd) {
121364 +                ws->tableValidEnd = ws->allocStart;
121365 +            }
121366 +        }
121367 +        ws->phase = phase;
121368 +    }
121372 + * Returns whether this object/buffer/etc was allocated in this workspace.
121373 + */
121374 +MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
121375 +    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
121379 + * Internal function. Do not use directly.
121380 + */
121381 +MEM_STATIC void* ZSTD_cwksp_reserve_internal(
121382 +        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
121383 +    void* alloc;
121384 +    void* bottom = ws->tableEnd;
121385 +    ZSTD_cwksp_internal_advance_phase(ws, phase);
121386 +    alloc = (BYTE *)ws->allocStart - bytes;
121388 +    if (bytes == 0)
121389 +        return NULL;
121392 +    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
121393 +        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
121394 +    ZSTD_cwksp_assert_internal_consistency(ws);
121395 +    assert(alloc >= bottom);
121396 +    if (alloc < bottom) {
121397 +        DEBUGLOG(4, "cwksp: alloc failed!");
121398 +        ws->allocFailed = 1;
121399 +        return NULL;
121400 +    }
121401 +    if (alloc < ws->tableValidEnd) {
121402 +        ws->tableValidEnd = alloc;
121403 +    }
121404 +    ws->allocStart = alloc;
121407 +    return alloc;
121411 + * Reserves and returns unaligned memory.
121412 + */
121413 +MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
121414 +    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
121418 + * Reserves and returns memory sized on and aligned on sizeof(unsigned).
121419 + */
121420 +MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
121421 +    assert((bytes & (sizeof(U32)-1)) == 0);
121422 +    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
121426 + * Aligned on sizeof(unsigned). These buffers have the special property that
121427 + * their values remain constrained, allowing us to re-use them without
121428 + * memset()-ing them.
121429 + */
121430 +MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
121431 +    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
121432 +    void* alloc = ws->tableEnd;
121433 +    void* end = (BYTE *)alloc + bytes;
121434 +    void* top = ws->allocStart;
121436 +    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
121437 +        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
121438 +    assert((bytes & (sizeof(U32)-1)) == 0);
121439 +    ZSTD_cwksp_internal_advance_phase(ws, phase);
121440 +    ZSTD_cwksp_assert_internal_consistency(ws);
121441 +    assert(end <= top);
121442 +    if (end > top) {
121443 +        DEBUGLOG(4, "cwksp: table alloc failed!");
121444 +        ws->allocFailed = 1;
121445 +        return NULL;
121446 +    }
121447 +    ws->tableEnd = end;
121450 +    return alloc;
121454 + * Aligned on sizeof(void*).
121455 + */
121456 +MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
121457 +    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
121458 +    void* alloc = ws->objectEnd;
121459 +    void* end = (BYTE*)alloc + roundedBytes;
121462 +    DEBUGLOG(5,
121463 +        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
121464 +        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
121465 +    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
121466 +    assert((bytes & (sizeof(void*)-1)) == 0);
121467 +    ZSTD_cwksp_assert_internal_consistency(ws);
121468 +    /* we must be in the first phase, no advance is possible */
121469 +    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
121470 +        DEBUGLOG(4, "cwksp: object alloc failed!");
121471 +        ws->allocFailed = 1;
121472 +        return NULL;
121473 +    }
121474 +    ws->objectEnd = end;
121475 +    ws->tableEnd = end;
121476 +    ws->tableValidEnd = end;
121479 +    return alloc;
121482 +MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
121483 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
121486 +    assert(ws->tableValidEnd >= ws->objectEnd);
121487 +    assert(ws->tableValidEnd <= ws->allocStart);
121488 +    ws->tableValidEnd = ws->objectEnd;
121489 +    ZSTD_cwksp_assert_internal_consistency(ws);
121492 +MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
121493 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
121494 +    assert(ws->tableValidEnd >= ws->objectEnd);
121495 +    assert(ws->tableValidEnd <= ws->allocStart);
121496 +    if (ws->tableValidEnd < ws->tableEnd) {
121497 +        ws->tableValidEnd = ws->tableEnd;
121498 +    }
121499 +    ZSTD_cwksp_assert_internal_consistency(ws);
121503 + * Zero the part of the allocated tables not already marked clean.
121504 + */
121505 +MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
121506 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
121507 +    assert(ws->tableValidEnd >= ws->objectEnd);
121508 +    assert(ws->tableValidEnd <= ws->allocStart);
121509 +    if (ws->tableValidEnd < ws->tableEnd) {
121510 +        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
121511 +    }
121512 +    ZSTD_cwksp_mark_tables_clean(ws);
121516 + * Invalidates table allocations.
121517 + * All other allocations remain valid.
121518 + */
121519 +MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
121520 +    DEBUGLOG(4, "cwksp: clearing tables!");
121523 +    ws->tableEnd = ws->objectEnd;
121524 +    ZSTD_cwksp_assert_internal_consistency(ws);
121528 + * Invalidates all buffer, aligned, and table allocations.
121529 + * Object allocations remain valid.
121530 + */
121531 +MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
121532 +    DEBUGLOG(4, "cwksp: clearing!");
121536 +    ws->tableEnd = ws->objectEnd;
121537 +    ws->allocStart = ws->workspaceEnd;
121538 +    ws->allocFailed = 0;
121539 +    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
121540 +        ws->phase = ZSTD_cwksp_alloc_buffers;
121541 +    }
121542 +    ZSTD_cwksp_assert_internal_consistency(ws);
121546 + * The provided workspace takes ownership of the buffer [start, start+size).
121547 + * Any existing values in the workspace are ignored (the previously managed
121548 + * buffer, if present, must be separately freed).
121549 + */
121550 +MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
121551 +    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
121552 +    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
121553 +    ws->workspace = start;
121554 +    ws->workspaceEnd = (BYTE*)start + size;
121555 +    ws->objectEnd = ws->workspace;
121556 +    ws->tableValidEnd = ws->objectEnd;
121557 +    ws->phase = ZSTD_cwksp_alloc_objects;
121558 +    ws->isStatic = isStatic;
121559 +    ZSTD_cwksp_clear(ws);
121560 +    ws->workspaceOversizedDuration = 0;
121561 +    ZSTD_cwksp_assert_internal_consistency(ws);
121564 +MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
121565 +    void* workspace = ZSTD_customMalloc(size, customMem);
121566 +    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
121567 +    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
121568 +    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
121569 +    return 0;
121572 +MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
121573 +    void *ptr = ws->workspace;
121574 +    DEBUGLOG(4, "cwksp: freeing workspace");
121575 +    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
121576 +    ZSTD_customFree(ptr, customMem);
121580 + * Moves the management of a workspace from one cwksp to another. The src cwksp
121581 + * is left in an invalid state (src must be re-init()'ed before it's used again).
121582 + */
121583 +MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
121584 +    *dst = *src;
121585 +    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
121588 +MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
121589 +    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
121592 +MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
121593 +    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
121594 +         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
121597 +MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
121598 +    return ws->allocFailed;
121601 +/*-*************************************
121602 +*  Functions Checking Free Space
121603 +***************************************/
121605 +MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
121606 +    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
121609 +MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
121610 +    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
121613 +MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
121614 +    return ZSTD_cwksp_check_available(
121615 +        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
121618 +MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
121619 +    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
121620 +        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
121623 +MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
121624 +        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
121625 +    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
121626 +        ws->workspaceOversizedDuration++;
121627 +    } else {
121628 +        ws->workspaceOversizedDuration = 0;
121629 +    }
121633 +#endif /* ZSTD_CWKSP_H */
121634 diff --git a/lib/zstd/compress/zstd_double_fast.c b/lib/zstd/compress/zstd_double_fast.c
121635 new file mode 100644
121636 index 000000000000..b99172e9d2e4
121637 --- /dev/null
121638 +++ b/lib/zstd/compress/zstd_double_fast.c
121639 @@ -0,0 +1,521 @@
121641 + * Copyright (c) Yann Collet, Facebook, Inc.
121642 + * All rights reserved.
121644 + * This source code is licensed under both the BSD-style license (found in the
121645 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
121646 + * in the COPYING file in the root directory of this source tree).
121647 + * You may select, at your option, one of the above-listed licenses.
121648 + */
121650 +#include "zstd_compress_internal.h"
121651 +#include "zstd_double_fast.h"
121654 +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
121655 +                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
121657 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
121658 +    U32* const hashLarge = ms->hashTable;
121659 +    U32  const hBitsL = cParams->hashLog;
121660 +    U32  const mls = cParams->minMatch;
121661 +    U32* const hashSmall = ms->chainTable;
121662 +    U32  const hBitsS = cParams->chainLog;
121663 +    const BYTE* const base = ms->window.base;
121664 +    const BYTE* ip = base + ms->nextToUpdate;
121665 +    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
121666 +    const U32 fastHashFillStep = 3;
121668 +    /* Always insert every fastHashFillStep position into the hash tables.
121669 +     * Insert the other positions into the large hash table if their entry
121670 +     * is empty.
121671 +     */
121672 +    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
121673 +        U32 const curr = (U32)(ip - base);
121674 +        U32 i;
121675 +        for (i = 0; i < fastHashFillStep; ++i) {
121676 +            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
121677 +            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
121678 +            if (i == 0)
121679 +                hashSmall[smHash] = curr + i;
121680 +            if (i == 0 || hashLarge[lgHash] == 0)
121681 +                hashLarge[lgHash] = curr + i;
121682 +            /* Only load extra positions for ZSTD_dtlm_full */
121683 +            if (dtlm == ZSTD_dtlm_fast)
121684 +                break;
121685 +    }   }
121689 +FORCE_INLINE_TEMPLATE
121690 +size_t ZSTD_compressBlock_doubleFast_generic(
121691 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
121692 +        void const* src, size_t srcSize,
121693 +        U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
121695 +    ZSTD_compressionParameters const* cParams = &ms->cParams;
121696 +    U32* const hashLong = ms->hashTable;
121697 +    const U32 hBitsL = cParams->hashLog;
121698 +    U32* const hashSmall = ms->chainTable;
121699 +    const U32 hBitsS = cParams->chainLog;
121700 +    const BYTE* const base = ms->window.base;
121701 +    const BYTE* const istart = (const BYTE*)src;
121702 +    const BYTE* ip = istart;
121703 +    const BYTE* anchor = istart;
121704 +    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
121705 +    /* presumes that, if there is a dictionary, it must be using Attach mode */
121706 +    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
121707 +    const BYTE* const prefixLowest = base + prefixLowestIndex;
121708 +    const BYTE* const iend = istart + srcSize;
121709 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
121710 +    U32 offset_1=rep[0], offset_2=rep[1];
121711 +    U32 offsetSaved = 0;
121713 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
121714 +    const ZSTD_compressionParameters* const dictCParams =
121715 +                                     dictMode == ZSTD_dictMatchState ?
121716 +                                     &dms->cParams : NULL;
121717 +    const U32* const dictHashLong  = dictMode == ZSTD_dictMatchState ?
121718 +                                     dms->hashTable : NULL;
121719 +    const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
121720 +                                     dms->chainTable : NULL;
121721 +    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?
121722 +                                     dms->window.dictLimit : 0;
121723 +    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
121724 +                                     dms->window.base : NULL;
121725 +    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?
121726 +                                     dictBase + dictStartIndex : NULL;
121727 +    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
121728 +                                     dms->window.nextSrc : NULL;
121729 +    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
121730 +                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
121731 +                                     0;
121732 +    const U32 dictHBitsL           = dictMode == ZSTD_dictMatchState ?
121733 +                                     dictCParams->hashLog : hBitsL;
121734 +    const U32 dictHBitsS           = dictMode == ZSTD_dictMatchState ?
121735 +                                     dictCParams->chainLog : hBitsS;
121736 +    const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
121738 +    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
121740 +    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
121742 +    /* if a dictionary is attached, it must be within window range */
121743 +    if (dictMode == ZSTD_dictMatchState) {
121744 +        assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
121745 +    }
121747 +    /* init */
121748 +    ip += (dictAndPrefixLength == 0);
121749 +    if (dictMode == ZSTD_noDict) {
121750 +        U32 const curr = (U32)(ip - base);
121751 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
121752 +        U32 const maxRep = curr - windowLow;
121753 +        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
121754 +        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
121755 +    }
121756 +    if (dictMode == ZSTD_dictMatchState) {
121757 +        /* dictMatchState repCode checks don't currently handle repCode == 0
121758 +         * disabling. */
121759 +        assert(offset_1 <= dictAndPrefixLength);
121760 +        assert(offset_2 <= dictAndPrefixLength);
121761 +    }
121763 +    /* Main Search Loop */
121764 +    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
121765 +        size_t mLength;
121766 +        U32 offset;
121767 +        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
121768 +        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
121769 +        size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
121770 +        size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
121771 +        U32 const curr = (U32)(ip-base);
121772 +        U32 const matchIndexL = hashLong[h2];
121773 +        U32 matchIndexS = hashSmall[h];
121774 +        const BYTE* matchLong = base + matchIndexL;
121775 +        const BYTE* match = base + matchIndexS;
121776 +        const U32 repIndex = curr + 1 - offset_1;
121777 +        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
121778 +                            && repIndex < prefixLowestIndex) ?
121779 +                               dictBase + (repIndex - dictIndexDelta) :
121780 +                               base + repIndex;
121781 +        hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */
121783 +        /* check dictMatchState repcode */
121784 +        if (dictMode == ZSTD_dictMatchState
121785 +            && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
121786 +            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
121787 +            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
121788 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
121789 +            ip++;
121790 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
121791 +            goto _match_stored;
121792 +        }
121794 +        /* check noDict repcode */
121795 +        if ( dictMode == ZSTD_noDict
121796 +          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
121797 +            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
121798 +            ip++;
121799 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
121800 +            goto _match_stored;
121801 +        }
121803 +        if (matchIndexL > prefixLowestIndex) {
121804 +            /* check prefix long match */
121805 +            if (MEM_read64(matchLong) == MEM_read64(ip)) {
121806 +                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
121807 +                offset = (U32)(ip-matchLong);
121808 +                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
121809 +                goto _match_found;
121810 +            }
121811 +        } else if (dictMode == ZSTD_dictMatchState) {
121812 +            /* check dictMatchState long match */
121813 +            U32 const dictMatchIndexL = dictHashLong[dictHL];
121814 +            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
121815 +            assert(dictMatchL < dictEnd);
121817 +            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
121818 +                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
121819 +                offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
121820 +                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
121821 +                goto _match_found;
121822 +        }   }
121824 +        if (matchIndexS > prefixLowestIndex) {
121825 +            /* check prefix short match */
121826 +            if (MEM_read32(match) == MEM_read32(ip)) {
121827 +                goto _search_next_long;
121828 +            }
121829 +        } else if (dictMode == ZSTD_dictMatchState) {
121830 +            /* check dictMatchState short match */
121831 +            U32 const dictMatchIndexS = dictHashSmall[dictHS];
121832 +            match = dictBase + dictMatchIndexS;
121833 +            matchIndexS = dictMatchIndexS + dictIndexDelta;
121835 +            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
121836 +                goto _search_next_long;
121837 +        }   }
121839 +        ip += ((ip-anchor) >> kSearchStrength) + 1;
121840 +#if defined(__aarch64__)
121841 +        PREFETCH_L1(ip+256);
121842 +#endif
121843 +        continue;
121845 +_search_next_long:
121847 +        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
121848 +            size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
121849 +            U32 const matchIndexL3 = hashLong[hl3];
121850 +            const BYTE* matchL3 = base + matchIndexL3;
121851 +            hashLong[hl3] = curr + 1;
121853 +            /* check prefix long +1 match */
121854 +            if (matchIndexL3 > prefixLowestIndex) {
121855 +                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
121856 +                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
121857 +                    ip++;
121858 +                    offset = (U32)(ip-matchL3);
121859 +                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
121860 +                    goto _match_found;
121861 +                }
121862 +            } else if (dictMode == ZSTD_dictMatchState) {
121863 +                /* check dict long +1 match */
121864 +                U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
121865 +                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
121866 +                assert(dictMatchL3 < dictEnd);
121867 +                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
121868 +                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
121869 +                    ip++;
121870 +                    offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
121871 +                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
121872 +                    goto _match_found;
121873 +        }   }   }
121875 +        /* if no long +1 match, explore the short match we found */
121876 +        if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
121877 +            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
121878 +            offset = (U32)(curr - matchIndexS);
121879 +            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
121880 +        } else {
121881 +            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
121882 +            offset = (U32)(ip - match);
121883 +            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
121884 +        }
121886 +        /* fall-through */
121888 +_match_found:
121889 +        offset_2 = offset_1;
121890 +        offset_1 = offset;
121892 +        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
121894 +_match_stored:
121895 +        /* match found */
121896 +        ip += mLength;
121897 +        anchor = ip;
121899 +        if (ip <= ilimit) {
121900 +            /* Complementary insertion */
121901 +            /* done after iLimit test, as candidates could be > iend-8 */
121902 +            {   U32 const indexToInsert = curr+2;
121903 +                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
121904 +                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
121905 +                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
121906 +                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
121907 +            }
121909 +            /* check immediate repcode */
121910 +            if (dictMode == ZSTD_dictMatchState) {
121911 +                while (ip <= ilimit) {
121912 +                    U32 const current2 = (U32)(ip-base);
121913 +                    U32 const repIndex2 = current2 - offset_2;
121914 +                    const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
121915 +                        && repIndex2 < prefixLowestIndex ?
121916 +                            dictBase + repIndex2 - dictIndexDelta :
121917 +                            base + repIndex2;
121918 +                    if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
121919 +                       && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
121920 +                        const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
121921 +                        size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
121922 +                        U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
121923 +                        ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
121924 +                        hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
121925 +                        hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
121926 +                        ip += repLength2;
121927 +                        anchor = ip;
121928 +                        continue;
121929 +                    }
121930 +                    break;
121931 +            }   }
121933 +            if (dictMode == ZSTD_noDict) {
121934 +                while ( (ip <= ilimit)
121935 +                     && ( (offset_2>0)
121936 +                        & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
121937 +                    /* store sequence */
121938 +                    size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
121939 +                    U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
121940 +                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
121941 +                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
121942 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
121943 +                    ip += rLength;
121944 +                    anchor = ip;
121945 +                    continue;   /* faster when present ... (?) */
121946 +        }   }   }
121947 +    }   /* while (ip < ilimit) */
121949 +    /* save reps for next block */
121950 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
121951 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
121953 +    /* Return the last literals size */
121954 +    return (size_t)(iend - anchor);
121958 +size_t ZSTD_compressBlock_doubleFast(
121959 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
121960 +        void const* src, size_t srcSize)
121962 +    const U32 mls = ms->cParams.minMatch;
121963 +    switch(mls)
121964 +    {
121965 +    default: /* includes case 3 */
121966 +    case 4 :
121967 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
121968 +    case 5 :
121969 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
121970 +    case 6 :
121971 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
121972 +    case 7 :
121973 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
121974 +    }
121978 +size_t ZSTD_compressBlock_doubleFast_dictMatchState(
121979 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
121980 +        void const* src, size_t srcSize)
121982 +    const U32 mls = ms->cParams.minMatch;
121983 +    switch(mls)
121984 +    {
121985 +    default: /* includes case 3 */
121986 +    case 4 :
121987 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
121988 +    case 5 :
121989 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
121990 +    case 6 :
121991 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
121992 +    case 7 :
121993 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
121994 +    }
121998 +static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
121999 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122000 +        void const* src, size_t srcSize,
122001 +        U32 const mls /* template */)
122003 +    ZSTD_compressionParameters const* cParams = &ms->cParams;
122004 +    U32* const hashLong = ms->hashTable;
122005 +    U32  const hBitsL = cParams->hashLog;
122006 +    U32* const hashSmall = ms->chainTable;
122007 +    U32  const hBitsS = cParams->chainLog;
122008 +    const BYTE* const istart = (const BYTE*)src;
122009 +    const BYTE* ip = istart;
122010 +    const BYTE* anchor = istart;
122011 +    const BYTE* const iend = istart + srcSize;
122012 +    const BYTE* const ilimit = iend - 8;
122013 +    const BYTE* const base = ms->window.base;
122014 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
122015 +    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
122016 +    const U32   dictStartIndex = lowLimit;
122017 +    const U32   dictLimit = ms->window.dictLimit;
122018 +    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
122019 +    const BYTE* const prefixStart = base + prefixStartIndex;
122020 +    const BYTE* const dictBase = ms->window.dictBase;
122021 +    const BYTE* const dictStart = dictBase + dictStartIndex;
122022 +    const BYTE* const dictEnd = dictBase + prefixStartIndex;
122023 +    U32 offset_1=rep[0], offset_2=rep[1];
122025 +    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
122027 +    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
122028 +    if (prefixStartIndex == dictStartIndex)
122029 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);
122031 +    /* Search Loop */
122032 +    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
122033 +        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
122034 +        const U32 matchIndex = hashSmall[hSmall];
122035 +        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
122036 +        const BYTE* match = matchBase + matchIndex;
122038 +        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
122039 +        const U32 matchLongIndex = hashLong[hLong];
122040 +        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
122041 +        const BYTE* matchLong = matchLongBase + matchLongIndex;
122043 +        const U32 curr = (U32)(ip-base);
122044 +        const U32 repIndex = curr + 1 - offset_1;   /* offset_1 expected <= curr +1 */
122045 +        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
122046 +        const BYTE* const repMatch = repBase + repIndex;
122047 +        size_t mLength;
122048 +        hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */
122050 +        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
122051 +            & (repIndex > dictStartIndex))
122052 +          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
122053 +            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
122054 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
122055 +            ip++;
122056 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
122057 +        } else {
122058 +            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
122059 +                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
122060 +                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
122061 +                U32 offset;
122062 +                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
122063 +                offset = curr - matchLongIndex;
122064 +                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
122065 +                offset_2 = offset_1;
122066 +                offset_1 = offset;
122067 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
122069 +            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
122070 +                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
122071 +                U32 const matchIndex3 = hashLong[h3];
122072 +                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
122073 +                const BYTE* match3 = match3Base + matchIndex3;
122074 +                U32 offset;
122075 +                hashLong[h3] = curr + 1;
122076 +                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
122077 +                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
122078 +                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
122079 +                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
122080 +                    ip++;
122081 +                    offset = curr+1 - matchIndex3;
122082 +                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
122083 +                } else {
122084 +                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
122085 +                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
122086 +                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
122087 +                    offset = curr - matchIndex;
122088 +                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
122089 +                }
122090 +                offset_2 = offset_1;
122091 +                offset_1 = offset;
122092 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
122094 +            } else {
122095 +                ip += ((ip-anchor) >> kSearchStrength) + 1;
122096 +                continue;
122097 +        }   }
122099 +        /* move to next sequence start */
122100 +        ip += mLength;
122101 +        anchor = ip;
122103 +        if (ip <= ilimit) {
122104 +            /* Complementary insertion */
122105 +            /* done after iLimit test, as candidates could be > iend-8 */
122106 +            {   U32 const indexToInsert = curr+2;
122107 +                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
122108 +                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
122109 +                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
122110 +                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
122111 +            }
122113 +            /* check immediate repcode */
122114 +            while (ip <= ilimit) {
122115 +                U32 const current2 = (U32)(ip-base);
122116 +                U32 const repIndex2 = current2 - offset_2;
122117 +                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
122118 +                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
122119 +                    & (repIndex2 > dictStartIndex))
122120 +                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
122121 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
122122 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
122123 +                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
122124 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
122125 +                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
122126 +                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
122127 +                    ip += repLength2;
122128 +                    anchor = ip;
122129 +                    continue;
122130 +                }
122131 +                break;
122132 +    }   }   }
122134 +    /* save reps for next block */
122135 +    rep[0] = offset_1;
122136 +    rep[1] = offset_2;
122138 +    /* Return the last literals size */
122139 +    return (size_t)(iend - anchor);
122143 +size_t ZSTD_compressBlock_doubleFast_extDict(
122144 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122145 +        void const* src, size_t srcSize)
122147 +    U32 const mls = ms->cParams.minMatch;
122148 +    switch(mls)
122149 +    {
122150 +    default: /* includes case 3 */
122151 +    case 4 :
122152 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
122153 +    case 5 :
122154 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
122155 +    case 6 :
122156 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
122157 +    case 7 :
122158 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
122159 +    }
122161 diff --git a/lib/zstd/compress/zstd_double_fast.h b/lib/zstd/compress/zstd_double_fast.h
122162 new file mode 100644
122163 index 000000000000..6822bde65a1d
122164 --- /dev/null
122165 +++ b/lib/zstd/compress/zstd_double_fast.h
122166 @@ -0,0 +1,32 @@
122168 + * Copyright (c) Yann Collet, Facebook, Inc.
122169 + * All rights reserved.
122171 + * This source code is licensed under both the BSD-style license (found in the
122172 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
122173 + * in the COPYING file in the root directory of this source tree).
122174 + * You may select, at your option, one of the above-listed licenses.
122175 + */
122177 +#ifndef ZSTD_DOUBLE_FAST_H
122178 +#define ZSTD_DOUBLE_FAST_H
122181 +#include "../common/mem.h"      /* U32 */
122182 +#include "zstd_compress_internal.h"     /* ZSTD_CCtx, size_t */
122184 +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
122185 +                              void const* end, ZSTD_dictTableLoadMethod_e dtlm);
122186 +size_t ZSTD_compressBlock_doubleFast(
122187 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122188 +        void const* src, size_t srcSize);
122189 +size_t ZSTD_compressBlock_doubleFast_dictMatchState(
122190 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122191 +        void const* src, size_t srcSize);
122192 +size_t ZSTD_compressBlock_doubleFast_extDict(
122193 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122194 +        void const* src, size_t srcSize);
122198 +#endif /* ZSTD_DOUBLE_FAST_H */
122199 diff --git a/lib/zstd/compress/zstd_fast.c b/lib/zstd/compress/zstd_fast.c
122200 new file mode 100644
122201 index 000000000000..96b7d48e2868
122202 --- /dev/null
122203 +++ b/lib/zstd/compress/zstd_fast.c
122204 @@ -0,0 +1,496 @@
122206 + * Copyright (c) Yann Collet, Facebook, Inc.
122207 + * All rights reserved.
122209 + * This source code is licensed under both the BSD-style license (found in the
122210 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
122211 + * in the COPYING file in the root directory of this source tree).
122212 + * You may select, at your option, one of the above-listed licenses.
122213 + */
122215 +#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
122216 +#include "zstd_fast.h"
122219 +void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
122220 +                        const void* const end,
122221 +                        ZSTD_dictTableLoadMethod_e dtlm)
122223 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
122224 +    U32* const hashTable = ms->hashTable;
122225 +    U32  const hBits = cParams->hashLog;
122226 +    U32  const mls = cParams->minMatch;
122227 +    const BYTE* const base = ms->window.base;
122228 +    const BYTE* ip = base + ms->nextToUpdate;
122229 +    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
122230 +    const U32 fastHashFillStep = 3;
122232 +    /* Always insert every fastHashFillStep position into the hash table.
122233 +     * Insert the other positions if their hash entry is empty.
122234 +     */
122235 +    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
122236 +        U32 const curr = (U32)(ip - base);
122237 +        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
122238 +        hashTable[hash0] = curr;
122239 +        if (dtlm == ZSTD_dtlm_fast) continue;
122240 +        /* Only load extra positions for ZSTD_dtlm_full */
122241 +        {   U32 p;
122242 +            for (p = 1; p < fastHashFillStep; ++p) {
122243 +                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
122244 +                if (hashTable[hash] == 0) {  /* not yet filled */
122245 +                    hashTable[hash] = curr + p;
122246 +    }   }   }   }
122250 +FORCE_INLINE_TEMPLATE size_t
122251 +ZSTD_compressBlock_fast_generic(
122252 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122253 +        void const* src, size_t srcSize,
122254 +        U32 const mls)
122256 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
122257 +    U32* const hashTable = ms->hashTable;
122258 +    U32 const hlog = cParams->hashLog;
122259 +    /* support stepSize of 0 */
122260 +    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
122261 +    const BYTE* const base = ms->window.base;
122262 +    const BYTE* const istart = (const BYTE*)src;
122263 +    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
122264 +    const BYTE* ip0 = istart;
122265 +    const BYTE* ip1;
122266 +    const BYTE* anchor = istart;
122267 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
122268 +    const U32   prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
122269 +    const BYTE* const prefixStart = base + prefixStartIndex;
122270 +    const BYTE* const iend = istart + srcSize;
122271 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
122272 +    U32 offset_1=rep[0], offset_2=rep[1];
122273 +    U32 offsetSaved = 0;
122275 +    /* init */
122276 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
122277 +    ip0 += (ip0 == prefixStart);
122278 +    ip1 = ip0 + 1;
122279 +    {   U32 const curr = (U32)(ip0 - base);
122280 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
122281 +        U32 const maxRep = curr - windowLow;
122282 +        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
122283 +        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
122284 +    }
122286 +    /* Main Search Loop */
122287 +#ifdef __INTEL_COMPILER
122288 +    /* From intel 'The vector pragma indicates that the loop should be
122289 +     * vectorized if it is legal to do so'. Can be used together with
122290 +     * #pragma ivdep (but have opted to exclude that because intel
122291 +     * warns against using it).*/
122292 +    #pragma vector always
122293 +#endif
122294 +    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
122295 +        size_t mLength;
122296 +        BYTE const* ip2 = ip0 + 2;
122297 +        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
122298 +        U32 const val0 = MEM_read32(ip0);
122299 +        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
122300 +        U32 const val1 = MEM_read32(ip1);
122301 +        U32 const current0 = (U32)(ip0-base);
122302 +        U32 const current1 = (U32)(ip1-base);
122303 +        U32 const matchIndex0 = hashTable[h0];
122304 +        U32 const matchIndex1 = hashTable[h1];
122305 +        BYTE const* repMatch = ip2 - offset_1;
122306 +        const BYTE* match0 = base + matchIndex0;
122307 +        const BYTE* match1 = base + matchIndex1;
122308 +        U32 offcode;
122310 +#if defined(__aarch64__)
122311 +        PREFETCH_L1(ip0+256);
122312 +#endif
122314 +        hashTable[h0] = current0;   /* update hash table */
122315 +        hashTable[h1] = current1;   /* update hash table */
122317 +        assert(ip0 + 1 == ip1);
122319 +        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
122320 +            mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
122321 +            ip0 = ip2 - mLength;
122322 +            match0 = repMatch - mLength;
122323 +            mLength += 4;
122324 +            offcode = 0;
122325 +            goto _match;
122326 +        }
122327 +        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
122328 +            /* found a regular match */
122329 +            goto _offset;
122330 +        }
122331 +        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
122332 +            /* found a regular match after one literal */
122333 +            ip0 = ip1;
122334 +            match0 = match1;
122335 +            goto _offset;
122336 +        }
122337 +        {   size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
122338 +            assert(step >= 2);
122339 +            ip0 += step;
122340 +            ip1 += step;
122341 +            continue;
122342 +        }
122343 +_offset: /* Requires: ip0, match0 */
122344 +        /* Compute the offset code */
122345 +        offset_2 = offset_1;
122346 +        offset_1 = (U32)(ip0-match0);
122347 +        offcode = offset_1 + ZSTD_REP_MOVE;
122348 +        mLength = 4;
122349 +        /* Count the backwards match length */
122350 +        while (((ip0>anchor) & (match0>prefixStart))
122351 +             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
122353 +_match: /* Requires: ip0, match0, offcode */
122354 +        /* Count the forward length */
122355 +        mLength += ZSTD_count(ip0+mLength, match0+mLength, iend);
122356 +        ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
122357 +        /* match found */
122358 +        ip0 += mLength;
122359 +        anchor = ip0;
122361 +        if (ip0 <= ilimit) {
122362 +            /* Fill Table */
122363 +            assert(base+current0+2 > istart);  /* check base overflow */
122364 +            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
122365 +            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
122367 +            if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
122368 +                while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
122369 +                    /* store sequence */
122370 +                    size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
122371 +                    { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
122372 +                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
122373 +                    ip0 += rLength;
122374 +                    ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
122375 +                    anchor = ip0;
122376 +                    continue;   /* faster when present (confirmed on gcc-8) ... (?) */
122377 +        }   }   }
122378 +        ip1 = ip0 + 1;
122379 +    }
122381 +    /* save reps for next block */
122382 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
122383 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
122385 +    /* Return the last literals size */
122386 +    return (size_t)(iend - anchor);
122390 +size_t ZSTD_compressBlock_fast(
122391 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122392 +        void const* src, size_t srcSize)
122394 +    U32 const mls = ms->cParams.minMatch;
122395 +    assert(ms->dictMatchState == NULL);
122396 +    switch(mls)
122397 +    {
122398 +    default: /* includes case 3 */
122399 +    case 4 :
122400 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
122401 +    case 5 :
122402 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
122403 +    case 6 :
122404 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
122405 +    case 7 :
122406 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
122407 +    }
122410 +FORCE_INLINE_TEMPLATE
122411 +size_t ZSTD_compressBlock_fast_dictMatchState_generic(
122412 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122413 +        void const* src, size_t srcSize, U32 const mls)
122415 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
122416 +    U32* const hashTable = ms->hashTable;
122417 +    U32 const hlog = cParams->hashLog;
122418 +    /* support stepSize of 0 */
122419 +    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
122420 +    const BYTE* const base = ms->window.base;
122421 +    const BYTE* const istart = (const BYTE*)src;
122422 +    const BYTE* ip = istart;
122423 +    const BYTE* anchor = istart;
122424 +    const U32   prefixStartIndex = ms->window.dictLimit;
122425 +    const BYTE* const prefixStart = base + prefixStartIndex;
122426 +    const BYTE* const iend = istart + srcSize;
122427 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
122428 +    U32 offset_1=rep[0], offset_2=rep[1];
122429 +    U32 offsetSaved = 0;
122431 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
122432 +    const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
122433 +    const U32* const dictHashTable = dms->hashTable;
122434 +    const U32 dictStartIndex       = dms->window.dictLimit;
122435 +    const BYTE* const dictBase     = dms->window.base;
122436 +    const BYTE* const dictStart    = dictBase + dictStartIndex;
122437 +    const BYTE* const dictEnd      = dms->window.nextSrc;
122438 +    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
122439 +    const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);
122440 +    const U32 dictHLog             = dictCParams->hashLog;
122442 +    /* if a dictionary is still attached, it necessarily means that
122443 +     * it is within window size. So we just check it. */
122444 +    const U32 maxDistance = 1U << cParams->windowLog;
122445 +    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
122446 +    assert(endIndex - prefixStartIndex <= maxDistance);
122447 +    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */
122449 +    /* ensure there will be no underflow
122450 +     * when translating a dict index into a local index */
122451 +    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
122453 +    /* init */
122454 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
122455 +    ip += (dictAndPrefixLength == 0);
122456 +    /* dictMatchState repCode checks don't currently handle repCode == 0
122457 +     * disabling. */
122458 +    assert(offset_1 <= dictAndPrefixLength);
122459 +    assert(offset_2 <= dictAndPrefixLength);
122461 +    /* Main Search Loop */
122462 +    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
122463 +        size_t mLength;
122464 +        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
122465 +        U32 const curr = (U32)(ip-base);
122466 +        U32 const matchIndex = hashTable[h];
122467 +        const BYTE* match = base + matchIndex;
122468 +        const U32 repIndex = curr + 1 - offset_1;
122469 +        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
122470 +                               dictBase + (repIndex - dictIndexDelta) :
122471 +                               base + repIndex;
122472 +        hashTable[h] = curr;   /* update hash table */
122474 +        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
122475 +          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
122476 +            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
122477 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
122478 +            ip++;
122479 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
122480 +        } else if ( (matchIndex <= prefixStartIndex) ) {
122481 +            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
122482 +            U32 const dictMatchIndex = dictHashTable[dictHash];
122483 +            const BYTE* dictMatch = dictBase + dictMatchIndex;
122484 +            if (dictMatchIndex <= dictStartIndex ||
122485 +                MEM_read32(dictMatch) != MEM_read32(ip)) {
122486 +                assert(stepSize >= 1);
122487 +                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
122488 +                continue;
122489 +            } else {
122490 +                /* found a dict match */
122491 +                U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
122492 +                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
122493 +                while (((ip>anchor) & (dictMatch>dictStart))
122494 +                     && (ip[-1] == dictMatch[-1])) {
122495 +                    ip--; dictMatch--; mLength++;
122496 +                } /* catch up */
122497 +                offset_2 = offset_1;
122498 +                offset_1 = offset;
122499 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
122500 +            }
122501 +        } else if (MEM_read32(match) != MEM_read32(ip)) {
122502 +            /* it's not a match, and we're not going to check the dictionary */
122503 +            assert(stepSize >= 1);
122504 +            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
122505 +            continue;
122506 +        } else {
122507 +            /* found a regular match */
122508 +            U32 const offset = (U32)(ip-match);
122509 +            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
122510 +            while (((ip>anchor) & (match>prefixStart))
122511 +                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
122512 +            offset_2 = offset_1;
122513 +            offset_1 = offset;
122514 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
122515 +        }
122517 +        /* match found */
122518 +        ip += mLength;
122519 +        anchor = ip;
122521 +        if (ip <= ilimit) {
122522 +            /* Fill Table */
122523 +            assert(base+curr+2 > istart);  /* check base overflow */
122524 +            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
122525 +            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
122527 +            /* check immediate repcode */
122528 +            while (ip <= ilimit) {
122529 +                U32 const current2 = (U32)(ip-base);
122530 +                U32 const repIndex2 = current2 - offset_2;
122531 +                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
122532 +                        dictBase - dictIndexDelta + repIndex2 :
122533 +                        base + repIndex2;
122534 +                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
122535 +                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
122536 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
122537 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
122538 +                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
122539 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
122540 +                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
122541 +                    ip += repLength2;
122542 +                    anchor = ip;
122543 +                    continue;
122544 +                }
122545 +                break;
122546 +            }
122547 +        }
122548 +    }
122550 +    /* save reps for next block */
122551 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
122552 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
122554 +    /* Return the last literals size */
122555 +    return (size_t)(iend - anchor);
122558 +size_t ZSTD_compressBlock_fast_dictMatchState(
122559 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122560 +        void const* src, size_t srcSize)
122562 +    U32 const mls = ms->cParams.minMatch;
122563 +    assert(ms->dictMatchState != NULL);
122564 +    switch(mls)
122565 +    {
122566 +    default: /* includes case 3 */
122567 +    case 4 :
122568 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
122569 +    case 5 :
122570 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
122571 +    case 6 :
122572 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
122573 +    case 7 :
122574 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
122575 +    }
122579 +static size_t ZSTD_compressBlock_fast_extDict_generic(
122580 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122581 +        void const* src, size_t srcSize, U32 const mls)
122583 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
122584 +    U32* const hashTable = ms->hashTable;
122585 +    U32 const hlog = cParams->hashLog;
122586 +    /* support stepSize of 0 */
122587 +    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
122588 +    const BYTE* const base = ms->window.base;
122589 +    const BYTE* const dictBase = ms->window.dictBase;
122590 +    const BYTE* const istart = (const BYTE*)src;
122591 +    const BYTE* ip = istart;
122592 +    const BYTE* anchor = istart;
122593 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
122594 +    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
122595 +    const U32   dictStartIndex = lowLimit;
122596 +    const BYTE* const dictStart = dictBase + dictStartIndex;
122597 +    const U32   dictLimit = ms->window.dictLimit;
122598 +    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
122599 +    const BYTE* const prefixStart = base + prefixStartIndex;
122600 +    const BYTE* const dictEnd = dictBase + prefixStartIndex;
122601 +    const BYTE* const iend = istart + srcSize;
122602 +    const BYTE* const ilimit = iend - 8;
122603 +    U32 offset_1=rep[0], offset_2=rep[1];
122605 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
122607 +    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
122608 +    if (prefixStartIndex == dictStartIndex)
122609 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
122611 +    /* Search Loop */
122612 +    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
122613 +        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
122614 +        const U32    matchIndex = hashTable[h];
122615 +        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
122616 +        const BYTE*  match = matchBase + matchIndex;
122617 +        const U32    curr = (U32)(ip-base);
122618 +        const U32    repIndex = curr + 1 - offset_1;
122619 +        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
122620 +        const BYTE* const repMatch = repBase + repIndex;
122621 +        hashTable[h] = curr;   /* update hash table */
122622 +        DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
122623 +        assert(offset_1 <= curr +1);   /* check repIndex */
122625 +        if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
122626 +           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
122627 +            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
122628 +            size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
122629 +            ip++;
122630 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
122631 +            ip += rLength;
122632 +            anchor = ip;
122633 +        } else {
122634 +            if ( (matchIndex < dictStartIndex) ||
122635 +                 (MEM_read32(match) != MEM_read32(ip)) ) {
122636 +                assert(stepSize >= 1);
122637 +                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
122638 +                continue;
122639 +            }
122640 +            {   const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
122641 +                const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
122642 +                U32 const offset = curr - matchIndex;
122643 +                size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
122644 +                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
122645 +                offset_2 = offset_1; offset_1 = offset;  /* update offset history */
122646 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
122647 +                ip += mLength;
122648 +                anchor = ip;
122649 +        }   }
122651 +        if (ip <= ilimit) {
122652 +            /* Fill Table */
122653 +            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
122654 +            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
122655 +            /* check immediate repcode */
122656 +            while (ip <= ilimit) {
122657 +                U32 const current2 = (U32)(ip-base);
122658 +                U32 const repIndex2 = current2 - offset_2;
122659 +                const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
122660 +                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional overflow */
122661 +                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
122662 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
122663 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
122664 +                    { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
122665 +                    ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
122666 +                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
122667 +                    ip += repLength2;
122668 +                    anchor = ip;
122669 +                    continue;
122670 +                }
122671 +                break;
122672 +    }   }   }
122674 +    /* save reps for next block */
122675 +    rep[0] = offset_1;
122676 +    rep[1] = offset_2;
122678 +    /* Return the last literals size */
122679 +    return (size_t)(iend - anchor);
122683 +size_t ZSTD_compressBlock_fast_extDict(
122684 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122685 +        void const* src, size_t srcSize)
122687 +    U32 const mls = ms->cParams.minMatch;
122688 +    switch(mls)
122689 +    {
122690 +    default: /* includes case 3 */
122691 +    case 4 :
122692 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
122693 +    case 5 :
122694 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
122695 +    case 6 :
122696 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
122697 +    case 7 :
122698 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
122699 +    }
122701 diff --git a/lib/zstd/compress/zstd_fast.h b/lib/zstd/compress/zstd_fast.h
122702 new file mode 100644
122703 index 000000000000..fddc2f532d21
122704 --- /dev/null
122705 +++ b/lib/zstd/compress/zstd_fast.h
122706 @@ -0,0 +1,31 @@
122708 + * Copyright (c) Yann Collet, Facebook, Inc.
122709 + * All rights reserved.
122711 + * This source code is licensed under both the BSD-style license (found in the
122712 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
122713 + * in the COPYING file in the root directory of this source tree).
122714 + * You may select, at your option, one of the above-listed licenses.
122715 + */
122717 +#ifndef ZSTD_FAST_H
122718 +#define ZSTD_FAST_H
122721 +#include "../common/mem.h"      /* U32 */
122722 +#include "zstd_compress_internal.h"
122724 +void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
122725 +                        void const* end, ZSTD_dictTableLoadMethod_e dtlm);
122726 +size_t ZSTD_compressBlock_fast(
122727 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122728 +        void const* src, size_t srcSize);
122729 +size_t ZSTD_compressBlock_fast_dictMatchState(
122730 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122731 +        void const* src, size_t srcSize);
122732 +size_t ZSTD_compressBlock_fast_extDict(
122733 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
122734 +        void const* src, size_t srcSize);
122737 +#endif /* ZSTD_FAST_H */
122738 diff --git a/lib/zstd/compress/zstd_lazy.c b/lib/zstd/compress/zstd_lazy.c
122739 new file mode 100644
122740 index 000000000000..39aa2569aabc
122741 --- /dev/null
122742 +++ b/lib/zstd/compress/zstd_lazy.c
122743 @@ -0,0 +1,1412 @@
122745 + * Copyright (c) Yann Collet, Facebook, Inc.
122746 + * All rights reserved.
122748 + * This source code is licensed under both the BSD-style license (found in the
122749 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
122750 + * in the COPYING file in the root directory of this source tree).
122751 + * You may select, at your option, one of the above-listed licenses.
122752 + */
122754 +#include "zstd_compress_internal.h"
122755 +#include "zstd_lazy.h"
122758 +/*-*************************************
122759 +*  Binary Tree search
122760 +***************************************/
122762 +static void
122763 +ZSTD_updateDUBT(ZSTD_matchState_t* ms,
122764 +                const BYTE* ip, const BYTE* iend,
122765 +                U32 mls)
122767 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
122768 +    U32* const hashTable = ms->hashTable;
122769 +    U32  const hashLog = cParams->hashLog;
122771 +    U32* const bt = ms->chainTable;
122772 +    U32  const btLog  = cParams->chainLog - 1;
122773 +    U32  const btMask = (1 << btLog) - 1;
122775 +    const BYTE* const base = ms->window.base;
122776 +    U32 const target = (U32)(ip - base);
122777 +    U32 idx = ms->nextToUpdate;
122779 +    if (idx != target)
122780 +        DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
122781 +                    idx, target, ms->window.dictLimit);
122782 +    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
122783 +    (void)iend;
122785 +    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
122786 +    for ( ; idx < target ; idx++) {
122787 +        size_t const h  = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
122788 +        U32    const matchIndex = hashTable[h];
122790 +        U32*   const nextCandidatePtr = bt + 2*(idx&btMask);
122791 +        U32*   const sortMarkPtr  = nextCandidatePtr + 1;
122793 +        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
122794 +        hashTable[h] = idx;   /* Update Hash Table */
122795 +        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
122796 +        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
122797 +    }
122798 +    ms->nextToUpdate = target;
122802 +/** ZSTD_insertDUBT1() :
122803 + *  sort one already inserted but unsorted position
122804 + *  assumption : curr >= btlow == (curr - btmask)
122805 + *  doesn't fail */
122806 +static void
122807 +ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
122808 +                 U32 curr, const BYTE* inputEnd,
122809 +                 U32 nbCompares, U32 btLow,
122810 +                 const ZSTD_dictMode_e dictMode)
122812 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
122813 +    U32* const bt = ms->chainTable;
122814 +    U32  const btLog  = cParams->chainLog - 1;
122815 +    U32  const btMask = (1 << btLog) - 1;
122816 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
122817 +    const BYTE* const base = ms->window.base;
122818 +    const BYTE* const dictBase = ms->window.dictBase;
122819 +    const U32 dictLimit = ms->window.dictLimit;
122820 +    const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
122821 +    const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
122822 +    const BYTE* const dictEnd = dictBase + dictLimit;
122823 +    const BYTE* const prefixStart = base + dictLimit;
122824 +    const BYTE* match;
122825 +    U32* smallerPtr = bt + 2*(curr&btMask);
122826 +    U32* largerPtr  = smallerPtr + 1;
122827 +    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
122828 +    U32 dummy32;   /* to be nullified at the end */
122829 +    U32 const windowValid = ms->window.lowLimit;
122830 +    U32 const maxDistance = 1U << cParams->windowLog;
122831 +    U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;
122834 +    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
122835 +                curr, dictLimit, windowLow);
122836 +    assert(curr >= btLow);
122837 +    assert(ip < iend);   /* condition for ZSTD_count */
122839 +    while (nbCompares-- && (matchIndex > windowLow)) {
122840 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
122841 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
122842 +        assert(matchIndex < curr);
122843 +        /* note : all candidates are now supposed sorted,
122844 +         * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
122845 +         * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
122847 +        if ( (dictMode != ZSTD_extDict)
122848 +          || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
122849 +          || (curr < dictLimit) /* both in extDict */) {
122850 +            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
122851 +                                     || (matchIndex+matchLength >= dictLimit)) ?
122852 +                                        base : dictBase;
122853 +            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
122854 +                 || (curr < dictLimit) );
122855 +            match = mBase + matchIndex;
122856 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
122857 +        } else {
122858 +            match = dictBase + matchIndex;
122859 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
122860 +            if (matchIndex+matchLength >= dictLimit)
122861 +                match = base + matchIndex;   /* preparation for next read of match[matchLength] */
122862 +        }
122864 +        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
122865 +                    curr, matchIndex, (U32)matchLength);
122867 +        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
122868 +            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
122869 +        }
122871 +        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
122872 +            /* match is smaller than current */
122873 +            *smallerPtr = matchIndex;             /* update smaller idx */
122874 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
122875 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
122876 +            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
122877 +                        matchIndex, btLow, nextPtr[1]);
122878 +            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
122879 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
122880 +        } else {
122881 +            /* match is larger than current */
122882 +            *largerPtr = matchIndex;
122883 +            commonLengthLarger = matchLength;
122884 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
122885 +            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
122886 +                        matchIndex, btLow, nextPtr[0]);
122887 +            largerPtr = nextPtr;
122888 +            matchIndex = nextPtr[0];
122889 +    }   }
122891 +    *smallerPtr = *largerPtr = 0;
122895 +static size_t
122896 +ZSTD_DUBT_findBetterDictMatch (
122897 +        ZSTD_matchState_t* ms,
122898 +        const BYTE* const ip, const BYTE* const iend,
122899 +        size_t* offsetPtr,
122900 +        size_t bestLength,
122901 +        U32 nbCompares,
122902 +        U32 const mls,
122903 +        const ZSTD_dictMode_e dictMode)
122905 +    const ZSTD_matchState_t * const dms = ms->dictMatchState;
122906 +    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
122907 +    const U32 * const dictHashTable = dms->hashTable;
122908 +    U32         const hashLog = dmsCParams->hashLog;
122909 +    size_t      const h  = ZSTD_hashPtr(ip, hashLog, mls);
122910 +    U32               dictMatchIndex = dictHashTable[h];
122912 +    const BYTE* const base = ms->window.base;
122913 +    const BYTE* const prefixStart = base + ms->window.dictLimit;
122914 +    U32         const curr = (U32)(ip-base);
122915 +    const BYTE* const dictBase = dms->window.base;
122916 +    const BYTE* const dictEnd = dms->window.nextSrc;
122917 +    U32         const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
122918 +    U32         const dictLowLimit = dms->window.lowLimit;
122919 +    U32         const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
122921 +    U32*        const dictBt = dms->chainTable;
122922 +    U32         const btLog  = dmsCParams->chainLog - 1;
122923 +    U32         const btMask = (1 << btLog) - 1;
122924 +    U32         const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
122926 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
122928 +    (void)dictMode;
122929 +    assert(dictMode == ZSTD_dictMatchState);
122931 +    while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
122932 +        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
122933 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
122934 +        const BYTE* match = dictBase + dictMatchIndex;
122935 +        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
122936 +        if (dictMatchIndex+matchLength >= dictHighLimit)
122937 +            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */
122939 +        if (matchLength > bestLength) {
122940 +            U32 matchIndex = dictMatchIndex + dictIndexDelta;
122941 +            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
122942 +                DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
122943 +                    curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex);
122944 +                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
122945 +            }
122946 +            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
122947 +                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
122948 +            }
122949 +        }
122951 +        if (match[matchLength] < ip[matchLength]) {
122952 +            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
122953 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
122954 +            dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
122955 +        } else {
122956 +            /* match is larger than current */
122957 +            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
122958 +            commonLengthLarger = matchLength;
122959 +            dictMatchIndex = nextPtr[0];
122960 +        }
122961 +    }
122963 +    if (bestLength >= MINMATCH) {
122964 +        U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
122965 +        DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
122966 +                    curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
122967 +    }
122968 +    return bestLength;
122973 +static size_t
122974 +ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
122975 +                        const BYTE* const ip, const BYTE* const iend,
122976 +                        size_t* offsetPtr,
122977 +                        U32 const mls,
122978 +                        const ZSTD_dictMode_e dictMode)
122980 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
122981 +    U32*   const hashTable = ms->hashTable;
122982 +    U32    const hashLog = cParams->hashLog;
122983 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
122984 +    U32          matchIndex  = hashTable[h];
122986 +    const BYTE* const base = ms->window.base;
122987 +    U32    const curr = (U32)(ip-base);
122988 +    U32    const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
122990 +    U32*   const bt = ms->chainTable;
122991 +    U32    const btLog  = cParams->chainLog - 1;
122992 +    U32    const btMask = (1 << btLog) - 1;
122993 +    U32    const btLow = (btMask >= curr) ? 0 : curr - btMask;
122994 +    U32    const unsortLimit = MAX(btLow, windowLow);
122996 +    U32*         nextCandidate = bt + 2*(matchIndex&btMask);
122997 +    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
122998 +    U32          nbCompares = 1U << cParams->searchLog;
122999 +    U32          nbCandidates = nbCompares;
123000 +    U32          previousCandidate = 0;
123002 +    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
123003 +    assert(ip <= iend-8);   /* required for h calculation */
123004 +    assert(dictMode != ZSTD_dedicatedDictSearch);
123006 +    /* reach end of unsorted candidates list */
123007 +    while ( (matchIndex > unsortLimit)
123008 +         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
123009 +         && (nbCandidates > 1) ) {
123010 +        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
123011 +                    matchIndex);
123012 +        *unsortedMark = previousCandidate;  /* the unsortedMark becomes a reversed chain, to move up back to original position */
123013 +        previousCandidate = matchIndex;
123014 +        matchIndex = *nextCandidate;
123015 +        nextCandidate = bt + 2*(matchIndex&btMask);
123016 +        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
123017 +        nbCandidates --;
123018 +    }
123020 +    /* nullify last candidate if it's still unsorted
123021 +     * simplification, detrimental to compression ratio, beneficial for speed */
123022 +    if ( (matchIndex > unsortLimit)
123023 +      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
123024 +        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
123025 +                    matchIndex);
123026 +        *nextCandidate = *unsortedMark = 0;
123027 +    }
123029 +    /* batch sort stacked candidates */
123030 +    matchIndex = previousCandidate;
123031 +    while (matchIndex) {  /* will end on matchIndex == 0 */
123032 +        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
123033 +        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
123034 +        ZSTD_insertDUBT1(ms, matchIndex, iend,
123035 +                         nbCandidates, unsortLimit, dictMode);
123036 +        matchIndex = nextCandidateIdx;
123037 +        nbCandidates++;
123038 +    }
123040 +    /* find longest match */
123041 +    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;
123042 +        const BYTE* const dictBase = ms->window.dictBase;
123043 +        const U32 dictLimit = ms->window.dictLimit;
123044 +        const BYTE* const dictEnd = dictBase + dictLimit;
123045 +        const BYTE* const prefixStart = base + dictLimit;
123046 +        U32* smallerPtr = bt + 2*(curr&btMask);
123047 +        U32* largerPtr  = bt + 2*(curr&btMask) + 1;
123048 +        U32 matchEndIdx = curr + 8 + 1;
123049 +        U32 dummy32;   /* to be nullified at the end */
123050 +        size_t bestLength = 0;
123052 +        matchIndex  = hashTable[h];
123053 +        hashTable[h] = curr;   /* Update Hash Table */
123055 +        while (nbCompares-- && (matchIndex > windowLow)) {
123056 +            U32* const nextPtr = bt + 2*(matchIndex & btMask);
123057 +            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
123058 +            const BYTE* match;
123060 +            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
123061 +                match = base + matchIndex;
123062 +                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
123063 +            } else {
123064 +                match = dictBase + matchIndex;
123065 +                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
123066 +                if (matchIndex+matchLength >= dictLimit)
123067 +                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
123068 +            }
123070 +            if (matchLength > bestLength) {
123071 +                if (matchLength > matchEndIdx - matchIndex)
123072 +                    matchEndIdx = matchIndex + (U32)matchLength;
123073 +                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
123074 +                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
123075 +                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
123076 +                    if (dictMode == ZSTD_dictMatchState) {
123077 +                        nbCompares = 0; /* in addition to avoiding checking any
123078 +                                         * further in this loop, make sure we
123079 +                                         * skip checking in the dictionary. */
123080 +                    }
123081 +                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
123082 +                }
123083 +            }
123085 +            if (match[matchLength] < ip[matchLength]) {
123086 +                /* match is smaller than current */
123087 +                *smallerPtr = matchIndex;             /* update smaller idx */
123088 +                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
123089 +                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
123090 +                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
123091 +                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
123092 +            } else {
123093 +                /* match is larger than current */
123094 +                *largerPtr = matchIndex;
123095 +                commonLengthLarger = matchLength;
123096 +                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
123097 +                largerPtr = nextPtr;
123098 +                matchIndex = nextPtr[0];
123099 +        }   }
123101 +        *smallerPtr = *largerPtr = 0;
123103 +        if (dictMode == ZSTD_dictMatchState && nbCompares) {
123104 +            bestLength = ZSTD_DUBT_findBetterDictMatch(
123105 +                    ms, ip, iend,
123106 +                    offsetPtr, bestLength, nbCompares,
123107 +                    mls, dictMode);
123108 +        }
123110 +        assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
123111 +        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
123112 +        if (bestLength >= MINMATCH) {
123113 +            U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
123114 +            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
123115 +                        curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
123116 +        }
123117 +        return bestLength;
123118 +    }
123122 +/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
123123 +FORCE_INLINE_TEMPLATE size_t
123124 +ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
123125 +                const BYTE* const ip, const BYTE* const iLimit,
123126 +                      size_t* offsetPtr,
123127 +                const U32 mls /* template */,
123128 +                const ZSTD_dictMode_e dictMode)
123130 +    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
123131 +    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
123132 +    ZSTD_updateDUBT(ms, ip, iLimit, mls);
123133 +    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
123137 +static size_t
123138 +ZSTD_BtFindBestMatch_selectMLS (  ZSTD_matchState_t* ms,
123139 +                            const BYTE* ip, const BYTE* const iLimit,
123140 +                                  size_t* offsetPtr)
123142 +    switch(ms->cParams.minMatch)
123143 +    {
123144 +    default : /* includes case 3 */
123145 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
123146 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
123147 +    case 7 :
123148 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
123149 +    }
123153 +static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
123154 +                        ZSTD_matchState_t* ms,
123155 +                        const BYTE* ip, const BYTE* const iLimit,
123156 +                        size_t* offsetPtr)
123158 +    switch(ms->cParams.minMatch)
123159 +    {
123160 +    default : /* includes case 3 */
123161 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
123162 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
123163 +    case 7 :
123164 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
123165 +    }
123169 +static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
123170 +                        ZSTD_matchState_t* ms,
123171 +                        const BYTE* ip, const BYTE* const iLimit,
123172 +                        size_t* offsetPtr)
123174 +    switch(ms->cParams.minMatch)
123175 +    {
123176 +    default : /* includes case 3 */
123177 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
123178 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
123179 +    case 7 :
123180 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
123181 +    }
123186 +/* *********************************
123187 +*  Hash Chain
123188 +***********************************/
123189 +#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
123191 +/* Update chains up to ip (excluded)
123192 +   Assumption : always within prefix (i.e. not within extDict) */
123193 +FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
123194 +                        ZSTD_matchState_t* ms,
123195 +                        const ZSTD_compressionParameters* const cParams,
123196 +                        const BYTE* ip, U32 const mls)
123198 +    U32* const hashTable  = ms->hashTable;
123199 +    const U32 hashLog = cParams->hashLog;
123200 +    U32* const chainTable = ms->chainTable;
123201 +    const U32 chainMask = (1 << cParams->chainLog) - 1;
123202 +    const BYTE* const base = ms->window.base;
123203 +    const U32 target = (U32)(ip - base);
123204 +    U32 idx = ms->nextToUpdate;
123206 +    while(idx < target) { /* catch up */
123207 +        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
123208 +        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
123209 +        hashTable[h] = idx;
123210 +        idx++;
123211 +    }
123213 +    ms->nextToUpdate = target;
123214 +    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
123217 +U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
123218 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
123219 +    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
123222 +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
123224 +    const BYTE* const base = ms->window.base;
123225 +    U32 const target = (U32)(ip - base);
123226 +    U32* const hashTable = ms->hashTable;
123227 +    U32* const chainTable = ms->chainTable;
123228 +    U32 const chainSize = 1 << ms->cParams.chainLog;
123229 +    U32 idx = ms->nextToUpdate;
123230 +    U32 const minChain = chainSize < target ? target - chainSize : idx;
123231 +    U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
123232 +    U32 const cacheSize = bucketSize - 1;
123233 +    U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
123234 +    U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;
123236 +    /* We know the hashtable is oversized by a factor of `bucketSize`.
123237 +     * We are going to temporarily pretend `bucketSize == 1`, keeping only a
123238 +     * single entry. We will use the rest of the space to construct a temporary
123239 +     * chaintable.
123240 +     */
123241 +    U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
123242 +    U32* const tmpHashTable = hashTable;
123243 +    U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
123244 +    U32 const tmpChainSize = ((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
123245 +    U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
123247 +    U32 hashIdx;
123249 +    assert(ms->cParams.chainLog <= 24);
123250 +    assert(ms->cParams.hashLog >= ms->cParams.chainLog);
123251 +    assert(idx != 0);
123252 +    assert(tmpMinChain <= minChain);
123254 +    /* fill conventional hash table and conventional chain table */
123255 +    for ( ; idx < target; idx++) {
123256 +        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
123257 +        if (idx >= tmpMinChain) {
123258 +            tmpChainTable[idx - tmpMinChain] = hashTable[h];
123259 +        }
123260 +        tmpHashTable[h] = idx;
123261 +    }
123263 +    /* sort chains into ddss chain table */
123264 +    {
123265 +        U32 chainPos = 0;
123266 +        for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
123267 +            U32 count;
123268 +            U32 countBeyondMinChain = 0;
123269 +            U32 i = tmpHashTable[hashIdx];
123270 +            for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
123271 +                /* skip through the chain to the first position that won't be
123272 +                 * in the hash cache bucket */
123273 +                if (i < minChain) {
123274 +                    countBeyondMinChain++;
123275 +                }
123276 +                i = tmpChainTable[i - tmpMinChain];
123277 +            }
123278 +            if (count == cacheSize) {
123279 +                for (count = 0; count < chainLimit;) {
123280 +                    if (i < minChain) {
123281 +                        if (!i || countBeyondMinChain++ > cacheSize) {
123282 +                            /* only allow pulling `cacheSize` number of entries
123283 +                             * into the cache or chainTable beyond `minChain`,
123284 +                             * to replace the entries pulled out of the
123285 +                             * chainTable into the cache. This lets us reach
123286 +                             * back further without increasing the total number
123287 +                             * of entries in the chainTable, guaranteeing the
123288 +                             * DDSS chain table will fit into the space
123289 +                             * allocated for the regular one. */
123290 +                            break;
123291 +                        }
123292 +                    }
123293 +                    chainTable[chainPos++] = i;
123294 +                    count++;
123295 +                    if (i < tmpMinChain) {
123296 +                        break;
123297 +                    }
123298 +                    i = tmpChainTable[i - tmpMinChain];
123299 +                }
123300 +            } else {
123301 +                count = 0;
123302 +            }
123303 +            if (count) {
123304 +                tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
123305 +            } else {
123306 +                tmpHashTable[hashIdx] = 0;
123307 +            }
123308 +        }
123309 +        assert(chainPos <= chainSize); /* I believe this is guaranteed... */
123310 +    }
123312 +    /* move chain pointers into the last entry of each hash bucket */
123313 +    for (hashIdx = (1 << hashLog); hashIdx; ) {
123314 +        U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
123315 +        U32 const chainPackedPointer = tmpHashTable[hashIdx];
123316 +        U32 i;
123317 +        for (i = 0; i < cacheSize; i++) {
123318 +            hashTable[bucketIdx + i] = 0;
123319 +        }
123320 +        hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
123321 +    }
123323 +    /* fill the buckets of the hash table */
123324 +    for (idx = ms->nextToUpdate; idx < target; idx++) {
123325 +        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
123326 +                   << ZSTD_LAZY_DDSS_BUCKET_LOG;
123327 +        U32 i;
123328 +        /* Shift hash cache down 1. */
123329 +        for (i = cacheSize - 1; i; i--)
123330 +            hashTable[h + i] = hashTable[h + i - 1];
123331 +        hashTable[h] = idx;
123332 +    }
123334 +    ms->nextToUpdate = target;
123338 +/* inlining is important to hardwire a hot branch (template emulation) */
123339 +FORCE_INLINE_TEMPLATE
123340 +size_t ZSTD_HcFindBestMatch_generic (
123341 +                        ZSTD_matchState_t* ms,
123342 +                        const BYTE* const ip, const BYTE* const iLimit,
123343 +                        size_t* offsetPtr,
123344 +                        const U32 mls, const ZSTD_dictMode_e dictMode)
123346 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
123347 +    U32* const chainTable = ms->chainTable;
123348 +    const U32 chainSize = (1 << cParams->chainLog);
123349 +    const U32 chainMask = chainSize-1;
123350 +    const BYTE* const base = ms->window.base;
123351 +    const BYTE* const dictBase = ms->window.dictBase;
123352 +    const U32 dictLimit = ms->window.dictLimit;
123353 +    const BYTE* const prefixStart = base + dictLimit;
123354 +    const BYTE* const dictEnd = dictBase + dictLimit;
123355 +    const U32 curr = (U32)(ip-base);
123356 +    const U32 maxDistance = 1U << cParams->windowLog;
123357 +    const U32 lowestValid = ms->window.lowLimit;
123358 +    const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
123359 +    const U32 isDictionary = (ms->loadedDictEnd != 0);
123360 +    const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
123361 +    const U32 minChain = curr > chainSize ? curr - chainSize : 0;
123362 +    U32 nbAttempts = 1U << cParams->searchLog;
123363 +    size_t ml=4-1;
123365 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
123366 +    const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
123367 +                         ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
123368 +    const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
123369 +                        ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
123371 +    U32 matchIndex;
123373 +    if (dictMode == ZSTD_dedicatedDictSearch) {
123374 +        const U32* entry = &dms->hashTable[ddsIdx];
123375 +        PREFETCH_L1(entry);
123376 +    }
123378 +    /* HC4 match finder */
123379 +    matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
123381 +    for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
123382 +        size_t currentMl=0;
123383 +        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
123384 +            const BYTE* const match = base + matchIndex;
123385 +            assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
123386 +            if (match[ml] == ip[ml])   /* potentially better */
123387 +                currentMl = ZSTD_count(ip, match, iLimit);
123388 +        } else {
123389 +            const BYTE* const match = dictBase + matchIndex;
123390 +            assert(match+4 <= dictEnd);
123391 +            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
123392 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
123393 +        }
123395 +        /* save best solution */
123396 +        if (currentMl > ml) {
123397 +            ml = currentMl;
123398 +            *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
123399 +            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
123400 +        }
123402 +        if (matchIndex <= minChain) break;
123403 +        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
123404 +    }
123406 +    if (dictMode == ZSTD_dedicatedDictSearch) {
123407 +        const U32 ddsLowestIndex  = dms->window.dictLimit;
123408 +        const BYTE* const ddsBase = dms->window.base;
123409 +        const BYTE* const ddsEnd  = dms->window.nextSrc;
123410 +        const U32 ddsSize         = (U32)(ddsEnd - ddsBase);
123411 +        const U32 ddsIndexDelta   = dictLimit - ddsSize;
123412 +        const U32 bucketSize      = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
123413 +        const U32 bucketLimit     = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
123414 +        U32 ddsAttempt;
123416 +        for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
123417 +            PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
123418 +        }
123420 +        {
123421 +            U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
123422 +            U32 const chainIndex = chainPackedPointer >> 8;
123424 +            PREFETCH_L1(&dms->chainTable[chainIndex]);
123425 +        }
123427 +        for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
123428 +            size_t currentMl=0;
123429 +            const BYTE* match;
123430 +            matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
123431 +            match = ddsBase + matchIndex;
123433 +            if (!matchIndex) {
123434 +                return ml;
123435 +            }
123437 +            /* guaranteed by table construction */
123438 +            (void)ddsLowestIndex;
123439 +            assert(matchIndex >= ddsLowestIndex);
123440 +            assert(match+4 <= ddsEnd);
123441 +            if (MEM_read32(match) == MEM_read32(ip)) {
123442 +                /* assumption : matchIndex <= dictLimit-4 (by table construction) */
123443 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
123444 +            }
123446 +            /* save best solution */
123447 +            if (currentMl > ml) {
123448 +                ml = currentMl;
123449 +                *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
123450 +                if (ip+currentMl == iLimit) {
123451 +                    /* best possible, avoids read overflow on next attempt */
123452 +                    return ml;
123453 +                }
123454 +            }
123455 +        }
123457 +        {
123458 +            U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
123459 +            U32 chainIndex = chainPackedPointer >> 8;
123460 +            U32 const chainLength = chainPackedPointer & 0xFF;
123461 +            U32 const chainAttempts = nbAttempts - ddsAttempt;
123462 +            U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
123463 +            U32 chainAttempt;
123465 +            for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
123466 +                PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
123467 +            }
123469 +            for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
123470 +                size_t currentMl=0;
123471 +                const BYTE* match;
123472 +                matchIndex = dms->chainTable[chainIndex];
123473 +                match = ddsBase + matchIndex;
123475 +                /* guaranteed by table construction */
123476 +                assert(matchIndex >= ddsLowestIndex);
123477 +                assert(match+4 <= ddsEnd);
123478 +                if (MEM_read32(match) == MEM_read32(ip)) {
123479 +                    /* assumption : matchIndex <= dictLimit-4 (by table construction) */
123480 +                    currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
123481 +                }
123483 +                /* save best solution */
123484 +                if (currentMl > ml) {
123485 +                    ml = currentMl;
123486 +                    *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
123487 +                    if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
123488 +                }
123489 +            }
123490 +        }
123491 +    } else if (dictMode == ZSTD_dictMatchState) {
123492 +        const U32* const dmsChainTable = dms->chainTable;
123493 +        const U32 dmsChainSize         = (1 << dms->cParams.chainLog);
123494 +        const U32 dmsChainMask         = dmsChainSize - 1;
123495 +        const U32 dmsLowestIndex       = dms->window.dictLimit;
123496 +        const BYTE* const dmsBase      = dms->window.base;
123497 +        const BYTE* const dmsEnd       = dms->window.nextSrc;
123498 +        const U32 dmsSize              = (U32)(dmsEnd - dmsBase);
123499 +        const U32 dmsIndexDelta        = dictLimit - dmsSize;
123500 +        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
123502 +        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
123504 +        for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
123505 +            size_t currentMl=0;
123506 +            const BYTE* const match = dmsBase + matchIndex;
123507 +            assert(match+4 <= dmsEnd);
123508 +            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
123509 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
123511 +            /* save best solution */
123512 +            if (currentMl > ml) {
123513 +                ml = currentMl;
123514 +                *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
123515 +                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
123516 +            }
123518 +            if (matchIndex <= dmsMinChain) break;
123520 +            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
123521 +        }
123522 +    }
123524 +    return ml;
123528 +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
123529 +                        ZSTD_matchState_t* ms,
123530 +                        const BYTE* ip, const BYTE* const iLimit,
123531 +                        size_t* offsetPtr)
123533 +    switch(ms->cParams.minMatch)
123534 +    {
123535 +    default : /* includes case 3 */
123536 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
123537 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
123538 +    case 7 :
123539 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
123540 +    }
123544 +static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
123545 +                        ZSTD_matchState_t* ms,
123546 +                        const BYTE* ip, const BYTE* const iLimit,
123547 +                        size_t* offsetPtr)
123549 +    switch(ms->cParams.minMatch)
123550 +    {
123551 +    default : /* includes case 3 */
123552 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
123553 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
123554 +    case 7 :
123555 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
123556 +    }
123560 +static size_t ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS (
123561 +                        ZSTD_matchState_t* ms,
123562 +                        const BYTE* ip, const BYTE* const iLimit,
123563 +                        size_t* offsetPtr)
123565 +    switch(ms->cParams.minMatch)
123566 +    {
123567 +    default : /* includes case 3 */
123568 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch);
123569 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch);
123570 +    case 7 :
123571 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch);
123572 +    }
123576 +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
123577 +                        ZSTD_matchState_t* ms,
123578 +                        const BYTE* ip, const BYTE* const iLimit,
123579 +                        size_t* offsetPtr)
123581 +    switch(ms->cParams.minMatch)
123582 +    {
123583 +    default : /* includes case 3 */
123584 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
123585 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
123586 +    case 7 :
123587 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
123588 +    }
123592 +/* *******************************
123593 +*  Common parser - lazy strategy
123594 +*********************************/
123595 +typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
123597 +FORCE_INLINE_TEMPLATE size_t
123598 +ZSTD_compressBlock_lazy_generic(
123599 +                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
123600 +                        U32 rep[ZSTD_REP_NUM],
123601 +                        const void* src, size_t srcSize,
123602 +                        const searchMethod_e searchMethod, const U32 depth,
123603 +                        ZSTD_dictMode_e const dictMode)
123605 +    const BYTE* const istart = (const BYTE*)src;
123606 +    const BYTE* ip = istart;
123607 +    const BYTE* anchor = istart;
123608 +    const BYTE* const iend = istart + srcSize;
123609 +    const BYTE* const ilimit = iend - 8;
123610 +    const BYTE* const base = ms->window.base;
123611 +    const U32 prefixLowestIndex = ms->window.dictLimit;
123612 +    const BYTE* const prefixLowest = base + prefixLowestIndex;
123614 +    typedef size_t (*searchMax_f)(
123615 +                        ZSTD_matchState_t* ms,
123616 +                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
123618 +    /**
123619 +     * This table is indexed first by the four ZSTD_dictMode_e values, and then
123620 +     * by the two searchMethod_e values. NULLs are placed for configurations
123621 +     * that should never occur (extDict modes go to the other implementation
123622 +     * below and there is no DDSS for binary tree search yet).
123623 +     */
123624 +    const searchMax_f searchFuncs[4][2] = {
123625 +        {
123626 +            ZSTD_HcFindBestMatch_selectMLS,
123627 +            ZSTD_BtFindBestMatch_selectMLS
123628 +        },
123629 +        {
123630 +            NULL,
123631 +            NULL
123632 +        },
123633 +        {
123634 +            ZSTD_HcFindBestMatch_dictMatchState_selectMLS,
123635 +            ZSTD_BtFindBestMatch_dictMatchState_selectMLS
123636 +        },
123637 +        {
123638 +            ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS,
123639 +            NULL
123640 +        }
123641 +    };
123643 +    searchMax_f const searchMax = searchFuncs[dictMode][searchMethod == search_binaryTree];
123644 +    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
123646 +    const int isDMS = dictMode == ZSTD_dictMatchState;
123647 +    const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
123648 +    const int isDxS = isDMS || isDDS;
123649 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
123650 +    const U32 dictLowestIndex      = isDxS ? dms->window.dictLimit : 0;
123651 +    const BYTE* const dictBase     = isDxS ? dms->window.base : NULL;
123652 +    const BYTE* const dictLowest   = isDxS ? dictBase + dictLowestIndex : NULL;
123653 +    const BYTE* const dictEnd      = isDxS ? dms->window.nextSrc : NULL;
123654 +    const U32 dictIndexDelta       = isDxS ?
123655 +                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
123656 +                                     0;
123657 +    const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
123659 +    assert(searchMax != NULL);
123661 +    DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode);
123663 +    /* init */
123664 +    ip += (dictAndPrefixLength == 0);
123665 +    if (dictMode == ZSTD_noDict) {
123666 +        U32 const curr = (U32)(ip - base);
123667 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
123668 +        U32 const maxRep = curr - windowLow;
123669 +        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
123670 +        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
123671 +    }
123672 +    if (isDxS) {
123673 +        /* dictMatchState repCode checks don't currently handle repCode == 0
123674 +         * disabling. */
123675 +        assert(offset_1 <= dictAndPrefixLength);
123676 +        assert(offset_2 <= dictAndPrefixLength);
123677 +    }
123679 +    /* Match Loop */
123680 +#if defined(__x86_64__)
123681 +    /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the
123682 +     * code alignment is perturbed. To fix the instability align the loop on 32-bytes.
123683 +     */
123684 +    __asm__(".p2align 5");
123685 +#endif
123686 +    while (ip < ilimit) {
123687 +        size_t matchLength=0;
123688 +        size_t offset=0;
123689 +        const BYTE* start=ip+1;
123691 +        /* check repCode */
123692 +        if (isDxS) {
123693 +            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
123694 +            const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
123695 +                                && repIndex < prefixLowestIndex) ?
123696 +                                   dictBase + (repIndex - dictIndexDelta) :
123697 +                                   base + repIndex;
123698 +            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
123699 +                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
123700 +                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
123701 +                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
123702 +                if (depth==0) goto _storeSequence;
123703 +            }
123704 +        }
123705 +        if ( dictMode == ZSTD_noDict
123706 +          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
123707 +            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
123708 +            if (depth==0) goto _storeSequence;
123709 +        }
123711 +        /* first search (depth 0) */
123712 +        {   size_t offsetFound = 999999999;
123713 +            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
123714 +            if (ml2 > matchLength)
123715 +                matchLength = ml2, start = ip, offset=offsetFound;
123716 +        }
123718 +        if (matchLength < 4) {
123719 +            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
123720 +            continue;
123721 +        }
123723 +        /* let's try to find a better solution */
123724 +        if (depth>=1)
123725 +        while (ip<ilimit) {
123726 +            ip ++;
123727 +            if ( (dictMode == ZSTD_noDict)
123728 +              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
123729 +                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
123730 +                int const gain2 = (int)(mlRep * 3);
123731 +                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
123732 +                if ((mlRep >= 4) && (gain2 > gain1))
123733 +                    matchLength = mlRep, offset = 0, start = ip;
123734 +            }
123735 +            if (isDxS) {
123736 +                const U32 repIndex = (U32)(ip - base) - offset_1;
123737 +                const BYTE* repMatch = repIndex < prefixLowestIndex ?
123738 +                               dictBase + (repIndex - dictIndexDelta) :
123739 +                               base + repIndex;
123740 +                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
123741 +                    && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
123742 +                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
123743 +                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
123744 +                    int const gain2 = (int)(mlRep * 3);
123745 +                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
123746 +                    if ((mlRep >= 4) && (gain2 > gain1))
123747 +                        matchLength = mlRep, offset = 0, start = ip;
123748 +                }
123749 +            }
123750 +            {   size_t offset2=999999999;
123751 +                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
123752 +                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
123753 +                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
123754 +                if ((ml2 >= 4) && (gain2 > gain1)) {
123755 +                    matchLength = ml2, offset = offset2, start = ip;
123756 +                    continue;   /* search a better one */
123757 +            }   }
123759 +            /* let's find an even better one */
123760 +            if ((depth==2) && (ip<ilimit)) {
123761 +                ip ++;
123762 +                if ( (dictMode == ZSTD_noDict)
123763 +                  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
123764 +                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
123765 +                    int const gain2 = (int)(mlRep * 4);
123766 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
123767 +                    if ((mlRep >= 4) && (gain2 > gain1))
123768 +                        matchLength = mlRep, offset = 0, start = ip;
123769 +                }
123770 +                if (isDxS) {
123771 +                    const U32 repIndex = (U32)(ip - base) - offset_1;
123772 +                    const BYTE* repMatch = repIndex < prefixLowestIndex ?
123773 +                                   dictBase + (repIndex - dictIndexDelta) :
123774 +                                   base + repIndex;
123775 +                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
123776 +                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
123777 +                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
123778 +                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
123779 +                        int const gain2 = (int)(mlRep * 4);
123780 +                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
123781 +                        if ((mlRep >= 4) && (gain2 > gain1))
123782 +                            matchLength = mlRep, offset = 0, start = ip;
123783 +                    }
123784 +                }
123785 +                {   size_t offset2=999999999;
123786 +                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
123787 +                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
123788 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
123789 +                    if ((ml2 >= 4) && (gain2 > gain1)) {
123790 +                        matchLength = ml2, offset = offset2, start = ip;
123791 +                        continue;
123792 +            }   }   }
123793 +            break;  /* nothing found : store previous solution */
123794 +        }
123796 +        /* NOTE:
123797 +         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
123798 +         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
123799 +         * overflows the pointer, which is undefined behavior.
123800 +         */
123801 +        /* catch up */
123802 +        if (offset) {
123803 +            if (dictMode == ZSTD_noDict) {
123804 +                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
123805 +                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
123806 +                    { start--; matchLength++; }
123807 +            }
123808 +            if (isDxS) {
123809 +                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
123810 +                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
123811 +                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
123812 +                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
123813 +            }
123814 +            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
123815 +        }
123816 +        /* store sequence */
123817 +_storeSequence:
123818 +        {   size_t const litLength = start - anchor;
123819 +            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
123820 +            anchor = ip = start + matchLength;
123821 +        }
123823 +        /* check immediate repcode */
123824 +        if (isDxS) {
123825 +            while (ip <= ilimit) {
123826 +                U32 const current2 = (U32)(ip-base);
123827 +                U32 const repIndex = current2 - offset_2;
123828 +                const BYTE* repMatch = repIndex < prefixLowestIndex ?
123829 +                        dictBase - dictIndexDelta + repIndex :
123830 +                        base + repIndex;
123831 +                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
123832 +                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
123833 +                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
123834 +                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
123835 +                    offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
123836 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
123837 +                    ip += matchLength;
123838 +                    anchor = ip;
123839 +                    continue;
123840 +                }
123841 +                break;
123842 +            }
123843 +        }
123845 +        if (dictMode == ZSTD_noDict) {
123846 +            while ( ((ip <= ilimit) & (offset_2>0))
123847 +                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
123848 +                /* store sequence */
123849 +                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
123850 +                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
123851 +                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
123852 +                ip += matchLength;
123853 +                anchor = ip;
123854 +                continue;   /* faster when present ... (?) */
123855 +    }   }   }
123857 +    /* Save reps for next block */
123858 +    rep[0] = offset_1 ? offset_1 : savedOffset;
123859 +    rep[1] = offset_2 ? offset_2 : savedOffset;
123861 +    /* Return the last literals size */
123862 +    return (size_t)(iend - anchor);
123866 +size_t ZSTD_compressBlock_btlazy2(
123867 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
123868 +        void const* src, size_t srcSize)
123870 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
123873 +size_t ZSTD_compressBlock_lazy2(
123874 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
123875 +        void const* src, size_t srcSize)
123877 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
123880 +size_t ZSTD_compressBlock_lazy(
123881 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
123882 +        void const* src, size_t srcSize)
123884 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
123887 +size_t ZSTD_compressBlock_greedy(
123888 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
123889 +        void const* src, size_t srcSize)
123891 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
123894 +size_t ZSTD_compressBlock_btlazy2_dictMatchState(
123895 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
123896 +        void const* src, size_t srcSize)
123898 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
123901 +size_t ZSTD_compressBlock_lazy2_dictMatchState(
123902 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
123903 +        void const* src, size_t srcSize)
123905 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
123908 +size_t ZSTD_compressBlock_lazy_dictMatchState(
123909 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
123910 +        void const* src, size_t srcSize)
123912 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
123915 +size_t ZSTD_compressBlock_greedy_dictMatchState(
123916 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
123917 +        void const* src, size_t srcSize)
123919 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
123923 +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
123924 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
123925 +        void const* src, size_t srcSize)
123927 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
123930 +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
123931 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
123932 +        void const* src, size_t srcSize)
123934 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
123937 +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
123938 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
123939 +        void const* src, size_t srcSize)
123941 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
123945 +FORCE_INLINE_TEMPLATE
123946 +size_t ZSTD_compressBlock_lazy_extDict_generic(
123947 +                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
123948 +                        U32 rep[ZSTD_REP_NUM],
123949 +                        const void* src, size_t srcSize,
123950 +                        const searchMethod_e searchMethod, const U32 depth)
123952 +    const BYTE* const istart = (const BYTE*)src;
123953 +    const BYTE* ip = istart;
123954 +    const BYTE* anchor = istart;
123955 +    const BYTE* const iend = istart + srcSize;
123956 +    const BYTE* const ilimit = iend - 8;
123957 +    const BYTE* const base = ms->window.base;
123958 +    const U32 dictLimit = ms->window.dictLimit;
123959 +    const BYTE* const prefixStart = base + dictLimit;
123960 +    const BYTE* const dictBase = ms->window.dictBase;
123961 +    const BYTE* const dictEnd  = dictBase + dictLimit;
123962 +    const BYTE* const dictStart  = dictBase + ms->window.lowLimit;
123963 +    const U32 windowLog = ms->cParams.windowLog;
123965 +    typedef size_t (*searchMax_f)(
123966 +                        ZSTD_matchState_t* ms,
123967 +                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
123968 +    searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
123970 +    U32 offset_1 = rep[0], offset_2 = rep[1];
123972 +    DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic");
123974 +    /* init */
123975 +    ip += (ip == prefixStart);
123977 +    /* Match Loop */
123978 +#if defined(__x86_64__)
123979 +    /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the
123980 +     * code alignment is perturbed. To fix the instability align the loop on 32-bytes.
123981 +     */
123982 +    __asm__(".p2align 5");
123983 +#endif
123984 +    while (ip < ilimit) {
123985 +        size_t matchLength=0;
123986 +        size_t offset=0;
123987 +        const BYTE* start=ip+1;
123988 +        U32 curr = (U32)(ip-base);
123990 +        /* check repCode */
123991 +        {   const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
123992 +            const U32 repIndex = (U32)(curr+1 - offset_1);
123993 +            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
123994 +            const BYTE* const repMatch = repBase + repIndex;
123995 +            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))   /* intentional overflow */
123996 +            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
123997 +                /* repcode detected we should take it */
123998 +                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
123999 +                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
124000 +                if (depth==0) goto _storeSequence;
124001 +        }   }
124003 +        /* first search (depth 0) */
124004 +        {   size_t offsetFound = 999999999;
124005 +            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
124006 +            if (ml2 > matchLength)
124007 +                matchLength = ml2, start = ip, offset=offsetFound;
124008 +        }
124010 +         if (matchLength < 4) {
124011 +            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
124012 +            continue;
124013 +        }
124015 +        /* let's try to find a better solution */
124016 +        if (depth>=1)
124017 +        while (ip<ilimit) {
124018 +            ip ++;
124019 +            curr++;
124020 +            /* check repCode */
124021 +            if (offset) {
124022 +                const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
124023 +                const U32 repIndex = (U32)(curr - offset_1);
124024 +                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
124025 +                const BYTE* const repMatch = repBase + repIndex;
124026 +                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
124027 +                if (MEM_read32(ip) == MEM_read32(repMatch)) {
124028 +                    /* repcode detected */
124029 +                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
124030 +                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
124031 +                    int const gain2 = (int)(repLength * 3);
124032 +                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
124033 +                    if ((repLength >= 4) && (gain2 > gain1))
124034 +                        matchLength = repLength, offset = 0, start = ip;
124035 +            }   }
124037 +            /* search match, depth 1 */
124038 +            {   size_t offset2=999999999;
124039 +                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
124040 +                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
124041 +                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
124042 +                if ((ml2 >= 4) && (gain2 > gain1)) {
124043 +                    matchLength = ml2, offset = offset2, start = ip;
124044 +                    continue;   /* search a better one */
124045 +            }   }
124047 +            /* let's find an even better one */
124048 +            if ((depth==2) && (ip<ilimit)) {
124049 +                ip ++;
124050 +                curr++;
124051 +                /* check repCode */
124052 +                if (offset) {
124053 +                    const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
124054 +                    const U32 repIndex = (U32)(curr - offset_1);
124055 +                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
124056 +                    const BYTE* const repMatch = repBase + repIndex;
124057 +                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
124058 +                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
124059 +                        /* repcode detected */
124060 +                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
124061 +                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
124062 +                        int const gain2 = (int)(repLength * 4);
124063 +                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
124064 +                        if ((repLength >= 4) && (gain2 > gain1))
124065 +                            matchLength = repLength, offset = 0, start = ip;
124066 +                }   }
124068 +                /* search match, depth 2 */
124069 +                {   size_t offset2=999999999;
124070 +                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
124071 +                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
124072 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
124073 +                    if ((ml2 >= 4) && (gain2 > gain1)) {
124074 +                        matchLength = ml2, offset = offset2, start = ip;
124075 +                        continue;
124076 +            }   }   }
124077 +            break;  /* nothing found : store previous solution */
124078 +        }
124080 +        /* catch up */
124081 +        if (offset) {
124082 +            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
124083 +            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
124084 +            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
124085 +            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
124086 +            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
124087 +        }
124089 +        /* store sequence */
124090 +_storeSequence:
124091 +        {   size_t const litLength = start - anchor;
124092 +            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
124093 +            anchor = ip = start + matchLength;
124094 +        }
124096 +        /* check immediate repcode */
124097 +        while (ip <= ilimit) {
124098 +            const U32 repCurrent = (U32)(ip-base);
124099 +            const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
124100 +            const U32 repIndex = repCurrent - offset_2;
124101 +            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
124102 +            const BYTE* const repMatch = repBase + repIndex;
124103 +            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
124104 +            if (MEM_read32(ip) == MEM_read32(repMatch)) {
124105 +                /* repcode detected we should take it */
124106 +                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
124107 +                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
124108 +                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
124109 +                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
124110 +                ip += matchLength;
124111 +                anchor = ip;
124112 +                continue;   /* faster when present ... (?) */
124113 +            }
124114 +            break;
124115 +    }   }
124117 +    /* Save reps for next block */
124118 +    rep[0] = offset_1;
124119 +    rep[1] = offset_2;
124121 +    /* Return the last literals size */
124122 +    return (size_t)(iend - anchor);
124126 +size_t ZSTD_compressBlock_greedy_extDict(
124127 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124128 +        void const* src, size_t srcSize)
124130 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
124133 +size_t ZSTD_compressBlock_lazy_extDict(
124134 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124135 +        void const* src, size_t srcSize)
124138 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
124141 +size_t ZSTD_compressBlock_lazy2_extDict(
124142 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124143 +        void const* src, size_t srcSize)
124146 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
124149 +size_t ZSTD_compressBlock_btlazy2_extDict(
124150 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124151 +        void const* src, size_t srcSize)
124154 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
124156 diff --git a/lib/zstd/compress/zstd_lazy.h b/lib/zstd/compress/zstd_lazy.h
124157 new file mode 100644
124158 index 000000000000..1fb7621e6a88
124159 --- /dev/null
124160 +++ b/lib/zstd/compress/zstd_lazy.h
124161 @@ -0,0 +1,81 @@
124163 + * Copyright (c) Yann Collet, Facebook, Inc.
124164 + * All rights reserved.
124166 + * This source code is licensed under both the BSD-style license (found in the
124167 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
124168 + * in the COPYING file in the root directory of this source tree).
124169 + * You may select, at your option, one of the above-listed licenses.
124170 + */
124172 +#ifndef ZSTD_LAZY_H
124173 +#define ZSTD_LAZY_H
124176 +#include "zstd_compress_internal.h"
124179 + * Dedicated Dictionary Search Structure bucket log. In the
124180 + * ZSTD_dedicatedDictSearch mode, the hashTable has
124181 + * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just
124182 + * one.
124183 + */
124184 +#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
124186 +U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
124188 +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
124190 +void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
124192 +size_t ZSTD_compressBlock_btlazy2(
124193 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124194 +        void const* src, size_t srcSize);
124195 +size_t ZSTD_compressBlock_lazy2(
124196 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124197 +        void const* src, size_t srcSize);
124198 +size_t ZSTD_compressBlock_lazy(
124199 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124200 +        void const* src, size_t srcSize);
124201 +size_t ZSTD_compressBlock_greedy(
124202 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124203 +        void const* src, size_t srcSize);
124205 +size_t ZSTD_compressBlock_btlazy2_dictMatchState(
124206 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124207 +        void const* src, size_t srcSize);
124208 +size_t ZSTD_compressBlock_lazy2_dictMatchState(
124209 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124210 +        void const* src, size_t srcSize);
124211 +size_t ZSTD_compressBlock_lazy_dictMatchState(
124212 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124213 +        void const* src, size_t srcSize);
124214 +size_t ZSTD_compressBlock_greedy_dictMatchState(
124215 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124216 +        void const* src, size_t srcSize);
124218 +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
124219 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124220 +        void const* src, size_t srcSize);
124221 +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
124222 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124223 +        void const* src, size_t srcSize);
124224 +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
124225 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124226 +        void const* src, size_t srcSize);
124228 +size_t ZSTD_compressBlock_greedy_extDict(
124229 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124230 +        void const* src, size_t srcSize);
124231 +size_t ZSTD_compressBlock_lazy_extDict(
124232 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124233 +        void const* src, size_t srcSize);
124234 +size_t ZSTD_compressBlock_lazy2_extDict(
124235 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124236 +        void const* src, size_t srcSize);
124237 +size_t ZSTD_compressBlock_btlazy2_extDict(
124238 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124239 +        void const* src, size_t srcSize);
124242 +#endif /* ZSTD_LAZY_H */
124243 diff --git a/lib/zstd/compress/zstd_ldm.c b/lib/zstd/compress/zstd_ldm.c
124244 new file mode 100644
124245 index 000000000000..084fd24fdca8
124246 --- /dev/null
124247 +++ b/lib/zstd/compress/zstd_ldm.c
124248 @@ -0,0 +1,686 @@
124250 + * Copyright (c) Yann Collet, Facebook, Inc.
124251 + * All rights reserved.
124253 + * This source code is licensed under both the BSD-style license (found in the
124254 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
124255 + * in the COPYING file in the root directory of this source tree).
124256 + * You may select, at your option, one of the above-listed licenses.
124257 + */
124259 +#include "zstd_ldm.h"
124261 +#include "../common/debug.h"
124262 +#include <linux/xxhash.h>
124263 +#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
124264 +#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
124265 +#include "zstd_ldm_geartab.h"
124267 +#define LDM_BUCKET_SIZE_LOG 3
124268 +#define LDM_MIN_MATCH_LENGTH 64
124269 +#define LDM_HASH_RLOG 7
124271 +typedef struct {
124272 +    U64 rolling;
124273 +    U64 stopMask;
124274 +} ldmRollingHashState_t;
124276 +/** ZSTD_ldm_gear_init():
124278 + * Initializes the rolling hash state such that it will honor the
124279 + * settings in params. */
124280 +static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
124282 +    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
124283 +    unsigned hashRateLog = params->hashRateLog;
124285 +    state->rolling = ~(U32)0;
124287 +    /* The choice of the splitting criterion is subject to two conditions:
124288 +     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
124289 +     *   2. ideally, it has to depend on a window of minMatchLength bytes.
124290 +     *
124291 +     * In the gear hash algorithm, bit n depends on the last n bytes;
124292 +     * so in order to obtain a good quality splitting criterion it is
124293 +     * preferable to use bits with high weight.
124294 +     *
124295 +     * To match condition 1 we use a mask with hashRateLog bits set
124296 +     * and, because of the previous remark, we make sure these bits
124297 +     * have the highest possible weight while still respecting
124298 +     * condition 2.
124299 +     */
124300 +    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
124301 +        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
124302 +    } else {
124303 +        /* In this degenerate case we simply honor the hash rate. */
124304 +        state->stopMask = ((U64)1 << hashRateLog) - 1;
124305 +    }
124308 +/** ZSTD_ldm_gear_feed():
124310 + * Registers in the splits array all the split points found in the first
124311 + * size bytes following the data pointer. This function terminates when
124312 + * either all the data has been processed or LDM_BATCH_SIZE splits are
124313 + * present in the splits array.
124315 + * Precondition: The splits array must not be full.
124316 + * Returns: The number of bytes processed. */
124317 +static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
124318 +                                 BYTE const* data, size_t size,
124319 +                                 size_t* splits, unsigned* numSplits)
124321 +    size_t n;
124322 +    U64 hash, mask;
124324 +    hash = state->rolling;
124325 +    mask = state->stopMask;
124326 +    n = 0;
124328 +#define GEAR_ITER_ONCE() do { \
124329 +        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
124330 +        n += 1; \
124331 +        if (UNLIKELY((hash & mask) == 0)) { \
124332 +            splits[*numSplits] = n; \
124333 +            *numSplits += 1; \
124334 +            if (*numSplits == LDM_BATCH_SIZE) \
124335 +                goto done; \
124336 +        } \
124337 +    } while (0)
124339 +    while (n + 3 < size) {
124340 +        GEAR_ITER_ONCE();
124341 +        GEAR_ITER_ONCE();
124342 +        GEAR_ITER_ONCE();
124343 +        GEAR_ITER_ONCE();
124344 +    }
124345 +    while (n < size) {
124346 +        GEAR_ITER_ONCE();
124347 +    }
124349 +#undef GEAR_ITER_ONCE
124351 +done:
124352 +    state->rolling = hash;
124353 +    return n;
124356 +void ZSTD_ldm_adjustParameters(ldmParams_t* params,
124357 +                               ZSTD_compressionParameters const* cParams)
124359 +    params->windowLog = cParams->windowLog;
124360 +    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
124361 +    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
124362 +    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
124363 +    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
124364 +    if (params->hashLog == 0) {
124365 +        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
124366 +        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
124367 +    }
124368 +    if (params->hashRateLog == 0) {
124369 +        params->hashRateLog = params->windowLog < params->hashLog
124370 +                                   ? 0
124371 +                                   : params->windowLog - params->hashLog;
124372 +    }
124373 +    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
124376 +size_t ZSTD_ldm_getTableSize(ldmParams_t params)
124378 +    size_t const ldmHSize = ((size_t)1) << params.hashLog;
124379 +    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
124380 +    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
124381 +    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
124382 +                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
124383 +    return params.enableLdm ? totalSize : 0;
124386 +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
124388 +    return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
124391 +/** ZSTD_ldm_getBucket() :
124392 + *  Returns a pointer to the start of the bucket associated with hash. */
124393 +static ldmEntry_t* ZSTD_ldm_getBucket(
124394 +        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
124396 +    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
124399 +/** ZSTD_ldm_insertEntry() :
124400 + *  Insert the entry with corresponding hash into the hash table */
124401 +static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
124402 +                                 size_t const hash, const ldmEntry_t entry,
124403 +                                 ldmParams_t const ldmParams)
124405 +    BYTE* const pOffset = ldmState->bucketOffsets + hash;
124406 +    unsigned const offset = *pOffset;
124408 +    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
124409 +    *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
124413 +/** ZSTD_ldm_countBackwardsMatch() :
124414 + *  Returns the number of bytes that match backwards before pIn and pMatch.
124416 + *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
124417 +static size_t ZSTD_ldm_countBackwardsMatch(
124418 +            const BYTE* pIn, const BYTE* pAnchor,
124419 +            const BYTE* pMatch, const BYTE* pMatchBase)
124421 +    size_t matchLength = 0;
124422 +    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
124423 +        pIn--;
124424 +        pMatch--;
124425 +        matchLength++;
124426 +    }
124427 +    return matchLength;
124430 +/** ZSTD_ldm_countBackwardsMatch_2segments() :
124431 + *  Returns the number of bytes that match backwards from pMatch,
124432 + *  even with the backwards match spanning 2 different segments.
124434 + *  On reaching `pMatchBase`, start counting from mEnd */
124435 +static size_t ZSTD_ldm_countBackwardsMatch_2segments(
124436 +                    const BYTE* pIn, const BYTE* pAnchor,
124437 +                    const BYTE* pMatch, const BYTE* pMatchBase,
124438 +                    const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
124440 +    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
124441 +    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
124442 +        /* If backwards match is entirely in the extDict or prefix, immediately return */
124443 +        return matchLength;
124444 +    }
124445 +    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
124446 +    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
124447 +    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
124448 +    return matchLength;
124451 +/** ZSTD_ldm_fillFastTables() :
124453 + *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
124454 + *  This is similar to ZSTD_loadDictionaryContent.
124456 + *  The tables for the other strategies are filled within their
124457 + *  block compressors. */
124458 +static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
124459 +                                      void const* end)
124461 +    const BYTE* const iend = (const BYTE*)end;
124463 +    switch(ms->cParams.strategy)
124464 +    {
124465 +    case ZSTD_fast:
124466 +        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
124467 +        break;
124469 +    case ZSTD_dfast:
124470 +        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
124471 +        break;
124473 +    case ZSTD_greedy:
124474 +    case ZSTD_lazy:
124475 +    case ZSTD_lazy2:
124476 +    case ZSTD_btlazy2:
124477 +    case ZSTD_btopt:
124478 +    case ZSTD_btultra:
124479 +    case ZSTD_btultra2:
124480 +        break;
124481 +    default:
124482 +        assert(0);  /* not possible : not a valid strategy id */
124483 +    }
124485 +    return 0;
124488 +void ZSTD_ldm_fillHashTable(
124489 +            ldmState_t* ldmState, const BYTE* ip,
124490 +            const BYTE* iend, ldmParams_t const* params)
124492 +    U32 const minMatchLength = params->minMatchLength;
124493 +    U32 const hBits = params->hashLog - params->bucketSizeLog;
124494 +    BYTE const* const base = ldmState->window.base;
124495 +    BYTE const* const istart = ip;
124496 +    ldmRollingHashState_t hashState;
124497 +    size_t* const splits = ldmState->splitIndices;
124498 +    unsigned numSplits;
124500 +    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
124502 +    ZSTD_ldm_gear_init(&hashState, params);
124503 +    while (ip < iend) {
124504 +        size_t hashed;
124505 +        unsigned n;
124507 +        numSplits = 0;
124508 +        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
124510 +        for (n = 0; n < numSplits; n++) {
124511 +            if (ip + splits[n] >= istart + minMatchLength) {
124512 +                BYTE const* const split = ip + splits[n] - minMatchLength;
124513 +                U64 const xxhash = xxh64(split, minMatchLength, 0);
124514 +                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
124515 +                ldmEntry_t entry;
124517 +                entry.offset = (U32)(split - base);
124518 +                entry.checksum = (U32)(xxhash >> 32);
124519 +                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
124520 +            }
124521 +        }
124523 +        ip += hashed;
124524 +    }
124528 +/** ZSTD_ldm_limitTableUpdate() :
124530 + *  Sets cctx->nextToUpdate to a position corresponding closer to anchor
124531 + *  if it is far way
124532 + *  (after a long match, only update tables a limited amount). */
124533 +static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
124535 +    U32 const curr = (U32)(anchor - ms->window.base);
124536 +    if (curr > ms->nextToUpdate + 1024) {
124537 +        ms->nextToUpdate =
124538 +            curr - MIN(512, curr - ms->nextToUpdate - 1024);
124539 +    }
124542 +static size_t ZSTD_ldm_generateSequences_internal(
124543 +        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
124544 +        ldmParams_t const* params, void const* src, size_t srcSize)
124546 +    /* LDM parameters */
124547 +    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
124548 +    U32 const minMatchLength = params->minMatchLength;
124549 +    U32 const entsPerBucket = 1U << params->bucketSizeLog;
124550 +    U32 const hBits = params->hashLog - params->bucketSizeLog;
124551 +    /* Prefix and extDict parameters */
124552 +    U32 const dictLimit = ldmState->window.dictLimit;
124553 +    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
124554 +    BYTE const* const base = ldmState->window.base;
124555 +    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
124556 +    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
124557 +    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
124558 +    BYTE const* const lowPrefixPtr = base + dictLimit;
124559 +    /* Input bounds */
124560 +    BYTE const* const istart = (BYTE const*)src;
124561 +    BYTE const* const iend = istart + srcSize;
124562 +    BYTE const* const ilimit = iend - HASH_READ_SIZE;
124563 +    /* Input positions */
124564 +    BYTE const* anchor = istart;
124565 +    BYTE const* ip = istart;
124566 +    /* Rolling hash state */
124567 +    ldmRollingHashState_t hashState;
124568 +    /* Arrays for staged-processing */
124569 +    size_t* const splits = ldmState->splitIndices;
124570 +    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
124571 +    unsigned numSplits;
124573 +    if (srcSize < minMatchLength)
124574 +        return iend - anchor;
124576 +    /* Initialize the rolling hash state with the first minMatchLength bytes */
124577 +    ZSTD_ldm_gear_init(&hashState, params);
124578 +    {
124579 +        size_t n = 0;
124581 +        while (n < minMatchLength) {
124582 +            numSplits = 0;
124583 +            n += ZSTD_ldm_gear_feed(&hashState, ip + n, minMatchLength - n,
124584 +                                    splits, &numSplits);
124585 +        }
124586 +        ip += minMatchLength;
124587 +    }
124589 +    while (ip < ilimit) {
124590 +        size_t hashed;
124591 +        unsigned n;
124593 +        numSplits = 0;
124594 +        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
124595 +                                    splits, &numSplits);
124597 +        for (n = 0; n < numSplits; n++) {
124598 +            BYTE const* const split = ip + splits[n] - minMatchLength;
124599 +            U64 const xxhash = xxh64(split, minMatchLength, 0);
124600 +            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
124602 +            candidates[n].split = split;
124603 +            candidates[n].hash = hash;
124604 +            candidates[n].checksum = (U32)(xxhash >> 32);
124605 +            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
124606 +            PREFETCH_L1(candidates[n].bucket);
124607 +        }
124609 +        for (n = 0; n < numSplits; n++) {
124610 +            size_t forwardMatchLength = 0, backwardMatchLength = 0,
124611 +                   bestMatchLength = 0, mLength;
124612 +            BYTE const* const split = candidates[n].split;
124613 +            U32 const checksum = candidates[n].checksum;
124614 +            U32 const hash = candidates[n].hash;
124615 +            ldmEntry_t* const bucket = candidates[n].bucket;
124616 +            ldmEntry_t const* cur;
124617 +            ldmEntry_t const* bestEntry = NULL;
124618 +            ldmEntry_t newEntry;
124620 +            newEntry.offset = (U32)(split - base);
124621 +            newEntry.checksum = checksum;
124623 +            /* If a split point would generate a sequence overlapping with
124624 +             * the previous one, we merely register it in the hash table and
124625 +             * move on */
124626 +            if (split < anchor) {
124627 +                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
124628 +                continue;
124629 +            }
124631 +            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
124632 +                size_t curForwardMatchLength, curBackwardMatchLength,
124633 +                       curTotalMatchLength;
124634 +                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
124635 +                    continue;
124636 +                }
124637 +                if (extDict) {
124638 +                    BYTE const* const curMatchBase =
124639 +                        cur->offset < dictLimit ? dictBase : base;
124640 +                    BYTE const* const pMatch = curMatchBase + cur->offset;
124641 +                    BYTE const* const matchEnd =
124642 +                        cur->offset < dictLimit ? dictEnd : iend;
124643 +                    BYTE const* const lowMatchPtr =
124644 +                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
124645 +                    curForwardMatchLength =
124646 +                        ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
124647 +                    if (curForwardMatchLength < minMatchLength) {
124648 +                        continue;
124649 +                    }
124650 +                    curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
124651 +                            split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
124652 +                } else { /* !extDict */
124653 +                    BYTE const* const pMatch = base + cur->offset;
124654 +                    curForwardMatchLength = ZSTD_count(split, pMatch, iend);
124655 +                    if (curForwardMatchLength < minMatchLength) {
124656 +                        continue;
124657 +                    }
124658 +                    curBackwardMatchLength =
124659 +                        ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
124660 +                }
124661 +                curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;
124663 +                if (curTotalMatchLength > bestMatchLength) {
124664 +                    bestMatchLength = curTotalMatchLength;
124665 +                    forwardMatchLength = curForwardMatchLength;
124666 +                    backwardMatchLength = curBackwardMatchLength;
124667 +                    bestEntry = cur;
124668 +                }
124669 +            }
124671 +            /* No match found -- insert an entry into the hash table
124672 +             * and process the next candidate match */
124673 +            if (bestEntry == NULL) {
124674 +                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
124675 +                continue;
124676 +            }
124678 +            /* Match found */
124679 +            mLength = forwardMatchLength + backwardMatchLength;
124680 +            {
124681 +                U32 const offset = (U32)(split - base) - bestEntry->offset;
124682 +                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
124684 +                /* Out of sequence storage */
124685 +                if (rawSeqStore->size == rawSeqStore->capacity)
124686 +                    return ERROR(dstSize_tooSmall);
124687 +                seq->litLength = (U32)(split - backwardMatchLength - anchor);
124688 +                seq->matchLength = (U32)mLength;
124689 +                seq->offset = offset;
124690 +                rawSeqStore->size++;
124691 +            }
124693 +            /* Insert the current entry into the hash table --- it must be
124694 +             * done after the previous block to avoid clobbering bestEntry */
124695 +            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
124697 +            anchor = split + forwardMatchLength;
124698 +        }
124700 +        ip += hashed;
124701 +    }
124703 +    return iend - anchor;
124706 +/*! ZSTD_ldm_reduceTable() :
124707 + *  reduce table indexes by `reducerValue` */
124708 +static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
124709 +                                 U32 const reducerValue)
124711 +    U32 u;
124712 +    for (u = 0; u < size; u++) {
124713 +        if (table[u].offset < reducerValue) table[u].offset = 0;
124714 +        else table[u].offset -= reducerValue;
124715 +    }
124718 +size_t ZSTD_ldm_generateSequences(
124719 +        ldmState_t* ldmState, rawSeqStore_t* sequences,
124720 +        ldmParams_t const* params, void const* src, size_t srcSize)
124722 +    U32 const maxDist = 1U << params->windowLog;
124723 +    BYTE const* const istart = (BYTE const*)src;
124724 +    BYTE const* const iend = istart + srcSize;
124725 +    size_t const kMaxChunkSize = 1 << 20;
124726 +    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
124727 +    size_t chunk;
124728 +    size_t leftoverSize = 0;
124730 +    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
124731 +    /* Check that ZSTD_window_update() has been called for this chunk prior
124732 +     * to passing it to this function.
124733 +     */
124734 +    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
124735 +    /* The input could be very large (in zstdmt), so it must be broken up into
124736 +     * chunks to enforce the maximum distance and handle overflow correction.
124737 +     */
124738 +    assert(sequences->pos <= sequences->size);
124739 +    assert(sequences->size <= sequences->capacity);
124740 +    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
124741 +        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
124742 +        size_t const remaining = (size_t)(iend - chunkStart);
124743 +        BYTE const *const chunkEnd =
124744 +            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
124745 +        size_t const chunkSize = chunkEnd - chunkStart;
124746 +        size_t newLeftoverSize;
124747 +        size_t const prevSize = sequences->size;
124749 +        assert(chunkStart < iend);
124750 +        /* 1. Perform overflow correction if necessary. */
124751 +        if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
124752 +            U32 const ldmHSize = 1U << params->hashLog;
124753 +            U32 const correction = ZSTD_window_correctOverflow(
124754 +                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
124755 +            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
124756 +            /* invalidate dictionaries on overflow correction */
124757 +            ldmState->loadedDictEnd = 0;
124758 +        }
124759 +        /* 2. We enforce the maximum offset allowed.
124760 +         *
124761 +         * kMaxChunkSize should be small enough that we don't lose too much of
124762 +         * the window through early invalidation.
124763 +         * TODO: * Test the chunk size.
124764 +         *       * Try invalidation after the sequence generation and test the
124765 +         *         the offset against maxDist directly.
124766 +         *
124767 +         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
124768 +         * that any offset used is valid at the END of the sequence, since it may
124769 +         * be split into two sequences. This condition holds when using
124770 +         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
124771 +         * against maxDist directly, we'll have to carefully handle that case.
124772 +         */
124773 +        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
124774 +        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
124775 +        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
124776 +            ldmState, sequences, params, chunkStart, chunkSize);
124777 +        if (ZSTD_isError(newLeftoverSize))
124778 +            return newLeftoverSize;
124779 +        /* 4. We add the leftover literals from previous iterations to the first
124780 +         *    newly generated sequence, or add the `newLeftoverSize` if none are
124781 +         *    generated.
124782 +         */
124783 +        /* Prepend the leftover literals from the last call */
124784 +        if (prevSize < sequences->size) {
124785 +            sequences->seq[prevSize].litLength += (U32)leftoverSize;
124786 +            leftoverSize = newLeftoverSize;
124787 +        } else {
124788 +            assert(newLeftoverSize == chunkSize);
124789 +            leftoverSize += chunkSize;
124790 +        }
124791 +    }
124792 +    return 0;
124795 +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
124796 +    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
124797 +        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
124798 +        if (srcSize <= seq->litLength) {
124799 +            /* Skip past srcSize literals */
124800 +            seq->litLength -= (U32)srcSize;
124801 +            return;
124802 +        }
124803 +        srcSize -= seq->litLength;
124804 +        seq->litLength = 0;
124805 +        if (srcSize < seq->matchLength) {
124806 +            /* Skip past the first srcSize of the match */
124807 +            seq->matchLength -= (U32)srcSize;
124808 +            if (seq->matchLength < minMatch) {
124809 +                /* The match is too short, omit it */
124810 +                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
124811 +                    seq[1].litLength += seq[0].matchLength;
124812 +                }
124813 +                rawSeqStore->pos++;
124814 +            }
124815 +            return;
124816 +        }
124817 +        srcSize -= seq->matchLength;
124818 +        seq->matchLength = 0;
124819 +        rawSeqStore->pos++;
124820 +    }
124824 + * If the sequence length is longer than remaining then the sequence is split
124825 + * between this block and the next.
124827 + * Returns the current sequence to handle, or if the rest of the block should
124828 + * be literals, it returns a sequence with offset == 0.
124829 + */
124830 +static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
124831 +                                 U32 const remaining, U32 const minMatch)
124833 +    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
124834 +    assert(sequence.offset > 0);
124835 +    /* Likely: No partial sequence */
124836 +    if (remaining >= sequence.litLength + sequence.matchLength) {
124837 +        rawSeqStore->pos++;
124838 +        return sequence;
124839 +    }
124840 +    /* Cut the sequence short (offset == 0 ==> rest is literals). */
124841 +    if (remaining <= sequence.litLength) {
124842 +        sequence.offset = 0;
124843 +    } else if (remaining < sequence.litLength + sequence.matchLength) {
124844 +        sequence.matchLength = remaining - sequence.litLength;
124845 +        if (sequence.matchLength < minMatch) {
124846 +            sequence.offset = 0;
124847 +        }
124848 +    }
124849 +    /* Skip past `remaining` bytes for the future sequences. */
124850 +    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
124851 +    return sequence;
124854 +void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
124855 +    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
124856 +    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
124857 +        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
124858 +        if (currPos >= currSeq.litLength + currSeq.matchLength) {
124859 +            currPos -= currSeq.litLength + currSeq.matchLength;
124860 +            rawSeqStore->pos++;
124861 +        } else {
124862 +            rawSeqStore->posInSequence = currPos;
124863 +            break;
124864 +        }
124865 +    }
124866 +    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
124867 +        rawSeqStore->posInSequence = 0;
124868 +    }
124871 +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
124872 +    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124873 +    void const* src, size_t srcSize)
124875 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
124876 +    unsigned const minMatch = cParams->minMatch;
124877 +    ZSTD_blockCompressor const blockCompressor =
124878 +        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
124879 +    /* Input bounds */
124880 +    BYTE const* const istart = (BYTE const*)src;
124881 +    BYTE const* const iend = istart + srcSize;
124882 +    /* Input positions */
124883 +    BYTE const* ip = istart;
124885 +    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
124886 +    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
124887 +    if (cParams->strategy >= ZSTD_btopt) {
124888 +        size_t lastLLSize;
124889 +        ms->ldmSeqStore = rawSeqStore;
124890 +        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
124891 +        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
124892 +        return lastLLSize;
124893 +    }
124895 +    assert(rawSeqStore->pos <= rawSeqStore->size);
124896 +    assert(rawSeqStore->size <= rawSeqStore->capacity);
124897 +    /* Loop through each sequence and apply the block compressor to the literals */
124898 +    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
124899 +        /* maybeSplitSequence updates rawSeqStore->pos */
124900 +        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
124901 +                                                   (U32)(iend - ip), minMatch);
124902 +        int i;
124903 +        /* End signal */
124904 +        if (sequence.offset == 0)
124905 +            break;
124907 +        assert(ip + sequence.litLength + sequence.matchLength <= iend);
124909 +        /* Fill tables for block compressor */
124910 +        ZSTD_ldm_limitTableUpdate(ms, ip);
124911 +        ZSTD_ldm_fillFastTables(ms, ip);
124912 +        /* Run the block compressor */
124913 +        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
124914 +        {
124915 +            size_t const newLitLength =
124916 +                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
124917 +            ip += sequence.litLength;
124918 +            /* Update the repcodes */
124919 +            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
124920 +                rep[i] = rep[i-1];
124921 +            rep[0] = sequence.offset;
124922 +            /* Store the sequence */
124923 +            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
124924 +                          sequence.offset + ZSTD_REP_MOVE,
124925 +                          sequence.matchLength - MINMATCH);
124926 +            ip += sequence.matchLength;
124927 +        }
124928 +    }
124929 +    /* Fill the tables for the block compressor */
124930 +    ZSTD_ldm_limitTableUpdate(ms, ip);
124931 +    ZSTD_ldm_fillFastTables(ms, ip);
124932 +    /* Compress the last literals */
124933 +    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
124935 diff --git a/lib/zstd/compress/zstd_ldm.h b/lib/zstd/compress/zstd_ldm.h
124936 new file mode 100644
124937 index 000000000000..5ee467eaca2e
124938 --- /dev/null
124939 +++ b/lib/zstd/compress/zstd_ldm.h
124940 @@ -0,0 +1,110 @@
124942 + * Copyright (c) Yann Collet, Facebook, Inc.
124943 + * All rights reserved.
124945 + * This source code is licensed under both the BSD-style license (found in the
124946 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
124947 + * in the COPYING file in the root directory of this source tree).
124948 + * You may select, at your option, one of the above-listed licenses.
124949 + */
124951 +#ifndef ZSTD_LDM_H
124952 +#define ZSTD_LDM_H
124955 +#include "zstd_compress_internal.h"   /* ldmParams_t, U32 */
124956 +#include <linux/zstd.h>   /* ZSTD_CCtx, size_t */
124958 +/*-*************************************
124959 +*  Long distance matching
124960 +***************************************/
124962 +#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
124964 +void ZSTD_ldm_fillHashTable(
124965 +            ldmState_t* state, const BYTE* ip,
124966 +            const BYTE* iend, ldmParams_t const* params);
124969 + * ZSTD_ldm_generateSequences():
124971 + * Generates the sequences using the long distance match finder.
124972 + * Generates long range matching sequences in `sequences`, which parse a prefix
124973 + * of the source. `sequences` must be large enough to store every sequence,
124974 + * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
124975 + * @returns 0 or an error code.
124977 + * NOTE: The user must have called ZSTD_window_update() for all of the input
124978 + * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
124979 + * NOTE: This function returns an error if it runs out of space to store
124980 + *       sequences.
124981 + */
124982 +size_t ZSTD_ldm_generateSequences(
124983 +            ldmState_t* ldms, rawSeqStore_t* sequences,
124984 +            ldmParams_t const* params, void const* src, size_t srcSize);
124987 + * ZSTD_ldm_blockCompress():
124989 + * Compresses a block using the predefined sequences, along with a secondary
124990 + * block compressor. The literals section of every sequence is passed to the
124991 + * secondary block compressor, and those sequences are interspersed with the
124992 + * predefined sequences. Returns the length of the last literals.
124993 + * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
124994 + * `rawSeqStore.seq` may also be updated to split the last sequence between two
124995 + * blocks.
124996 + * @return The length of the last literals.
124998 + * NOTE: The source must be at most the maximum block size, but the predefined
124999 + * sequences can be any size, and may be longer than the block. In the case that
125000 + * they are longer than the block, the last sequences may need to be split into
125001 + * two. We handle that case correctly, and update `rawSeqStore` appropriately.
125002 + * NOTE: This function does not return any errors.
125003 + */
125004 +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
125005 +            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
125006 +            void const* src, size_t srcSize);
125009 + * ZSTD_ldm_skipSequences():
125011 + * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
125012 + * Avoids emitting matches less than `minMatch` bytes.
125013 + * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
125014 + */
125015 +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
125016 +    U32 const minMatch);
125018 +/* ZSTD_ldm_skipRawSeqStoreBytes():
125019 + * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'.
125020 + * Not to be used in conjunction with ZSTD_ldm_skipSequences().
125021 + * Must be called for data with is not passed to ZSTD_ldm_blockCompress().
125022 + */
125023 +void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
125025 +/** ZSTD_ldm_getTableSize() :
125026 + *  Estimate the space needed for long distance matching tables or 0 if LDM is
125027 + *  disabled.
125028 + */
125029 +size_t ZSTD_ldm_getTableSize(ldmParams_t params);
125031 +/** ZSTD_ldm_getSeqSpace() :
125032 + *  Return an upper bound on the number of sequences that can be produced by
125033 + *  the long distance matcher, or 0 if LDM is disabled.
125034 + */
125035 +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
125037 +/** ZSTD_ldm_adjustParameters() :
125038 + *  If the params->hashRateLog is not set, set it to its default value based on
125039 + *  windowLog and params->hashLog.
125041 + *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
125042 + *  params->hashLog if it is not).
125044 + *  Ensures that the minMatchLength >= targetLength during optimal parsing.
125045 + */
125046 +void ZSTD_ldm_adjustParameters(ldmParams_t* params,
125047 +                               ZSTD_compressionParameters const* cParams);
125050 +#endif /* ZSTD_FAST_H */
125051 diff --git a/lib/zstd/compress/zstd_ldm_geartab.h b/lib/zstd/compress/zstd_ldm_geartab.h
125052 new file mode 100644
125053 index 000000000000..e5c24d856b0a
125054 --- /dev/null
125055 +++ b/lib/zstd/compress/zstd_ldm_geartab.h
125056 @@ -0,0 +1,103 @@
125058 + * Copyright (c) Yann Collet, Facebook, Inc.
125059 + * All rights reserved.
125061 + * This source code is licensed under both the BSD-style license (found in the
125062 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
125063 + * in the COPYING file in the root directory of this source tree).
125064 + * You may select, at your option, one of the above-listed licenses.
125065 + */
125067 +#ifndef ZSTD_LDM_GEARTAB_H
125068 +#define ZSTD_LDM_GEARTAB_H
125070 +static U64 ZSTD_ldm_gearTab[256] = {
125071 +    0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc,
125072 +    0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05,
125073 +    0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e,
125074 +    0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889,
125075 +    0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e,
125076 +    0x37b628620b628,    0x49a8d455d88caf5,  0x8556d711e6958140,
125077 +    0x4f7ae74fc605c1f,  0x829f0c3468bd3a20, 0x4ffdc885c625179e,
125078 +    0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f,
125079 +    0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391,
125080 +    0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210,
125081 +    0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be,
125082 +    0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a,
125083 +    0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b,
125084 +    0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4,
125085 +    0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb,
125086 +    0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312,
125087 +    0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01,
125088 +    0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc,
125089 +    0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967,
125090 +    0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553,
125091 +    0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f,
125092 +    0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2,
125093 +    0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d,
125094 +    0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a,
125095 +    0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74,
125096 +    0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3,
125097 +    0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1,
125098 +    0xff452823dbb010a,  0x9d42ed614f3dd267, 0x5b9313c06257c57b,
125099 +    0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568,
125100 +    0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a,
125101 +    0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1,
125102 +    0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9,
125103 +    0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463,
125104 +    0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba,
125105 +    0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9,
125106 +    0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61,
125107 +    0x24a5483879c453e3, 0x88026889192b4b9,  0x28da96671782dbec,
125108 +    0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6,
125109 +    0xbc135a0a704b70ba, 0x69cd868f7622ada,  0xbc37ba89e0b9c0ab,
125110 +    0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5,
125111 +    0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59,
125112 +    0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7,
125113 +    0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc,
125114 +    0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb,
125115 +    0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be,
125116 +    0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312,
125117 +    0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1,
125118 +    0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc,
125119 +    0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d,
125120 +    0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445,
125121 +    0x820d471e20b348e,  0x1874383cb83d46dc, 0x97edeec7a1efe11c,
125122 +    0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5,
125123 +    0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5,
125124 +    0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28,
125125 +    0xaf846af6ab7d0bf4, 0xe5af208eb666e49,  0x5e6622f73534cd6a,
125126 +    0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9,
125127 +    0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15,
125128 +    0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef,
125129 +    0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2,
125130 +    0x9f90e4c5fd508d8,  0xa34e5956fbaf3385, 0x2e2f8e151d3ef375,
125131 +    0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3,
125132 +    0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595,
125133 +    0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389,
125134 +    0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4,
125135 +    0x4228e364c5b5ed7,  0x9d7a3edf0da43911, 0x8edcfeda24686756,
125136 +    0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc,
125137 +    0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45,
125138 +    0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea,
125139 +    0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f,
125140 +    0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc,
125141 +    0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c,
125142 +    0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a,
125143 +    0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17,
125144 +    0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3,
125145 +    0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4,
125146 +    0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91,
125147 +    0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40,
125148 +    0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741,
125149 +    0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f,
125150 +    0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4,
125151 +    0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad,
125152 +    0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047,
125153 +    0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2,
125154 +    0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e,
125155 +    0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b,
125156 +    0x2b4da14f2613d8f4
125159 +#endif /* ZSTD_LDM_GEARTAB_H */
125160 diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c
125161 new file mode 100644
125162 index 000000000000..9ab92d4ef499
125163 --- /dev/null
125164 +++ b/lib/zstd/compress/zstd_opt.c
125165 @@ -0,0 +1,1345 @@
125167 + * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
125168 + * All rights reserved.
125170 + * This source code is licensed under both the BSD-style license (found in the
125171 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
125172 + * in the COPYING file in the root directory of this source tree).
125173 + * You may select, at your option, one of the above-listed licenses.
125174 + */
125176 +#include "zstd_compress_internal.h"
125177 +#include "hist.h"
125178 +#include "zstd_opt.h"
125181 +#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
125182 +#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
125183 +#define ZSTD_MAX_PRICE     (1<<30)
125185 +#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
125188 +/*-*************************************
125189 +*  Price functions for optimal parser
125190 +***************************************/
125192 +#if 0    /* approximation at bit level */
125193 +#  define BITCOST_ACCURACY 0
125194 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
125195 +#  define WEIGHT(stat)  ((void)opt, ZSTD_bitWeight(stat))
125196 +#elif 0  /* fractional bit accuracy */
125197 +#  define BITCOST_ACCURACY 8
125198 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
125199 +#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
125200 +#else    /* opt==approx, ultra==accurate */
125201 +#  define BITCOST_ACCURACY 8
125202 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
125203 +#  define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
125204 +#endif
125206 +MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
125208 +    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
125211 +MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
125213 +    U32 const stat = rawStat + 1;
125214 +    U32 const hb = ZSTD_highbit32(stat);
125215 +    U32 const BWeight = hb * BITCOST_MULTIPLIER;
125216 +    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
125217 +    U32 const weight = BWeight + FWeight;
125218 +    assert(hb + BITCOST_ACCURACY < 31);
125219 +    return weight;
125222 +#if (DEBUGLEVEL>=2)
125223 +/* debugging function,
125224 + * @return price in bytes as fractional value
125225 + * for debug messages only */
125226 +MEM_STATIC double ZSTD_fCost(U32 price)
125228 +    return (double)price / (BITCOST_MULTIPLIER*8);
125230 +#endif
125232 +static int ZSTD_compressedLiterals(optState_t const* const optPtr)
125234 +    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
125237 +static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
125239 +    if (ZSTD_compressedLiterals(optPtr))
125240 +        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
125241 +    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
125242 +    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
125243 +    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
125247 +/* ZSTD_downscaleStat() :
125248 + * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
125249 + * return the resulting sum of elements */
125250 +static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
125252 +    U32 s, sum=0;
125253 +    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
125254 +    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
125255 +    for (s=0; s<lastEltIndex+1; s++) {
125256 +        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
125257 +        sum += table[s];
125258 +    }
125259 +    return sum;
125262 +/* ZSTD_rescaleFreqs() :
125263 + * if first block (detected by optPtr->litLengthSum == 0) : init statistics
125264 + *    take hints from dictionary if there is one
125265 + *    or init from zero, using src for literals stats, or flat 1 for match symbols
125266 + * otherwise downscale existing stats, to be used as seed for next block.
125267 + */
125268 +static void
125269 +ZSTD_rescaleFreqs(optState_t* const optPtr,
125270 +            const BYTE* const src, size_t const srcSize,
125271 +                  int const optLevel)
125273 +    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
125274 +    DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
125275 +    optPtr->priceType = zop_dynamic;
125277 +    if (optPtr->litLengthSum == 0) {  /* first block : init */
125278 +        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */
125279 +            DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
125280 +            optPtr->priceType = zop_predef;
125281 +        }
125283 +        assert(optPtr->symbolCosts != NULL);
125284 +        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
125285 +            /* huffman table presumed generated by dictionary */
125286 +            optPtr->priceType = zop_dynamic;
125288 +            if (compressedLiterals) {
125289 +                unsigned lit;
125290 +                assert(optPtr->litFreq != NULL);
125291 +                optPtr->litSum = 0;
125292 +                for (lit=0; lit<=MaxLit; lit++) {
125293 +                    U32 const scaleLog = 11;   /* scale to 2K */
125294 +                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
125295 +                    assert(bitCost <= scaleLog);
125296 +                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
125297 +                    optPtr->litSum += optPtr->litFreq[lit];
125298 +            }   }
125300 +            {   unsigned ll;
125301 +                FSE_CState_t llstate;
125302 +                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
125303 +                optPtr->litLengthSum = 0;
125304 +                for (ll=0; ll<=MaxLL; ll++) {
125305 +                    U32 const scaleLog = 10;   /* scale to 1K */
125306 +                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
125307 +                    assert(bitCost < scaleLog);
125308 +                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
125309 +                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
125310 +            }   }
125312 +            {   unsigned ml;
125313 +                FSE_CState_t mlstate;
125314 +                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
125315 +                optPtr->matchLengthSum = 0;
125316 +                for (ml=0; ml<=MaxML; ml++) {
125317 +                    U32 const scaleLog = 10;
125318 +                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
125319 +                    assert(bitCost < scaleLog);
125320 +                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
125321 +                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
125322 +            }   }
125324 +            {   unsigned of;
125325 +                FSE_CState_t ofstate;
125326 +                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
125327 +                optPtr->offCodeSum = 0;
125328 +                for (of=0; of<=MaxOff; of++) {
125329 +                    U32 const scaleLog = 10;
125330 +                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
125331 +                    assert(bitCost < scaleLog);
125332 +                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
125333 +                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
125334 +            }   }
125336 +        } else {  /* not a dictionary */
125338 +            assert(optPtr->litFreq != NULL);
125339 +            if (compressedLiterals) {
125340 +                unsigned lit = MaxLit;
125341 +                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
125342 +                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
125343 +            }
125345 +            {   unsigned ll;
125346 +                for (ll=0; ll<=MaxLL; ll++)
125347 +                    optPtr->litLengthFreq[ll] = 1;
125348 +            }
125349 +            optPtr->litLengthSum = MaxLL+1;
125351 +            {   unsigned ml;
125352 +                for (ml=0; ml<=MaxML; ml++)
125353 +                    optPtr->matchLengthFreq[ml] = 1;
125354 +            }
125355 +            optPtr->matchLengthSum = MaxML+1;
125357 +            {   unsigned of;
125358 +                for (of=0; of<=MaxOff; of++)
125359 +                    optPtr->offCodeFreq[of] = 1;
125360 +            }
125361 +            optPtr->offCodeSum = MaxOff+1;
125363 +        }
125365 +    } else {   /* new block : re-use previous statistics, scaled down */
125367 +        if (compressedLiterals)
125368 +            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
125369 +        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
125370 +        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
125371 +        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
125372 +    }
125374 +    ZSTD_setBasePrices(optPtr, optLevel);
125377 +/* ZSTD_rawLiteralsCost() :
125378 + * price of literals (only) in specified segment (which length can be 0).
125379 + * does not include price of literalLength symbol */
125380 +static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
125381 +                                const optState_t* const optPtr,
125382 +                                int optLevel)
125384 +    if (litLength == 0) return 0;
125386 +    if (!ZSTD_compressedLiterals(optPtr))
125387 +        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bytes per literal. */
125389 +    if (optPtr->priceType == zop_predef)
125390 +        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bit per literal - no statistic used */
125392 +    /* dynamic statistics */
125393 +    {   U32 price = litLength * optPtr->litSumBasePrice;
125394 +        U32 u;
125395 +        for (u=0; u < litLength; u++) {
125396 +            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
125397 +            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
125398 +        }
125399 +        return price;
125400 +    }
125403 +/* ZSTD_litLengthPrice() :
125404 + * cost of literalLength symbol */
125405 +static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
125407 +    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
125409 +    /* dynamic statistics */
125410 +    {   U32 const llCode = ZSTD_LLcode(litLength);
125411 +        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
125412 +             + optPtr->litLengthSumBasePrice
125413 +             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
125414 +    }
125417 +/* ZSTD_getMatchPrice() :
125418 + * Provides the cost of the match part (offset + matchLength) of a sequence
125419 + * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
125420 + * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
125421 +FORCE_INLINE_TEMPLATE U32
125422 +ZSTD_getMatchPrice(U32 const offset,
125423 +                   U32 const matchLength,
125424 +             const optState_t* const optPtr,
125425 +                   int const optLevel)
125427 +    U32 price;
125428 +    U32 const offCode = ZSTD_highbit32(offset+1);
125429 +    U32 const mlBase = matchLength - MINMATCH;
125430 +    assert(matchLength >= MINMATCH);
125432 +    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */
125433 +        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
125435 +    /* dynamic statistics */
125436 +    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
125437 +    if ((optLevel<2) /*static*/ && offCode >= 20)
125438 +        price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
125440 +    /* match Length */
125441 +    {   U32 const mlCode = ZSTD_MLcode(mlBase);
125442 +        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
125443 +    }
125445 +    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor less sequences -> faster decompression speed */
125447 +    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
125448 +    return price;
125451 +/* ZSTD_updateStats() :
125452 + * assumption : literals + litLengtn <= iend */
125453 +static void ZSTD_updateStats(optState_t* const optPtr,
125454 +                             U32 litLength, const BYTE* literals,
125455 +                             U32 offsetCode, U32 matchLength)
125457 +    /* literals */
125458 +    if (ZSTD_compressedLiterals(optPtr)) {
125459 +        U32 u;
125460 +        for (u=0; u < litLength; u++)
125461 +            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
125462 +        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
125463 +    }
125465 +    /* literal Length */
125466 +    {   U32 const llCode = ZSTD_LLcode(litLength);
125467 +        optPtr->litLengthFreq[llCode]++;
125468 +        optPtr->litLengthSum++;
125469 +    }
125471 +    /* match offset code (0-2=>repCode; 3+=>offset+2) */
125472 +    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
125473 +        assert(offCode <= MaxOff);
125474 +        optPtr->offCodeFreq[offCode]++;
125475 +        optPtr->offCodeSum++;
125476 +    }
125478 +    /* match Length */
125479 +    {   U32 const mlBase = matchLength - MINMATCH;
125480 +        U32 const mlCode = ZSTD_MLcode(mlBase);
125481 +        optPtr->matchLengthFreq[mlCode]++;
125482 +        optPtr->matchLengthSum++;
125483 +    }
125487 +/* ZSTD_readMINMATCH() :
125488 + * function safe only for comparisons
125489 + * assumption : memPtr must be at least 4 bytes before end of buffer */
125490 +MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
125492 +    switch (length)
125493 +    {
125494 +    default :
125495 +    case 4 : return MEM_read32(memPtr);
125496 +    case 3 : if (MEM_isLittleEndian())
125497 +                return MEM_read32(memPtr)<<8;
125498 +             else
125499 +                return MEM_read32(memPtr)>>8;
125500 +    }
125504 +/* Update hashTable3 up to ip (excluded)
125505 +   Assumption : always within prefix (i.e. not within extDict) */
125506 +static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
125507 +                                              U32* nextToUpdate3,
125508 +                                              const BYTE* const ip)
125510 +    U32* const hashTable3 = ms->hashTable3;
125511 +    U32 const hashLog3 = ms->hashLog3;
125512 +    const BYTE* const base = ms->window.base;
125513 +    U32 idx = *nextToUpdate3;
125514 +    U32 const target = (U32)(ip - base);
125515 +    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
125516 +    assert(hashLog3 > 0);
125518 +    while(idx < target) {
125519 +        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
125520 +        idx++;
125521 +    }
125523 +    *nextToUpdate3 = target;
125524 +    return hashTable3[hash3];
125528 +/*-*************************************
125529 +*  Binary Tree search
125530 +***************************************/
125531 +/** ZSTD_insertBt1() : add one or multiple positions to tree.
125532 + *  ip : assumed <= iend-8 .
125533 + * @return : nb of positions added */
125534 +static U32 ZSTD_insertBt1(
125535 +                ZSTD_matchState_t* ms,
125536 +                const BYTE* const ip, const BYTE* const iend,
125537 +                U32 const mls, const int extDict)
125539 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
125540 +    U32*   const hashTable = ms->hashTable;
125541 +    U32    const hashLog = cParams->hashLog;
125542 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
125543 +    U32*   const bt = ms->chainTable;
125544 +    U32    const btLog  = cParams->chainLog - 1;
125545 +    U32    const btMask = (1 << btLog) - 1;
125546 +    U32 matchIndex = hashTable[h];
125547 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
125548 +    const BYTE* const base = ms->window.base;
125549 +    const BYTE* const dictBase = ms->window.dictBase;
125550 +    const U32 dictLimit = ms->window.dictLimit;
125551 +    const BYTE* const dictEnd = dictBase + dictLimit;
125552 +    const BYTE* const prefixStart = base + dictLimit;
125553 +    const BYTE* match;
125554 +    const U32 curr = (U32)(ip-base);
125555 +    const U32 btLow = btMask >= curr ? 0 : curr - btMask;
125556 +    U32* smallerPtr = bt + 2*(curr&btMask);
125557 +    U32* largerPtr  = smallerPtr + 1;
125558 +    U32 dummy32;   /* to be nullified at the end */
125559 +    U32 const windowLow = ms->window.lowLimit;
125560 +    U32 matchEndIdx = curr+8+1;
125561 +    size_t bestLength = 8;
125562 +    U32 nbCompares = 1U << cParams->searchLog;
125563 +#ifdef ZSTD_C_PREDICT
125564 +    U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
125565 +    U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
125566 +    predictedSmall += (predictedSmall>0);
125567 +    predictedLarge += (predictedLarge>0);
125568 +#endif /* ZSTD_C_PREDICT */
125570 +    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
125572 +    assert(ip <= iend-8);   /* required for h calculation */
125573 +    hashTable[h] = curr;   /* Update Hash Table */
125575 +    assert(windowLow > 0);
125576 +    while (nbCompares-- && (matchIndex >= windowLow)) {
125577 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
125578 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
125579 +        assert(matchIndex < curr);
125581 +#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
125582 +        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
125583 +        if (matchIndex == predictedSmall) {
125584 +            /* no need to check length, result known */
125585 +            *smallerPtr = matchIndex;
125586 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
125587 +            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
125588 +            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
125589 +            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
125590 +            continue;
125591 +        }
125592 +        if (matchIndex == predictedLarge) {
125593 +            *largerPtr = matchIndex;
125594 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
125595 +            largerPtr = nextPtr;
125596 +            matchIndex = nextPtr[0];
125597 +            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
125598 +            continue;
125599 +        }
125600 +#endif
125602 +        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
125603 +            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
125604 +            match = base + matchIndex;
125605 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
125606 +        } else {
125607 +            match = dictBase + matchIndex;
125608 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
125609 +            if (matchIndex+matchLength >= dictLimit)
125610 +                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
125611 +        }
125613 +        if (matchLength > bestLength) {
125614 +            bestLength = matchLength;
125615 +            if (matchLength > matchEndIdx - matchIndex)
125616 +                matchEndIdx = matchIndex + (U32)matchLength;
125617 +        }
125619 +        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
125620 +            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
125621 +        }
125623 +        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
125624 +            /* match is smaller than current */
125625 +            *smallerPtr = matchIndex;             /* update smaller idx */
125626 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
125627 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
125628 +            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
125629 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
125630 +        } else {
125631 +            /* match is larger than current */
125632 +            *largerPtr = matchIndex;
125633 +            commonLengthLarger = matchLength;
125634 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
125635 +            largerPtr = nextPtr;
125636 +            matchIndex = nextPtr[0];
125637 +    }   }
125639 +    *smallerPtr = *largerPtr = 0;
125640 +    {   U32 positions = 0;
125641 +        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
125642 +        assert(matchEndIdx > curr + 8);
125643 +        return MAX(positions, matchEndIdx - (curr + 8));
125644 +    }
125647 +FORCE_INLINE_TEMPLATE
125648 +void ZSTD_updateTree_internal(
125649 +                ZSTD_matchState_t* ms,
125650 +                const BYTE* const ip, const BYTE* const iend,
125651 +                const U32 mls, const ZSTD_dictMode_e dictMode)
125653 +    const BYTE* const base = ms->window.base;
125654 +    U32 const target = (U32)(ip - base);
125655 +    U32 idx = ms->nextToUpdate;
125656 +    DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)",
125657 +                idx, target, dictMode);
125659 +    while(idx < target) {
125660 +        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
125661 +        assert(idx < (U32)(idx + forward));
125662 +        idx += forward;
125663 +    }
125664 +    assert((size_t)(ip - base) <= (size_t)(U32)(-1));
125665 +    assert((size_t)(iend - base) <= (size_t)(U32)(-1));
125666 +    ms->nextToUpdate = target;
125669 +void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
125670 +    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
125673 +FORCE_INLINE_TEMPLATE
125674 +U32 ZSTD_insertBtAndGetAllMatches (
125675 +                    ZSTD_match_t* matches,   /* store result (found matches) in this table (presumed large enough) */
125676 +                    ZSTD_matchState_t* ms,
125677 +                    U32* nextToUpdate3,
125678 +                    const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
125679 +                    const U32 rep[ZSTD_REP_NUM],
125680 +                    U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
125681 +                    const U32 lengthToBeat,
125682 +                    U32 const mls /* template */)
125684 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
125685 +    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
125686 +    const BYTE* const base = ms->window.base;
125687 +    U32 const curr = (U32)(ip-base);
125688 +    U32 const hashLog = cParams->hashLog;
125689 +    U32 const minMatch = (mls==3) ? 3 : 4;
125690 +    U32* const hashTable = ms->hashTable;
125691 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
125692 +    U32 matchIndex  = hashTable[h];
125693 +    U32* const bt   = ms->chainTable;
125694 +    U32 const btLog = cParams->chainLog - 1;
125695 +    U32 const btMask= (1U << btLog) - 1;
125696 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
125697 +    const BYTE* const dictBase = ms->window.dictBase;
125698 +    U32 const dictLimit = ms->window.dictLimit;
125699 +    const BYTE* const dictEnd = dictBase + dictLimit;
125700 +    const BYTE* const prefixStart = base + dictLimit;
125701 +    U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
125702 +    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
125703 +    U32 const matchLow = windowLow ? windowLow : 1;
125704 +    U32* smallerPtr = bt + 2*(curr&btMask);
125705 +    U32* largerPtr  = bt + 2*(curr&btMask) + 1;
125706 +    U32 matchEndIdx = curr+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
125707 +    U32 dummy32;   /* to be nullified at the end */
125708 +    U32 mnum = 0;
125709 +    U32 nbCompares = 1U << cParams->searchLog;
125711 +    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
125712 +    const ZSTD_compressionParameters* const dmsCParams =
125713 +                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
125714 +    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
125715 +    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
125716 +    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
125717 +    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
125718 +    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
125719 +    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
125720 +    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
125721 +    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
125722 +    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
125724 +    size_t bestLength = lengthToBeat-1;
125725 +    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
125727 +    /* check repCode */
125728 +    assert(ll0 <= 1);   /* necessarily 1 or 0 */
125729 +    {   U32 const lastR = ZSTD_REP_NUM + ll0;
125730 +        U32 repCode;
125731 +        for (repCode = ll0; repCode < lastR; repCode++) {
125732 +            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
125733 +            U32 const repIndex = curr - repOffset;
125734 +            U32 repLen = 0;
125735 +            assert(curr >= dictLimit);
125736 +            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) {  /* equivalent to `curr > repIndex >= dictLimit` */
125737 +                /* We must validate the repcode offset because when we're using a dictionary the
125738 +                 * valid offset range shrinks when the dictionary goes out of bounds.
125739 +                 */
125740 +                if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
125741 +                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
125742 +                }
125743 +            } else {  /* repIndex < dictLimit || repIndex >= curr */
125744 +                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
125745 +                                             dmsBase + repIndex - dmsIndexDelta :
125746 +                                             dictBase + repIndex;
125747 +                assert(curr >= windowLow);
125748 +                if ( dictMode == ZSTD_extDict
125749 +                  && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow)  /* equivalent to `curr > repIndex >= windowLow` */
125750 +                     & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
125751 +                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
125752 +                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
125753 +                }
125754 +                if (dictMode == ZSTD_dictMatchState
125755 +                  && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `curr > repIndex >= dmsLowLimit` */
125756 +                     & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
125757 +                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
125758 +                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
125759 +            }   }
125760 +            /* save longer solution */
125761 +            if (repLen > bestLength) {
125762 +                DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
125763 +                            repCode, ll0, repOffset, repLen);
125764 +                bestLength = repLen;
125765 +                matches[mnum].off = repCode - ll0;
125766 +                matches[mnum].len = (U32)repLen;
125767 +                mnum++;
125768 +                if ( (repLen > sufficient_len)
125769 +                   | (ip+repLen == iLimit) ) {  /* best possible */
125770 +                    return mnum;
125771 +    }   }   }   }
125773 +    /* HC3 match finder */
125774 +    if ((mls == 3) /*static*/ && (bestLength < mls)) {
125775 +        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
125776 +        if ((matchIndex3 >= matchLow)
125777 +          & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
125778 +            size_t mlen;
125779 +            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
125780 +                const BYTE* const match = base + matchIndex3;
125781 +                mlen = ZSTD_count(ip, match, iLimit);
125782 +            } else {
125783 +                const BYTE* const match = dictBase + matchIndex3;
125784 +                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
125785 +            }
125787 +            /* save best solution */
125788 +            if (mlen >= mls /* == 3 > bestLength */) {
125789 +                DEBUGLOG(8, "found small match with hlog3, of length %u",
125790 +                            (U32)mlen);
125791 +                bestLength = mlen;
125792 +                assert(curr > matchIndex3);
125793 +                assert(mnum==0);  /* no prior solution */
125794 +                matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE;
125795 +                matches[0].len = (U32)mlen;
125796 +                mnum = 1;
125797 +                if ( (mlen > sufficient_len) |
125798 +                     (ip+mlen == iLimit) ) {  /* best possible length */
125799 +                    ms->nextToUpdate = curr+1;  /* skip insertion */
125800 +                    return 1;
125801 +        }   }   }
125802 +        /* no dictMatchState lookup: dicts don't have a populated HC3 table */
125803 +    }
125805 +    hashTable[h] = curr;   /* Update Hash Table */
125807 +    while (nbCompares-- && (matchIndex >= matchLow)) {
125808 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
125809 +        const BYTE* match;
125810 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
125811 +        assert(curr > matchIndex);
125813 +        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
125814 +            assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
125815 +            match = base + matchIndex;
125816 +            if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
125817 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
125818 +        } else {
125819 +            match = dictBase + matchIndex;
125820 +            assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
125821 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
125822 +            if (matchIndex+matchLength >= dictLimit)
125823 +                match = base + matchIndex;   /* prepare for match[matchLength] read */
125824 +        }
125826 +        if (matchLength > bestLength) {
125827 +            DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
125828 +                    (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
125829 +            assert(matchEndIdx > matchIndex);
125830 +            if (matchLength > matchEndIdx - matchIndex)
125831 +                matchEndIdx = matchIndex + (U32)matchLength;
125832 +            bestLength = matchLength;
125833 +            matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
125834 +            matches[mnum].len = (U32)matchLength;
125835 +            mnum++;
125836 +            if ( (matchLength > ZSTD_OPT_NUM)
125837 +               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
125838 +                if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
125839 +                break; /* drop, to preserve bt consistency (miss a little bit of compression) */
125840 +            }
125841 +        }
125843 +        if (match[matchLength] < ip[matchLength]) {
125844 +            /* match smaller than current */
125845 +            *smallerPtr = matchIndex;             /* update smaller idx */
125846 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
125847 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
125848 +            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
125849 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */
125850 +        } else {
125851 +            *largerPtr = matchIndex;
125852 +            commonLengthLarger = matchLength;
125853 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
125854 +            largerPtr = nextPtr;
125855 +            matchIndex = nextPtr[0];
125856 +    }   }
125858 +    *smallerPtr = *largerPtr = 0;
125860 +    if (dictMode == ZSTD_dictMatchState && nbCompares) {
125861 +        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
125862 +        U32 dictMatchIndex = dms->hashTable[dmsH];
125863 +        const U32* const dmsBt = dms->chainTable;
125864 +        commonLengthSmaller = commonLengthLarger = 0;
125865 +        while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
125866 +            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
125867 +            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
125868 +            const BYTE* match = dmsBase + dictMatchIndex;
125869 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
125870 +            if (dictMatchIndex+matchLength >= dmsHighLimit)
125871 +                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */
125873 +            if (matchLength > bestLength) {
125874 +                matchIndex = dictMatchIndex + dmsIndexDelta;
125875 +                DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
125876 +                        (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
125877 +                if (matchLength > matchEndIdx - matchIndex)
125878 +                    matchEndIdx = matchIndex + (U32)matchLength;
125879 +                bestLength = matchLength;
125880 +                matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
125881 +                matches[mnum].len = (U32)matchLength;
125882 +                mnum++;
125883 +                if ( (matchLength > ZSTD_OPT_NUM)
125884 +                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
125885 +                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
125886 +                }
125887 +            }
125889 +            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
125890 +            if (match[matchLength] < ip[matchLength]) {
125891 +                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
125892 +                dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
125893 +            } else {
125894 +                /* match is larger than current */
125895 +                commonLengthLarger = matchLength;
125896 +                dictMatchIndex = nextPtr[0];
125897 +            }
125898 +        }
125899 +    }
125901 +    assert(matchEndIdx > curr+8);
125902 +    ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
125903 +    return mnum;
125907 +FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
125908 +                        ZSTD_match_t* matches,   /* store result (match found, increasing size) in this table */
125909 +                        ZSTD_matchState_t* ms,
125910 +                        U32* nextToUpdate3,
125911 +                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
125912 +                        const U32 rep[ZSTD_REP_NUM],
125913 +                        U32 const ll0,
125914 +                        U32 const lengthToBeat)
125916 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
125917 +    U32 const matchLengthSearch = cParams->minMatch;
125918 +    DEBUGLOG(8, "ZSTD_BtGetAllMatches");
125919 +    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
125920 +    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
125921 +    switch(matchLengthSearch)
125922 +    {
125923 +    case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
125924 +    default :
125925 +    case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
125926 +    case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
125927 +    case 7 :
125928 +    case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
125929 +    }
125932 +/*************************
125933 +*  LDM helper functions  *
125934 +*************************/
125936 +/* Struct containing info needed to make decision about ldm inclusion */
125937 +typedef struct {
125938 +    rawSeqStore_t seqStore;         /* External match candidates store for this block */
125939 +    U32 startPosInBlock;            /* Start position of the current match candidate */
125940 +    U32 endPosInBlock;              /* End position of the current match candidate */
125941 +    U32 offset;                     /* Offset of the match candidate */
125942 +} ZSTD_optLdm_t;
125944 +/* ZSTD_optLdm_skipRawSeqStoreBytes():
125945 + * Moves forward in rawSeqStore by nbBytes, which will update the fields 'pos' and 'posInSequence'.
125946 + */
125947 +static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
125948 +    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
125949 +    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
125950 +        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
125951 +        if (currPos >= currSeq.litLength + currSeq.matchLength) {
125952 +            currPos -= currSeq.litLength + currSeq.matchLength;
125953 +            rawSeqStore->pos++;
125954 +        } else {
125955 +            rawSeqStore->posInSequence = currPos;
125956 +            break;
125957 +        }
125958 +    }
125959 +    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
125960 +        rawSeqStore->posInSequence = 0;
125961 +    }
125964 +/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
125965 + * Calculates the beginning and end of the next match in the current block.
125966 + * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
125967 + */
125968 +static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
125969 +                                                   U32 blockBytesRemaining) {
125970 +    rawSeq currSeq;
125971 +    U32 currBlockEndPos;
125972 +    U32 literalsBytesRemaining;
125973 +    U32 matchBytesRemaining;
125975 +    /* Setting match end position to MAX to ensure we never use an LDM during this block */
125976 +    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
125977 +        optLdm->startPosInBlock = UINT_MAX;
125978 +        optLdm->endPosInBlock = UINT_MAX;
125979 +        return;
125980 +    }
125981 +    /* Calculate appropriate bytes left in matchLength and litLength after adjusting
125982 +       based on ldmSeqStore->posInSequence */
125983 +    currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
125984 +    assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
125985 +    currBlockEndPos = currPosInBlock + blockBytesRemaining;
125986 +    literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
125987 +            currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
125988 +            0;
125989 +    matchBytesRemaining = (literalsBytesRemaining == 0) ?
125990 +            currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
125991 +            currSeq.matchLength;
125993 +    /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
125994 +    if (literalsBytesRemaining >= blockBytesRemaining) {
125995 +        optLdm->startPosInBlock = UINT_MAX;
125996 +        optLdm->endPosInBlock = UINT_MAX;
125997 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
125998 +        return;
125999 +    }
126001 +    /* Matches may be < MINMATCH by this process. In that case, we will reject them
126002 +       when we are deciding whether or not to add the ldm */
126003 +    optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
126004 +    optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
126005 +    optLdm->offset = currSeq.offset;
126007 +    if (optLdm->endPosInBlock > currBlockEndPos) {
126008 +        /* Match ends after the block ends, we can't use the whole match */
126009 +        optLdm->endPosInBlock = currBlockEndPos;
126010 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
126011 +    } else {
126012 +        /* Consume nb of bytes equal to size of sequence left */
126013 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
126014 +    }
126017 +/* ZSTD_optLdm_maybeAddMatch():
126018 + * Adds a match if it's long enough, based on it's 'matchStartPosInBlock'
126019 + * and 'matchEndPosInBlock', into 'matches'. Maintains the correct ordering of 'matches'
126020 + */
126021 +static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
126022 +                                      ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
126023 +    U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
126024 +    /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
126025 +    U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
126026 +    U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;
126028 +    /* Ensure that current block position is not outside of the match */
126029 +    if (currPosInBlock < optLdm->startPosInBlock
126030 +      || currPosInBlock >= optLdm->endPosInBlock
126031 +      || candidateMatchLength < MINMATCH) {
126032 +        return;
126033 +    }
126035 +    if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
126036 +        DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
126037 +                 candidateOffCode, candidateMatchLength, currPosInBlock);
126038 +        matches[*nbMatches].len = candidateMatchLength;
126039 +        matches[*nbMatches].off = candidateOffCode;
126040 +        (*nbMatches)++;
126041 +    }
126044 +/* ZSTD_optLdm_processMatchCandidate():
126045 + * Wrapper function to update ldm seq store and call ldm functions as necessary.
126046 + */
126047 +static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
126048 +                                              U32 currPosInBlock, U32 remainingBytes) {
126049 +    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
126050 +        return;
126051 +    }
126053 +    if (currPosInBlock >= optLdm->endPosInBlock) {
126054 +        if (currPosInBlock > optLdm->endPosInBlock) {
126055 +            /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
126056 +             * at the end of a match from the ldm seq store, and will often be some bytes
126057 +             * over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots"
126058 +             */
126059 +            U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock;
126060 +            ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
126061 +        }
126062 +        ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
126063 +    }
126064 +    ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
126067 +/*-*******************************
126068 +*  Optimal parser
126069 +*********************************/
126072 +static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
126074 +    return sol.litlen + sol.mlen;
126077 +#if 0 /* debug */
126079 +static void
126080 +listStats(const U32* table, int lastEltID)
126082 +    int const nbElts = lastEltID + 1;
126083 +    int enb;
126084 +    for (enb=0; enb < nbElts; enb++) {
126085 +        (void)table;
126086 +        /* RAWLOG(2, "%3i:%3i,  ", enb, table[enb]); */
126087 +        RAWLOG(2, "%4i,", table[enb]);
126088 +    }
126089 +    RAWLOG(2, " \n");
126092 +#endif
126094 +FORCE_INLINE_TEMPLATE size_t
126095 +ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
126096 +                               seqStore_t* seqStore,
126097 +                               U32 rep[ZSTD_REP_NUM],
126098 +                         const void* src, size_t srcSize,
126099 +                         const int optLevel,
126100 +                         const ZSTD_dictMode_e dictMode)
126102 +    optState_t* const optStatePtr = &ms->opt;
126103 +    const BYTE* const istart = (const BYTE*)src;
126104 +    const BYTE* ip = istart;
126105 +    const BYTE* anchor = istart;
126106 +    const BYTE* const iend = istart + srcSize;
126107 +    const BYTE* const ilimit = iend - 8;
126108 +    const BYTE* const base = ms->window.base;
126109 +    const BYTE* const prefixStart = base + ms->window.dictLimit;
126110 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
126112 +    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
126113 +    U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
126114 +    U32 nextToUpdate3 = ms->nextToUpdate;
126116 +    ZSTD_optimal_t* const opt = optStatePtr->priceTable;
126117 +    ZSTD_match_t* const matches = optStatePtr->matchTable;
126118 +    ZSTD_optimal_t lastSequence;
126119 +    ZSTD_optLdm_t optLdm;
126121 +    optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
126122 +    optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
126123 +    ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
126125 +    /* init */
126126 +    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
126127 +                (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
126128 +    assert(optLevel <= 2);
126129 +    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
126130 +    ip += (ip==prefixStart);
126132 +    /* Match Loop */
126133 +    while (ip < ilimit) {
126134 +        U32 cur, last_pos = 0;
126136 +        /* find first match */
126137 +        {   U32 const litlen = (U32)(ip - anchor);
126138 +            U32 const ll0 = !litlen;
126139 +            U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
126140 +            ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
126141 +                                              (U32)(ip-istart), (U32)(iend - ip));
126142 +            if (!nbMatches) { ip++; continue; }
126144 +            /* initialize opt[0] */
126145 +            { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
126146 +            opt[0].mlen = 0;  /* means is_a_literal */
126147 +            opt[0].litlen = litlen;
126148 +            /* We don't need to include the actual price of the literals because
126149 +             * it is static for the duration of the forward pass, and is included
126150 +             * in every price. We include the literal length to avoid negative
126151 +             * prices when we subtract the previous literal length.
126152 +             */
126153 +            opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
126155 +            /* large match -> immediate encoding */
126156 +            {   U32 const maxML = matches[nbMatches-1].len;
126157 +                U32 const maxOffset = matches[nbMatches-1].off;
126158 +                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
126159 +                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
126161 +                if (maxML > sufficient_len) {
126162 +                    lastSequence.litlen = litlen;
126163 +                    lastSequence.mlen = maxML;
126164 +                    lastSequence.off = maxOffset;
126165 +                    DEBUGLOG(6, "large match (%u>%u), immediate encoding",
126166 +                                maxML, sufficient_len);
126167 +                    cur = 0;
126168 +                    last_pos = ZSTD_totalLen(lastSequence);
126169 +                    goto _shortestPath;
126170 +            }   }
126172 +            /* set prices for first matches starting position == 0 */
126173 +            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
126174 +                U32 pos;
126175 +                U32 matchNb;
126176 +                for (pos = 1; pos < minMatch; pos++) {
126177 +                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
126178 +                }
126179 +                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
126180 +                    U32 const offset = matches[matchNb].off;
126181 +                    U32 const end = matches[matchNb].len;
126182 +                    for ( ; pos <= end ; pos++ ) {
126183 +                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
126184 +                        U32 const sequencePrice = literalsPrice + matchPrice;
126185 +                        DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
126186 +                                    pos, ZSTD_fCost(sequencePrice));
126187 +                        opt[pos].mlen = pos;
126188 +                        opt[pos].off = offset;
126189 +                        opt[pos].litlen = litlen;
126190 +                        opt[pos].price = sequencePrice;
126191 +                }   }
126192 +                last_pos = pos-1;
126193 +            }
126194 +        }
126196 +        /* check further positions */
126197 +        for (cur = 1; cur <= last_pos; cur++) {
126198 +            const BYTE* const inr = ip + cur;
126199 +            assert(cur < ZSTD_OPT_NUM);
126200 +            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
126202 +            /* Fix current position with one literal if cheaper */
126203 +            {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
126204 +                int const price = opt[cur-1].price
126205 +                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
126206 +                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
126207 +                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
126208 +                assert(price < 1000000000); /* overflow check */
126209 +                if (price <= opt[cur].price) {
126210 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
126211 +                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
126212 +                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
126213 +                    opt[cur].mlen = 0;
126214 +                    opt[cur].off = 0;
126215 +                    opt[cur].litlen = litlen;
126216 +                    opt[cur].price = price;
126217 +                } else {
126218 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
126219 +                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
126220 +                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
126221 +                }
126222 +            }
126224 +            /* Set the repcodes of the current position. We must do it here
126225 +             * because we rely on the repcodes of the 2nd to last sequence being
126226 +             * correct to set the next chunks repcodes during the backward
126227 +             * traversal.
126228 +             */
126229 +            ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
126230 +            assert(cur >= opt[cur].mlen);
126231 +            if (opt[cur].mlen != 0) {
126232 +                U32 const prev = cur - opt[cur].mlen;
126233 +                repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
126234 +                ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
126235 +            } else {
126236 +                ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
126237 +            }
126239 +            /* last match must start at a minimum distance of 8 from oend */
126240 +            if (inr > ilimit) continue;
126242 +            if (cur == last_pos) break;
126244 +            if ( (optLevel==0) /*static_test*/
126245 +              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
126246 +                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
126247 +                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
126248 +            }
126250 +            {   U32 const ll0 = (opt[cur].mlen != 0);
126251 +                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
126252 +                U32 const previousPrice = opt[cur].price;
126253 +                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
126254 +                U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
126255 +                U32 matchNb;
126257 +                ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
126258 +                                                  (U32)(inr-istart), (U32)(iend-inr));
126260 +                if (!nbMatches) {
126261 +                    DEBUGLOG(7, "rPos:%u : no match found", cur);
126262 +                    continue;
126263 +                }
126265 +                {   U32 const maxML = matches[nbMatches-1].len;
126266 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
126267 +                                inr-istart, cur, nbMatches, maxML);
126269 +                    if ( (maxML > sufficient_len)
126270 +                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
126271 +                        lastSequence.mlen = maxML;
126272 +                        lastSequence.off = matches[nbMatches-1].off;
126273 +                        lastSequence.litlen = litlen;
126274 +                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
126275 +                        last_pos = cur + ZSTD_totalLen(lastSequence);
126276 +                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
126277 +                        goto _shortestPath;
126278 +                }   }
126280 +                /* set prices using matches found at position == cur */
126281 +                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
126282 +                    U32 const offset = matches[matchNb].off;
126283 +                    U32 const lastML = matches[matchNb].len;
126284 +                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
126285 +                    U32 mlen;
126287 +                    DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
126288 +                                matchNb, matches[matchNb].off, lastML, litlen);
126290 +                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
126291 +                        U32 const pos = cur + mlen;
126292 +                        int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
126294 +                        if ((pos > last_pos) || (price < opt[pos].price)) {
126295 +                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
126296 +                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
126297 +                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
126298 +                            opt[pos].mlen = mlen;
126299 +                            opt[pos].off = offset;
126300 +                            opt[pos].litlen = litlen;
126301 +                            opt[pos].price = price;
126302 +                        } else {
126303 +                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
126304 +                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
126305 +                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
126306 +                        }
126307 +            }   }   }
126308 +        }  /* for (cur = 1; cur <= last_pos; cur++) */
126310 +        lastSequence = opt[last_pos];
126311 +        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */
126312 +        assert(cur < ZSTD_OPT_NUM);  /* control overflow*/
126314 +_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
126315 +        assert(opt[0].mlen == 0);
126317 +        /* Set the next chunk's repcodes based on the repcodes of the beginning
126318 +         * of the last match, and the last sequence. This avoids us having to
126319 +         * update them while traversing the sequences.
126320 +         */
126321 +        if (lastSequence.mlen != 0) {
126322 +            repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
126323 +            ZSTD_memcpy(rep, &reps, sizeof(reps));
126324 +        } else {
126325 +            ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
126326 +        }
126328 +        {   U32 const storeEnd = cur + 1;
126329 +            U32 storeStart = storeEnd;
126330 +            U32 seqPos = cur;
126332 +            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
126333 +                        last_pos, cur); (void)last_pos;
126334 +            assert(storeEnd < ZSTD_OPT_NUM);
126335 +            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
126336 +                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
126337 +            opt[storeEnd] = lastSequence;
126338 +            while (seqPos > 0) {
126339 +                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
126340 +                storeStart--;
126341 +                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
126342 +                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
126343 +                opt[storeStart] = opt[seqPos];
126344 +                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
126345 +            }
126347 +            /* save sequences */
126348 +            DEBUGLOG(6, "sending selected sequences into seqStore")
126349 +            {   U32 storePos;
126350 +                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
126351 +                    U32 const llen = opt[storePos].litlen;
126352 +                    U32 const mlen = opt[storePos].mlen;
126353 +                    U32 const offCode = opt[storePos].off;
126354 +                    U32 const advance = llen + mlen;
126355 +                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
126356 +                                anchor - istart, (unsigned)llen, (unsigned)mlen);
126358 +                    if (mlen==0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
126359 +                        assert(storePos == storeEnd);   /* must be last sequence */
126360 +                        ip = anchor + llen;     /* last "sequence" is a bunch of literals => don't progress anchor */
126361 +                        continue;   /* will finish */
126362 +                    }
126364 +                    assert(anchor + llen <= iend);
126365 +                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
126366 +                    ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
126367 +                    anchor += advance;
126368 +                    ip = anchor;
126369 +            }   }
126370 +            ZSTD_setBasePrices(optStatePtr, optLevel);
126371 +        }
126372 +    }   /* while (ip < ilimit) */
126374 +    /* Return the last literals size */
126375 +    return (size_t)(iend - anchor);
126379 +size_t ZSTD_compressBlock_btopt(
126380 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126381 +        const void* src, size_t srcSize)
126383 +    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
126384 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
126388 +/* used in 2-pass strategy */
126389 +static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
126391 +    U32 s, sum=0;
126392 +    assert(ZSTD_FREQ_DIV+bonus >= 0);
126393 +    for (s=0; s<lastEltIndex+1; s++) {
126394 +        table[s] <<= ZSTD_FREQ_DIV+bonus;
126395 +        table[s]--;
126396 +        sum += table[s];
126397 +    }
126398 +    return sum;
126401 +/* used in 2-pass strategy */
126402 +MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
126404 +    if (ZSTD_compressedLiterals(optPtr))
126405 +        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
126406 +    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
126407 +    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
126408 +    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
126411 +/* ZSTD_initStats_ultra():
126412 + * make a first compression pass, just to seed stats with more accurate starting values.
126413 + * only works on first block, with no dictionary and no ldm.
126414 + * this function cannot error, hence its contract must be respected.
126415 + */
126416 +static void
126417 +ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
126418 +                     seqStore_t* seqStore,
126419 +                     U32 rep[ZSTD_REP_NUM],
126420 +               const void* src, size_t srcSize)
126422 +    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here */
126423 +    ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
126425 +    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
126426 +    assert(ms->opt.litLengthSum == 0);    /* first block */
126427 +    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */
126428 +    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
126429 +    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);  /* no prefix (note: intentional overflow, defined as 2-complement) */
126431 +    ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt*/
126433 +    /* invalidate first scan from history */
126434 +    ZSTD_resetSeqStore(seqStore);
126435 +    ms->window.base -= srcSize;
126436 +    ms->window.dictLimit += (U32)srcSize;
126437 +    ms->window.lowLimit = ms->window.dictLimit;
126438 +    ms->nextToUpdate = ms->window.dictLimit;
126440 +    /* re-inforce weight of collected statistics */
126441 +    ZSTD_upscaleStats(&ms->opt);
126444 +size_t ZSTD_compressBlock_btultra(
126445 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126446 +        const void* src, size_t srcSize)
126448 +    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
126449 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
126452 +size_t ZSTD_compressBlock_btultra2(
126453 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126454 +        const void* src, size_t srcSize)
126456 +    U32 const curr = (U32)((const BYTE*)src - ms->window.base);
126457 +    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
126459 +    /* 2-pass strategy:
126460 +     * this strategy makes a first pass over first block to collect statistics
126461 +     * and seed next round's statistics with it.
126462 +     * After 1st pass, function forgets everything, and starts a new block.
126463 +     * Consequently, this can only work if no data has been previously loaded in tables,
126464 +     * aka, no dictionary, no prefix, no ldm preprocessing.
126465 +     * The compression ratio gain is generally small (~0.5% on first block),
126466 +     * the cost is 2x cpu time on first block. */
126467 +    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
126468 +    if ( (ms->opt.litLengthSum==0)   /* first block */
126469 +      && (seqStore->sequences == seqStore->sequencesStart)  /* no ldm */
126470 +      && (ms->window.dictLimit == ms->window.lowLimit)   /* no dictionary */
126471 +      && (curr == ms->window.dictLimit)   /* start of frame, nothing already loaded nor skipped */
126472 +      && (srcSize > ZSTD_PREDEF_THRESHOLD)
126473 +      ) {
126474 +        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
126475 +    }
126477 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
126480 +size_t ZSTD_compressBlock_btopt_dictMatchState(
126481 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126482 +        const void* src, size_t srcSize)
126484 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
126487 +size_t ZSTD_compressBlock_btultra_dictMatchState(
126488 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126489 +        const void* src, size_t srcSize)
126491 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
126494 +size_t ZSTD_compressBlock_btopt_extDict(
126495 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126496 +        const void* src, size_t srcSize)
126498 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
126501 +size_t ZSTD_compressBlock_btultra_extDict(
126502 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126503 +        const void* src, size_t srcSize)
126505 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
126508 +/* note : no btultra2 variant for extDict nor dictMatchState,
126509 + * because btultra2 is not meant to work with dictionaries
126510 + * and is only specific for the first block (no prefix) */
126511 diff --git a/lib/zstd/compress/zstd_opt.h b/lib/zstd/compress/zstd_opt.h
126512 new file mode 100644
126513 index 000000000000..22b862858ba7
126514 --- /dev/null
126515 +++ b/lib/zstd/compress/zstd_opt.h
126516 @@ -0,0 +1,50 @@
126518 + * Copyright (c) Yann Collet, Facebook, Inc.
126519 + * All rights reserved.
126521 + * This source code is licensed under both the BSD-style license (found in the
126522 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
126523 + * in the COPYING file in the root directory of this source tree).
126524 + * You may select, at your option, one of the above-listed licenses.
126525 + */
126527 +#ifndef ZSTD_OPT_H
126528 +#define ZSTD_OPT_H
126531 +#include "zstd_compress_internal.h"
126533 +/* used in ZSTD_loadDictionaryContent() */
126534 +void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
126536 +size_t ZSTD_compressBlock_btopt(
126537 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126538 +        void const* src, size_t srcSize);
126539 +size_t ZSTD_compressBlock_btultra(
126540 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126541 +        void const* src, size_t srcSize);
126542 +size_t ZSTD_compressBlock_btultra2(
126543 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126544 +        void const* src, size_t srcSize);
126547 +size_t ZSTD_compressBlock_btopt_dictMatchState(
126548 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126549 +        void const* src, size_t srcSize);
126550 +size_t ZSTD_compressBlock_btultra_dictMatchState(
126551 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126552 +        void const* src, size_t srcSize);
126554 +size_t ZSTD_compressBlock_btopt_extDict(
126555 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126556 +        void const* src, size_t srcSize);
126557 +size_t ZSTD_compressBlock_btultra_extDict(
126558 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126559 +        void const* src, size_t srcSize);
126561 +        /* note : no btultra2 variant for extDict nor dictMatchState,
126562 +         * because btultra2 is not meant to work with dictionaries
126563 +         * and is only specific for the first block (no prefix) */
126566 +#endif /* ZSTD_OPT_H */
126567 diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c
126568 deleted file mode 100644
126569 index 66cd487a326a..000000000000
126570 --- a/lib/zstd/decompress.c
126571 +++ /dev/null
126572 @@ -1,2531 +0,0 @@
126574 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
126575 - * All rights reserved.
126577 - * This source code is licensed under the BSD-style license found in the
126578 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
126579 - * An additional grant of patent rights can be found in the PATENTS file in the
126580 - * same directory.
126582 - * This program is free software; you can redistribute it and/or modify it under
126583 - * the terms of the GNU General Public License version 2 as published by the
126584 - * Free Software Foundation. This program is dual-licensed; you may select
126585 - * either version 2 of the GNU General Public License ("GPL") or BSD license
126586 - * ("BSD").
126587 - */
126589 -/* ***************************************************************
126590 -*  Tuning parameters
126591 -*****************************************************************/
126593 -*  MAXWINDOWSIZE_DEFAULT :
126594 -*  maximum window size accepted by DStream, by default.
126595 -*  Frames requiring more memory will be rejected.
126597 -#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
126598 -#define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */
126599 -#endif
126601 -/*-*******************************************************
126602 -*  Dependencies
126603 -*********************************************************/
126604 -#include "fse.h"
126605 -#include "huf.h"
126606 -#include "mem.h" /* low level memory routines */
126607 -#include "zstd_internal.h"
126608 -#include <linux/kernel.h>
126609 -#include <linux/module.h>
126610 -#include <linux/string.h> /* memcpy, memmove, memset */
126612 -#define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0)
126614 -/*-*************************************
126615 -*  Macros
126616 -***************************************/
126617 -#define ZSTD_isError ERR_isError /* for inlining */
126618 -#define FSE_isError ERR_isError
126619 -#define HUF_isError ERR_isError
126621 -/*_*******************************************************
126622 -*  Memory operations
126623 -**********************************************************/
126624 -static void ZSTD_copy4(void *dst, const void *src) { memcpy(dst, src, 4); }
126626 -/*-*************************************************************
126627 -*   Context management
126628 -***************************************************************/
126629 -typedef enum {
126630 -       ZSTDds_getFrameHeaderSize,
126631 -       ZSTDds_decodeFrameHeader,
126632 -       ZSTDds_decodeBlockHeader,
126633 -       ZSTDds_decompressBlock,
126634 -       ZSTDds_decompressLastBlock,
126635 -       ZSTDds_checkChecksum,
126636 -       ZSTDds_decodeSkippableHeader,
126637 -       ZSTDds_skipFrame
126638 -} ZSTD_dStage;
126640 -typedef struct {
126641 -       FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
126642 -       FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
126643 -       FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
126644 -       HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
126645 -       U64 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32 / 2];
126646 -       U32 rep[ZSTD_REP_NUM];
126647 -} ZSTD_entropyTables_t;
126649 -struct ZSTD_DCtx_s {
126650 -       const FSE_DTable *LLTptr;
126651 -       const FSE_DTable *MLTptr;
126652 -       const FSE_DTable *OFTptr;
126653 -       const HUF_DTable *HUFptr;
126654 -       ZSTD_entropyTables_t entropy;
126655 -       const void *previousDstEnd; /* detect continuity */
126656 -       const void *base;          /* start of curr segment */
126657 -       const void *vBase;        /* virtual start of previous segment if it was just before curr one */
126658 -       const void *dictEnd;    /* end of previous segment */
126659 -       size_t expected;
126660 -       ZSTD_frameParams fParams;
126661 -       blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
126662 -       ZSTD_dStage stage;
126663 -       U32 litEntropy;
126664 -       U32 fseEntropy;
126665 -       struct xxh64_state xxhState;
126666 -       size_t headerSize;
126667 -       U32 dictID;
126668 -       const BYTE *litPtr;
126669 -       ZSTD_customMem customMem;
126670 -       size_t litSize;
126671 -       size_t rleSize;
126672 -       BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
126673 -       BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
126674 -}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
126676 -size_t ZSTD_DCtxWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx)); }
126678 -size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx)
126680 -       dctx->expected = ZSTD_frameHeaderSize_prefix;
126681 -       dctx->stage = ZSTDds_getFrameHeaderSize;
126682 -       dctx->previousDstEnd = NULL;
126683 -       dctx->base = NULL;
126684 -       dctx->vBase = NULL;
126685 -       dctx->dictEnd = NULL;
126686 -       dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
126687 -       dctx->litEntropy = dctx->fseEntropy = 0;
126688 -       dctx->dictID = 0;
126689 -       ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
126690 -       memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
126691 -       dctx->LLTptr = dctx->entropy.LLTable;
126692 -       dctx->MLTptr = dctx->entropy.MLTable;
126693 -       dctx->OFTptr = dctx->entropy.OFTable;
126694 -       dctx->HUFptr = dctx->entropy.hufTable;
126695 -       return 0;
126698 -ZSTD_DCtx *ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
126700 -       ZSTD_DCtx *dctx;
126702 -       if (!customMem.customAlloc || !customMem.customFree)
126703 -               return NULL;
126705 -       dctx = (ZSTD_DCtx *)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem);
126706 -       if (!dctx)
126707 -               return NULL;
126708 -       memcpy(&dctx->customMem, &customMem, sizeof(customMem));
126709 -       ZSTD_decompressBegin(dctx);
126710 -       return dctx;
126713 -ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize)
126715 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
126716 -       return ZSTD_createDCtx_advanced(stackMem);
126719 -size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx)
126721 -       if (dctx == NULL)
126722 -               return 0; /* support free on NULL */
126723 -       ZSTD_free(dctx, dctx->customMem);
126724 -       return 0; /* reserved as a potential error code in the future */
126727 -void ZSTD_copyDCtx(ZSTD_DCtx *dstDCtx, const ZSTD_DCtx *srcDCtx)
126729 -       size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max;
126730 -       memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */
126733 -static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict);
126735 -/*-*************************************************************
126736 -*   Decompression section
126737 -***************************************************************/
126739 -/*! ZSTD_isFrame() :
126740 - *  Tells if the content of `buffer` starts with a valid Frame Identifier.
126741 - *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
126742 - *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
126743 - *  Note 3 : Skippable Frame Identifiers are considered valid. */
126744 -unsigned ZSTD_isFrame(const void *buffer, size_t size)
126746 -       if (size < 4)
126747 -               return 0;
126748 -       {
126749 -               U32 const magic = ZSTD_readLE32(buffer);
126750 -               if (magic == ZSTD_MAGICNUMBER)
126751 -                       return 1;
126752 -               if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START)
126753 -                       return 1;
126754 -       }
126755 -       return 0;
126758 -/** ZSTD_frameHeaderSize() :
126759 -*   srcSize must be >= ZSTD_frameHeaderSize_prefix.
126760 -*   @return : size of the Frame Header */
126761 -static size_t ZSTD_frameHeaderSize(const void *src, size_t srcSize)
126763 -       if (srcSize < ZSTD_frameHeaderSize_prefix)
126764 -               return ERROR(srcSize_wrong);
126765 -       {
126766 -               BYTE const fhd = ((const BYTE *)src)[4];
126767 -               U32 const dictID = fhd & 3;
126768 -               U32 const singleSegment = (fhd >> 5) & 1;
126769 -               U32 const fcsId = fhd >> 6;
126770 -               return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId);
126771 -       }
126774 -/** ZSTD_getFrameParams() :
126775 -*   decode Frame Header, or require larger `srcSize`.
126776 -*   @return : 0, `fparamsPtr` is correctly filled,
126777 -*            >0, `srcSize` is too small, result is expected `srcSize`,
126778 -*             or an error code, which can be tested using ZSTD_isError() */
126779 -size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, size_t srcSize)
126781 -       const BYTE *ip = (const BYTE *)src;
126783 -       if (srcSize < ZSTD_frameHeaderSize_prefix)
126784 -               return ZSTD_frameHeaderSize_prefix;
126785 -       if (ZSTD_readLE32(src) != ZSTD_MAGICNUMBER) {
126786 -               if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
126787 -                       if (srcSize < ZSTD_skippableHeaderSize)
126788 -                               return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */
126789 -                       memset(fparamsPtr, 0, sizeof(*fparamsPtr));
126790 -                       fparamsPtr->frameContentSize = ZSTD_readLE32((const char *)src + 4);
126791 -                       fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
126792 -                       return 0;
126793 -               }
126794 -               return ERROR(prefix_unknown);
126795 -       }
126797 -       /* ensure there is enough `srcSize` to fully read/decode frame header */
126798 -       {
126799 -               size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
126800 -               if (srcSize < fhsize)
126801 -                       return fhsize;
126802 -       }
126804 -       {
126805 -               BYTE const fhdByte = ip[4];
126806 -               size_t pos = 5;
126807 -               U32 const dictIDSizeCode = fhdByte & 3;
126808 -               U32 const checksumFlag = (fhdByte >> 2) & 1;
126809 -               U32 const singleSegment = (fhdByte >> 5) & 1;
126810 -               U32 const fcsID = fhdByte >> 6;
126811 -               U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;
126812 -               U32 windowSize = 0;
126813 -               U32 dictID = 0;
126814 -               U64 frameContentSize = 0;
126815 -               if ((fhdByte & 0x08) != 0)
126816 -                       return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */
126817 -               if (!singleSegment) {
126818 -                       BYTE const wlByte = ip[pos++];
126819 -                       U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
126820 -                       if (windowLog > ZSTD_WINDOWLOG_MAX)
126821 -                               return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */
126822 -                       windowSize = (1U << windowLog);
126823 -                       windowSize += (windowSize >> 3) * (wlByte & 7);
126824 -               }
126826 -               switch (dictIDSizeCode) {
126827 -               default: /* impossible */
126828 -               case 0: break;
126829 -               case 1:
126830 -                       dictID = ip[pos];
126831 -                       pos++;
126832 -                       break;
126833 -               case 2:
126834 -                       dictID = ZSTD_readLE16(ip + pos);
126835 -                       pos += 2;
126836 -                       break;
126837 -               case 3:
126838 -                       dictID = ZSTD_readLE32(ip + pos);
126839 -                       pos += 4;
126840 -                       break;
126841 -               }
126842 -               switch (fcsID) {
126843 -               default: /* impossible */
126844 -               case 0:
126845 -                       if (singleSegment)
126846 -                               frameContentSize = ip[pos];
126847 -                       break;
126848 -               case 1: frameContentSize = ZSTD_readLE16(ip + pos) + 256; break;
126849 -               case 2: frameContentSize = ZSTD_readLE32(ip + pos); break;
126850 -               case 3: frameContentSize = ZSTD_readLE64(ip + pos); break;
126851 -               }
126852 -               if (!windowSize)
126853 -                       windowSize = (U32)frameContentSize;
126854 -               if (windowSize > windowSizeMax)
126855 -                       return ERROR(frameParameter_windowTooLarge);
126856 -               fparamsPtr->frameContentSize = frameContentSize;
126857 -               fparamsPtr->windowSize = windowSize;
126858 -               fparamsPtr->dictID = dictID;
126859 -               fparamsPtr->checksumFlag = checksumFlag;
126860 -       }
126861 -       return 0;
126864 -/** ZSTD_getFrameContentSize() :
126865 -*   compatible with legacy mode
126866 -*   @return : decompressed size of the single frame pointed to be `src` if known, otherwise
126867 -*             - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
126868 -*             - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
126869 -unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
126871 -       {
126872 -               ZSTD_frameParams fParams;
126873 -               if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0)
126874 -                       return ZSTD_CONTENTSIZE_ERROR;
126875 -               if (fParams.windowSize == 0) {
126876 -                       /* Either skippable or empty frame, size == 0 either way */
126877 -                       return 0;
126878 -               } else if (fParams.frameContentSize != 0) {
126879 -                       return fParams.frameContentSize;
126880 -               } else {
126881 -                       return ZSTD_CONTENTSIZE_UNKNOWN;
126882 -               }
126883 -       }
126886 -/** ZSTD_findDecompressedSize() :
126887 - *  compatible with legacy mode
126888 - *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
126889 - *      skippable frames
126890 - *  @return : decompressed size of the frames contained */
126891 -unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize)
126893 -       {
126894 -               unsigned long long totalDstSize = 0;
126895 -               while (srcSize >= ZSTD_frameHeaderSize_prefix) {
126896 -                       const U32 magicNumber = ZSTD_readLE32(src);
126898 -                       if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
126899 -                               size_t skippableSize;
126900 -                               if (srcSize < ZSTD_skippableHeaderSize)
126901 -                                       return ERROR(srcSize_wrong);
126902 -                               skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
126903 -                               if (srcSize < skippableSize) {
126904 -                                       return ZSTD_CONTENTSIZE_ERROR;
126905 -                               }
126907 -                               src = (const BYTE *)src + skippableSize;
126908 -                               srcSize -= skippableSize;
126909 -                               continue;
126910 -                       }
126912 -                       {
126913 -                               unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
126914 -                               if (ret >= ZSTD_CONTENTSIZE_ERROR)
126915 -                                       return ret;
126917 -                               /* check for overflow */
126918 -                               if (totalDstSize + ret < totalDstSize)
126919 -                                       return ZSTD_CONTENTSIZE_ERROR;
126920 -                               totalDstSize += ret;
126921 -                       }
126922 -                       {
126923 -                               size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
126924 -                               if (ZSTD_isError(frameSrcSize)) {
126925 -                                       return ZSTD_CONTENTSIZE_ERROR;
126926 -                               }
126928 -                               src = (const BYTE *)src + frameSrcSize;
126929 -                               srcSize -= frameSrcSize;
126930 -                       }
126931 -               }
126933 -               if (srcSize) {
126934 -                       return ZSTD_CONTENTSIZE_ERROR;
126935 -               }
126937 -               return totalDstSize;
126938 -       }
126941 -/** ZSTD_decodeFrameHeader() :
126942 -*   `headerSize` must be the size provided by ZSTD_frameHeaderSize().
126943 -*   @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
126944 -static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx *dctx, const void *src, size_t headerSize)
126946 -       size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize);
126947 -       if (ZSTD_isError(result))
126948 -               return result; /* invalid header */
126949 -       if (result > 0)
126950 -               return ERROR(srcSize_wrong); /* headerSize too small */
126951 -       if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
126952 -               return ERROR(dictionary_wrong);
126953 -       if (dctx->fParams.checksumFlag)
126954 -               xxh64_reset(&dctx->xxhState, 0);
126955 -       return 0;
126958 -typedef struct {
126959 -       blockType_e blockType;
126960 -       U32 lastBlock;
126961 -       U32 origSize;
126962 -} blockProperties_t;
126964 -/*! ZSTD_getcBlockSize() :
126965 -*   Provides the size of compressed block from block header `src` */
126966 -size_t ZSTD_getcBlockSize(const void *src, size_t srcSize, blockProperties_t *bpPtr)
126968 -       if (srcSize < ZSTD_blockHeaderSize)
126969 -               return ERROR(srcSize_wrong);
126970 -       {
126971 -               U32 const cBlockHeader = ZSTD_readLE24(src);
126972 -               U32 const cSize = cBlockHeader >> 3;
126973 -               bpPtr->lastBlock = cBlockHeader & 1;
126974 -               bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
126975 -               bpPtr->origSize = cSize; /* only useful for RLE */
126976 -               if (bpPtr->blockType == bt_rle)
126977 -                       return 1;
126978 -               if (bpPtr->blockType == bt_reserved)
126979 -                       return ERROR(corruption_detected);
126980 -               return cSize;
126981 -       }
126984 -static size_t ZSTD_copyRawBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
126986 -       if (srcSize > dstCapacity)
126987 -               return ERROR(dstSize_tooSmall);
126988 -       memcpy(dst, src, srcSize);
126989 -       return srcSize;
126992 -static size_t ZSTD_setRleBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize, size_t regenSize)
126994 -       if (srcSize != 1)
126995 -               return ERROR(srcSize_wrong);
126996 -       if (regenSize > dstCapacity)
126997 -               return ERROR(dstSize_tooSmall);
126998 -       memset(dst, *(const BYTE *)src, regenSize);
126999 -       return regenSize;
127002 -/*! ZSTD_decodeLiteralsBlock() :
127003 -       @return : nb of bytes read from src (< srcSize ) */
127004 -size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
127006 -       if (srcSize < MIN_CBLOCK_SIZE)
127007 -               return ERROR(corruption_detected);
127009 -       {
127010 -               const BYTE *const istart = (const BYTE *)src;
127011 -               symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
127013 -               switch (litEncType) {
127014 -               case set_repeat:
127015 -                       if (dctx->litEntropy == 0)
127016 -                               return ERROR(dictionary_corrupted);
127017 -                       fallthrough;
127018 -               case set_compressed:
127019 -                       if (srcSize < 5)
127020 -                               return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
127021 -                       {
127022 -                               size_t lhSize, litSize, litCSize;
127023 -                               U32 singleStream = 0;
127024 -                               U32 const lhlCode = (istart[0] >> 2) & 3;
127025 -                               U32 const lhc = ZSTD_readLE32(istart);
127026 -                               switch (lhlCode) {
127027 -                               case 0:
127028 -                               case 1:
127029 -                               default: /* note : default is impossible, since lhlCode into [0..3] */
127030 -                                       /* 2 - 2 - 10 - 10 */
127031 -                                       singleStream = !lhlCode;
127032 -                                       lhSize = 3;
127033 -                                       litSize = (lhc >> 4) & 0x3FF;
127034 -                                       litCSize = (lhc >> 14) & 0x3FF;
127035 -                                       break;
127036 -                               case 2:
127037 -                                       /* 2 - 2 - 14 - 14 */
127038 -                                       lhSize = 4;
127039 -                                       litSize = (lhc >> 4) & 0x3FFF;
127040 -                                       litCSize = lhc >> 18;
127041 -                                       break;
127042 -                               case 3:
127043 -                                       /* 2 - 2 - 18 - 18 */
127044 -                                       lhSize = 5;
127045 -                                       litSize = (lhc >> 4) & 0x3FFFF;
127046 -                                       litCSize = (lhc >> 22) + (istart[4] << 10);
127047 -                                       break;
127048 -                               }
127049 -                               if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
127050 -                                       return ERROR(corruption_detected);
127051 -                               if (litCSize + lhSize > srcSize)
127052 -                                       return ERROR(corruption_detected);
127054 -                               if (HUF_isError(
127055 -                                       (litEncType == set_repeat)
127056 -                                           ? (singleStream ? HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr)
127057 -                                                           : HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr))
127058 -                                           : (singleStream
127059 -                                                  ? HUF_decompress1X2_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
127060 -                                                                                dctx->entropy.workspace, sizeof(dctx->entropy.workspace))
127061 -                                                  : HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
127062 -                                                                                  dctx->entropy.workspace, sizeof(dctx->entropy.workspace)))))
127063 -                                       return ERROR(corruption_detected);
127065 -                               dctx->litPtr = dctx->litBuffer;
127066 -                               dctx->litSize = litSize;
127067 -                               dctx->litEntropy = 1;
127068 -                               if (litEncType == set_compressed)
127069 -                                       dctx->HUFptr = dctx->entropy.hufTable;
127070 -                               memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
127071 -                               return litCSize + lhSize;
127072 -                       }
127074 -               case set_basic: {
127075 -                       size_t litSize, lhSize;
127076 -                       U32 const lhlCode = ((istart[0]) >> 2) & 3;
127077 -                       switch (lhlCode) {
127078 -                       case 0:
127079 -                       case 2:
127080 -                       default: /* note : default is impossible, since lhlCode into [0..3] */
127081 -                               lhSize = 1;
127082 -                               litSize = istart[0] >> 3;
127083 -                               break;
127084 -                       case 1:
127085 -                               lhSize = 2;
127086 -                               litSize = ZSTD_readLE16(istart) >> 4;
127087 -                               break;
127088 -                       case 3:
127089 -                               lhSize = 3;
127090 -                               litSize = ZSTD_readLE24(istart) >> 4;
127091 -                               break;
127092 -                       }
127094 -                       if (lhSize + litSize + WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
127095 -                               if (litSize + lhSize > srcSize)
127096 -                                       return ERROR(corruption_detected);
127097 -                               memcpy(dctx->litBuffer, istart + lhSize, litSize);
127098 -                               dctx->litPtr = dctx->litBuffer;
127099 -                               dctx->litSize = litSize;
127100 -                               memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
127101 -                               return lhSize + litSize;
127102 -                       }
127103 -                       /* direct reference into compressed stream */
127104 -                       dctx->litPtr = istart + lhSize;
127105 -                       dctx->litSize = litSize;
127106 -                       return lhSize + litSize;
127107 -               }
127109 -               case set_rle: {
127110 -                       U32 const lhlCode = ((istart[0]) >> 2) & 3;
127111 -                       size_t litSize, lhSize;
127112 -                       switch (lhlCode) {
127113 -                       case 0:
127114 -                       case 2:
127115 -                       default: /* note : default is impossible, since lhlCode into [0..3] */
127116 -                               lhSize = 1;
127117 -                               litSize = istart[0] >> 3;
127118 -                               break;
127119 -                       case 1:
127120 -                               lhSize = 2;
127121 -                               litSize = ZSTD_readLE16(istart) >> 4;
127122 -                               break;
127123 -                       case 3:
127124 -                               lhSize = 3;
127125 -                               litSize = ZSTD_readLE24(istart) >> 4;
127126 -                               if (srcSize < 4)
127127 -                                       return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
127128 -                               break;
127129 -                       }
127130 -                       if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
127131 -                               return ERROR(corruption_detected);
127132 -                       memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
127133 -                       dctx->litPtr = dctx->litBuffer;
127134 -                       dctx->litSize = litSize;
127135 -                       return lhSize + 1;
127136 -               }
127137 -               default:
127138 -                       return ERROR(corruption_detected); /* impossible */
127139 -               }
127140 -       }
127143 -typedef union {
127144 -       FSE_decode_t realData;
127145 -       U32 alignedBy4;
127146 -} FSE_decode_t4;
127148 -static const FSE_decode_t4 LL_defaultDTable[(1 << LL_DEFAULTNORMLOG) + 1] = {
127149 -    {{LL_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
127150 -    {{0, 0, 4}},                /* 0 : base, symbol, bits */
127151 -    {{16, 0, 4}},
127152 -    {{32, 1, 5}},
127153 -    {{0, 3, 5}},
127154 -    {{0, 4, 5}},
127155 -    {{0, 6, 5}},
127156 -    {{0, 7, 5}},
127157 -    {{0, 9, 5}},
127158 -    {{0, 10, 5}},
127159 -    {{0, 12, 5}},
127160 -    {{0, 14, 6}},
127161 -    {{0, 16, 5}},
127162 -    {{0, 18, 5}},
127163 -    {{0, 19, 5}},
127164 -    {{0, 21, 5}},
127165 -    {{0, 22, 5}},
127166 -    {{0, 24, 5}},
127167 -    {{32, 25, 5}},
127168 -    {{0, 26, 5}},
127169 -    {{0, 27, 6}},
127170 -    {{0, 29, 6}},
127171 -    {{0, 31, 6}},
127172 -    {{32, 0, 4}},
127173 -    {{0, 1, 4}},
127174 -    {{0, 2, 5}},
127175 -    {{32, 4, 5}},
127176 -    {{0, 5, 5}},
127177 -    {{32, 7, 5}},
127178 -    {{0, 8, 5}},
127179 -    {{32, 10, 5}},
127180 -    {{0, 11, 5}},
127181 -    {{0, 13, 6}},
127182 -    {{32, 16, 5}},
127183 -    {{0, 17, 5}},
127184 -    {{32, 19, 5}},
127185 -    {{0, 20, 5}},
127186 -    {{32, 22, 5}},
127187 -    {{0, 23, 5}},
127188 -    {{0, 25, 4}},
127189 -    {{16, 25, 4}},
127190 -    {{32, 26, 5}},
127191 -    {{0, 28, 6}},
127192 -    {{0, 30, 6}},
127193 -    {{48, 0, 4}},
127194 -    {{16, 1, 4}},
127195 -    {{32, 2, 5}},
127196 -    {{32, 3, 5}},
127197 -    {{32, 5, 5}},
127198 -    {{32, 6, 5}},
127199 -    {{32, 8, 5}},
127200 -    {{32, 9, 5}},
127201 -    {{32, 11, 5}},
127202 -    {{32, 12, 5}},
127203 -    {{0, 15, 6}},
127204 -    {{32, 17, 5}},
127205 -    {{32, 18, 5}},
127206 -    {{32, 20, 5}},
127207 -    {{32, 21, 5}},
127208 -    {{32, 23, 5}},
127209 -    {{32, 24, 5}},
127210 -    {{0, 35, 6}},
127211 -    {{0, 34, 6}},
127212 -    {{0, 33, 6}},
127213 -    {{0, 32, 6}},
127214 -}; /* LL_defaultDTable */
127216 -static const FSE_decode_t4 ML_defaultDTable[(1 << ML_DEFAULTNORMLOG) + 1] = {
127217 -    {{ML_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
127218 -    {{0, 0, 6}},                /* 0 : base, symbol, bits */
127219 -    {{0, 1, 4}},
127220 -    {{32, 2, 5}},
127221 -    {{0, 3, 5}},
127222 -    {{0, 5, 5}},
127223 -    {{0, 6, 5}},
127224 -    {{0, 8, 5}},
127225 -    {{0, 10, 6}},
127226 -    {{0, 13, 6}},
127227 -    {{0, 16, 6}},
127228 -    {{0, 19, 6}},
127229 -    {{0, 22, 6}},
127230 -    {{0, 25, 6}},
127231 -    {{0, 28, 6}},
127232 -    {{0, 31, 6}},
127233 -    {{0, 33, 6}},
127234 -    {{0, 35, 6}},
127235 -    {{0, 37, 6}},
127236 -    {{0, 39, 6}},
127237 -    {{0, 41, 6}},
127238 -    {{0, 43, 6}},
127239 -    {{0, 45, 6}},
127240 -    {{16, 1, 4}},
127241 -    {{0, 2, 4}},
127242 -    {{32, 3, 5}},
127243 -    {{0, 4, 5}},
127244 -    {{32, 6, 5}},
127245 -    {{0, 7, 5}},
127246 -    {{0, 9, 6}},
127247 -    {{0, 12, 6}},
127248 -    {{0, 15, 6}},
127249 -    {{0, 18, 6}},
127250 -    {{0, 21, 6}},
127251 -    {{0, 24, 6}},
127252 -    {{0, 27, 6}},
127253 -    {{0, 30, 6}},
127254 -    {{0, 32, 6}},
127255 -    {{0, 34, 6}},
127256 -    {{0, 36, 6}},
127257 -    {{0, 38, 6}},
127258 -    {{0, 40, 6}},
127259 -    {{0, 42, 6}},
127260 -    {{0, 44, 6}},
127261 -    {{32, 1, 4}},
127262 -    {{48, 1, 4}},
127263 -    {{16, 2, 4}},
127264 -    {{32, 4, 5}},
127265 -    {{32, 5, 5}},
127266 -    {{32, 7, 5}},
127267 -    {{32, 8, 5}},
127268 -    {{0, 11, 6}},
127269 -    {{0, 14, 6}},
127270 -    {{0, 17, 6}},
127271 -    {{0, 20, 6}},
127272 -    {{0, 23, 6}},
127273 -    {{0, 26, 6}},
127274 -    {{0, 29, 6}},
127275 -    {{0, 52, 6}},
127276 -    {{0, 51, 6}},
127277 -    {{0, 50, 6}},
127278 -    {{0, 49, 6}},
127279 -    {{0, 48, 6}},
127280 -    {{0, 47, 6}},
127281 -    {{0, 46, 6}},
127282 -}; /* ML_defaultDTable */
127284 -static const FSE_decode_t4 OF_defaultDTable[(1 << OF_DEFAULTNORMLOG) + 1] = {
127285 -    {{OF_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
127286 -    {{0, 0, 5}},                /* 0 : base, symbol, bits */
127287 -    {{0, 6, 4}},
127288 -    {{0, 9, 5}},
127289 -    {{0, 15, 5}},
127290 -    {{0, 21, 5}},
127291 -    {{0, 3, 5}},
127292 -    {{0, 7, 4}},
127293 -    {{0, 12, 5}},
127294 -    {{0, 18, 5}},
127295 -    {{0, 23, 5}},
127296 -    {{0, 5, 5}},
127297 -    {{0, 8, 4}},
127298 -    {{0, 14, 5}},
127299 -    {{0, 20, 5}},
127300 -    {{0, 2, 5}},
127301 -    {{16, 7, 4}},
127302 -    {{0, 11, 5}},
127303 -    {{0, 17, 5}},
127304 -    {{0, 22, 5}},
127305 -    {{0, 4, 5}},
127306 -    {{16, 8, 4}},
127307 -    {{0, 13, 5}},
127308 -    {{0, 19, 5}},
127309 -    {{0, 1, 5}},
127310 -    {{16, 6, 4}},
127311 -    {{0, 10, 5}},
127312 -    {{0, 16, 5}},
127313 -    {{0, 28, 5}},
127314 -    {{0, 27, 5}},
127315 -    {{0, 26, 5}},
127316 -    {{0, 25, 5}},
127317 -    {{0, 24, 5}},
127318 -}; /* OF_defaultDTable */
127320 -/*! ZSTD_buildSeqTable() :
127321 -       @return : nb bytes read from src,
127322 -                         or an error code if it fails, testable with ZSTD_isError()
127324 -static size_t ZSTD_buildSeqTable(FSE_DTable *DTableSpace, const FSE_DTable **DTablePtr, symbolEncodingType_e type, U32 max, U32 maxLog, const void *src,
127325 -                                size_t srcSize, const FSE_decode_t4 *defaultTable, U32 flagRepeatTable, void *workspace, size_t workspaceSize)
127327 -       const void *const tmpPtr = defaultTable; /* bypass strict aliasing */
127328 -       switch (type) {
127329 -       case set_rle:
127330 -               if (!srcSize)
127331 -                       return ERROR(srcSize_wrong);
127332 -               if ((*(const BYTE *)src) > max)
127333 -                       return ERROR(corruption_detected);
127334 -               FSE_buildDTable_rle(DTableSpace, *(const BYTE *)src);
127335 -               *DTablePtr = DTableSpace;
127336 -               return 1;
127337 -       case set_basic: *DTablePtr = (const FSE_DTable *)tmpPtr; return 0;
127338 -       case set_repeat:
127339 -               if (!flagRepeatTable)
127340 -                       return ERROR(corruption_detected);
127341 -               return 0;
127342 -       default: /* impossible */
127343 -       case set_compressed: {
127344 -               U32 tableLog;
127345 -               S16 *norm = (S16 *)workspace;
127346 -               size_t const spaceUsed32 = ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
127348 -               if ((spaceUsed32 << 2) > workspaceSize)
127349 -                       return ERROR(GENERIC);
127350 -               workspace = (U32 *)workspace + spaceUsed32;
127351 -               workspaceSize -= (spaceUsed32 << 2);
127352 -               {
127353 -                       size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
127354 -                       if (FSE_isError(headerSize))
127355 -                               return ERROR(corruption_detected);
127356 -                       if (tableLog > maxLog)
127357 -                               return ERROR(corruption_detected);
127358 -                       FSE_buildDTable_wksp(DTableSpace, norm, max, tableLog, workspace, workspaceSize);
127359 -                       *DTablePtr = DTableSpace;
127360 -                       return headerSize;
127361 -               }
127362 -       }
127363 -       }
127366 -size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize)
127368 -       const BYTE *const istart = (const BYTE *const)src;
127369 -       const BYTE *const iend = istart + srcSize;
127370 -       const BYTE *ip = istart;
127372 -       /* check */
127373 -       if (srcSize < MIN_SEQUENCES_SIZE)
127374 -               return ERROR(srcSize_wrong);
127376 -       /* SeqHead */
127377 -       {
127378 -               int nbSeq = *ip++;
127379 -               if (!nbSeq) {
127380 -                       *nbSeqPtr = 0;
127381 -                       return 1;
127382 -               }
127383 -               if (nbSeq > 0x7F) {
127384 -                       if (nbSeq == 0xFF) {
127385 -                               if (ip + 2 > iend)
127386 -                                       return ERROR(srcSize_wrong);
127387 -                               nbSeq = ZSTD_readLE16(ip) + LONGNBSEQ, ip += 2;
127388 -                       } else {
127389 -                               if (ip >= iend)
127390 -                                       return ERROR(srcSize_wrong);
127391 -                               nbSeq = ((nbSeq - 0x80) << 8) + *ip++;
127392 -                       }
127393 -               }
127394 -               *nbSeqPtr = nbSeq;
127395 -       }
127397 -       /* FSE table descriptors */
127398 -       if (ip + 4 > iend)
127399 -               return ERROR(srcSize_wrong); /* minimum possible size */
127400 -       {
127401 -               symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
127402 -               symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
127403 -               symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
127404 -               ip++;
127406 -               /* Build DTables */
127407 -               {
127408 -                       size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend - ip,
127409 -                                                                 LL_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
127410 -                       if (ZSTD_isError(llhSize))
127411 -                               return ERROR(corruption_detected);
127412 -                       ip += llhSize;
127413 -               }
127414 -               {
127415 -                       size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend - ip,
127416 -                                                                 OF_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
127417 -                       if (ZSTD_isError(ofhSize))
127418 -                               return ERROR(corruption_detected);
127419 -                       ip += ofhSize;
127420 -               }
127421 -               {
127422 -                       size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend - ip,
127423 -                                                                 ML_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
127424 -                       if (ZSTD_isError(mlhSize))
127425 -                               return ERROR(corruption_detected);
127426 -                       ip += mlhSize;
127427 -               }
127428 -       }
127430 -       return ip - istart;
127433 -typedef struct {
127434 -       size_t litLength;
127435 -       size_t matchLength;
127436 -       size_t offset;
127437 -       const BYTE *match;
127438 -} seq_t;
127440 -typedef struct {
127441 -       BIT_DStream_t DStream;
127442 -       FSE_DState_t stateLL;
127443 -       FSE_DState_t stateOffb;
127444 -       FSE_DState_t stateML;
127445 -       size_t prevOffset[ZSTD_REP_NUM];
127446 -       const BYTE *base;
127447 -       size_t pos;
127448 -       uPtrDiff gotoDict;
127449 -} seqState_t;
127451 -FORCE_NOINLINE
127452 -size_t ZSTD_execSequenceLast7(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
127453 -                             const BYTE *const vBase, const BYTE *const dictEnd)
127455 -       BYTE *const oLitEnd = op + sequence.litLength;
127456 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
127457 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
127458 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
127459 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
127460 -       const BYTE *match = oLitEnd - sequence.offset;
127462 -       /* check */
127463 -       if (oMatchEnd > oend)
127464 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
127465 -       if (iLitEnd > litLimit)
127466 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
127467 -       if (oLitEnd <= oend_w)
127468 -               return ERROR(GENERIC); /* Precondition */
127470 -       /* copy literals */
127471 -       if (op < oend_w) {
127472 -               ZSTD_wildcopy(op, *litPtr, oend_w - op);
127473 -               *litPtr += oend_w - op;
127474 -               op = oend_w;
127475 -       }
127476 -       while (op < oLitEnd)
127477 -               *op++ = *(*litPtr)++;
127479 -       /* copy Match */
127480 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
127481 -               /* offset beyond prefix */
127482 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
127483 -                       return ERROR(corruption_detected);
127484 -               match = dictEnd - (base - match);
127485 -               if (match + sequence.matchLength <= dictEnd) {
127486 -                       memmove(oLitEnd, match, sequence.matchLength);
127487 -                       return sequenceLength;
127488 -               }
127489 -               /* span extDict & currPrefixSegment */
127490 -               {
127491 -                       size_t const length1 = dictEnd - match;
127492 -                       memmove(oLitEnd, match, length1);
127493 -                       op = oLitEnd + length1;
127494 -                       sequence.matchLength -= length1;
127495 -                       match = base;
127496 -               }
127497 -       }
127498 -       while (op < oMatchEnd)
127499 -               *op++ = *match++;
127500 -       return sequenceLength;
127503 -static seq_t ZSTD_decodeSequence(seqState_t *seqState)
127505 -       seq_t seq;
127507 -       U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
127508 -       U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
127509 -       U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
127511 -       U32 const llBits = LL_bits[llCode];
127512 -       U32 const mlBits = ML_bits[mlCode];
127513 -       U32 const ofBits = ofCode;
127514 -       U32 const totalBits = llBits + mlBits + ofBits;
127516 -       static const U32 LL_base[MaxLL + 1] = {0,  1,  2,  3,  4,  5,  6,  7,  8,    9,     10,    11,    12,    13,     14,     15,     16,     18,
127517 -                                              20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
127519 -       static const U32 ML_base[MaxML + 1] = {3,  4,  5,  6,  7,  8,  9,  10,   11,    12,    13,    14,    15,     16,     17,     18,     19,     20,
127520 -                                              21, 22, 23, 24, 25, 26, 27, 28,   29,    30,    31,    32,    33,     34,     35,     37,     39,     41,
127521 -                                              43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
127523 -       static const U32 OF_base[MaxOff + 1] = {0,       1,     1,      5,      0xD,      0x1D,      0x3D,      0x7D,      0xFD,     0x1FD,
127524 -                                               0x3FD,   0x7FD,    0xFFD,    0x1FFD,   0x3FFD,   0x7FFD,    0xFFFD,    0x1FFFD,   0x3FFFD,  0x7FFFD,
127525 -                                               0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
127527 -       /* sequence */
127528 -       {
127529 -               size_t offset;
127530 -               if (!ofCode)
127531 -                       offset = 0;
127532 -               else {
127533 -                       offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
127534 -                       if (ZSTD_32bits())
127535 -                               BIT_reloadDStream(&seqState->DStream);
127536 -               }
127538 -               if (ofCode <= 1) {
127539 -                       offset += (llCode == 0);
127540 -                       if (offset) {
127541 -                               size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
127542 -                               temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
127543 -                               if (offset != 1)
127544 -                                       seqState->prevOffset[2] = seqState->prevOffset[1];
127545 -                               seqState->prevOffset[1] = seqState->prevOffset[0];
127546 -                               seqState->prevOffset[0] = offset = temp;
127547 -                       } else {
127548 -                               offset = seqState->prevOffset[0];
127549 -                       }
127550 -               } else {
127551 -                       seqState->prevOffset[2] = seqState->prevOffset[1];
127552 -                       seqState->prevOffset[1] = seqState->prevOffset[0];
127553 -                       seqState->prevOffset[0] = offset;
127554 -               }
127555 -               seq.offset = offset;
127556 -       }
127558 -       seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <=  16 bits */
127559 -       if (ZSTD_32bits() && (mlBits + llBits > 24))
127560 -               BIT_reloadDStream(&seqState->DStream);
127562 -       seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <=  16 bits */
127563 -       if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
127564 -               BIT_reloadDStream(&seqState->DStream);
127566 -       /* ANS state update */
127567 -       FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <=  9 bits */
127568 -       FSE_updateState(&seqState->stateML, &seqState->DStream); /* <=  9 bits */
127569 -       if (ZSTD_32bits())
127570 -               BIT_reloadDStream(&seqState->DStream);             /* <= 18 bits */
127571 -       FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <=  8 bits */
127573 -       seq.match = NULL;
127575 -       return seq;
127578 -FORCE_INLINE
127579 -size_t ZSTD_execSequence(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
127580 -                        const BYTE *const vBase, const BYTE *const dictEnd)
127582 -       BYTE *const oLitEnd = op + sequence.litLength;
127583 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
127584 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
127585 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
127586 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
127587 -       const BYTE *match = oLitEnd - sequence.offset;
127589 -       /* check */
127590 -       if (oMatchEnd > oend)
127591 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
127592 -       if (iLitEnd > litLimit)
127593 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
127594 -       if (oLitEnd > oend_w)
127595 -               return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
127597 -       /* copy Literals */
127598 -       ZSTD_copy8(op, *litPtr);
127599 -       if (sequence.litLength > 8)
127600 -               ZSTD_wildcopy(op + 8, (*litPtr) + 8,
127601 -                             sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
127602 -       op = oLitEnd;
127603 -       *litPtr = iLitEnd; /* update for next sequence */
127605 -       /* copy Match */
127606 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
127607 -               /* offset beyond prefix */
127608 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
127609 -                       return ERROR(corruption_detected);
127610 -               match = dictEnd + (match - base);
127611 -               if (match + sequence.matchLength <= dictEnd) {
127612 -                       memmove(oLitEnd, match, sequence.matchLength);
127613 -                       return sequenceLength;
127614 -               }
127615 -               /* span extDict & currPrefixSegment */
127616 -               {
127617 -                       size_t const length1 = dictEnd - match;
127618 -                       memmove(oLitEnd, match, length1);
127619 -                       op = oLitEnd + length1;
127620 -                       sequence.matchLength -= length1;
127621 -                       match = base;
127622 -                       if (op > oend_w || sequence.matchLength < MINMATCH) {
127623 -                               U32 i;
127624 -                               for (i = 0; i < sequence.matchLength; ++i)
127625 -                                       op[i] = match[i];
127626 -                               return sequenceLength;
127627 -                       }
127628 -               }
127629 -       }
127630 -       /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
127632 -       /* match within prefix */
127633 -       if (sequence.offset < 8) {
127634 -               /* close range match, overlap */
127635 -               static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
127636 -               static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
127637 -               int const sub2 = dec64table[sequence.offset];
127638 -               op[0] = match[0];
127639 -               op[1] = match[1];
127640 -               op[2] = match[2];
127641 -               op[3] = match[3];
127642 -               match += dec32table[sequence.offset];
127643 -               ZSTD_copy4(op + 4, match);
127644 -               match -= sub2;
127645 -       } else {
127646 -               ZSTD_copy8(op, match);
127647 -       }
127648 -       op += 8;
127649 -       match += 8;
127651 -       if (oMatchEnd > oend - (16 - MINMATCH)) {
127652 -               if (op < oend_w) {
127653 -                       ZSTD_wildcopy(op, match, oend_w - op);
127654 -                       match += oend_w - op;
127655 -                       op = oend_w;
127656 -               }
127657 -               while (op < oMatchEnd)
127658 -                       *op++ = *match++;
127659 -       } else {
127660 -               ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
127661 -       }
127662 -       return sequenceLength;
127665 -static size_t ZSTD_decompressSequences(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
127667 -       const BYTE *ip = (const BYTE *)seqStart;
127668 -       const BYTE *const iend = ip + seqSize;
127669 -       BYTE *const ostart = (BYTE * const)dst;
127670 -       BYTE *const oend = ostart + maxDstSize;
127671 -       BYTE *op = ostart;
127672 -       const BYTE *litPtr = dctx->litPtr;
127673 -       const BYTE *const litEnd = litPtr + dctx->litSize;
127674 -       const BYTE *const base = (const BYTE *)(dctx->base);
127675 -       const BYTE *const vBase = (const BYTE *)(dctx->vBase);
127676 -       const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
127677 -       int nbSeq;
127679 -       /* Build Decoding Tables */
127680 -       {
127681 -               size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
127682 -               if (ZSTD_isError(seqHSize))
127683 -                       return seqHSize;
127684 -               ip += seqHSize;
127685 -       }
127687 -       /* Regen sequences */
127688 -       if (nbSeq) {
127689 -               seqState_t seqState;
127690 -               dctx->fseEntropy = 1;
127691 -               {
127692 -                       U32 i;
127693 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
127694 -                               seqState.prevOffset[i] = dctx->entropy.rep[i];
127695 -               }
127696 -               CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
127697 -               FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
127698 -               FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
127699 -               FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
127701 -               for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq;) {
127702 -                       nbSeq--;
127703 -                       {
127704 -                               seq_t const sequence = ZSTD_decodeSequence(&seqState);
127705 -                               size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
127706 -                               if (ZSTD_isError(oneSeqSize))
127707 -                                       return oneSeqSize;
127708 -                               op += oneSeqSize;
127709 -                       }
127710 -               }
127712 -               /* check if reached exact end */
127713 -               if (nbSeq)
127714 -                       return ERROR(corruption_detected);
127715 -               /* save reps for next block */
127716 -               {
127717 -                       U32 i;
127718 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
127719 -                               dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
127720 -               }
127721 -       }
127723 -       /* last literal segment */
127724 -       {
127725 -               size_t const lastLLSize = litEnd - litPtr;
127726 -               if (lastLLSize > (size_t)(oend - op))
127727 -                       return ERROR(dstSize_tooSmall);
127728 -               memcpy(op, litPtr, lastLLSize);
127729 -               op += lastLLSize;
127730 -       }
127732 -       return op - ostart;
127735 -FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t *seqState, int const longOffsets)
127737 -       seq_t seq;
127739 -       U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
127740 -       U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
127741 -       U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
127743 -       U32 const llBits = LL_bits[llCode];
127744 -       U32 const mlBits = ML_bits[mlCode];
127745 -       U32 const ofBits = ofCode;
127746 -       U32 const totalBits = llBits + mlBits + ofBits;
127748 -       static const U32 LL_base[MaxLL + 1] = {0,  1,  2,  3,  4,  5,  6,  7,  8,    9,     10,    11,    12,    13,     14,     15,     16,     18,
127749 -                                              20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
127751 -       static const U32 ML_base[MaxML + 1] = {3,  4,  5,  6,  7,  8,  9,  10,   11,    12,    13,    14,    15,     16,     17,     18,     19,     20,
127752 -                                              21, 22, 23, 24, 25, 26, 27, 28,   29,    30,    31,    32,    33,     34,     35,     37,     39,     41,
127753 -                                              43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
127755 -       static const U32 OF_base[MaxOff + 1] = {0,       1,     1,      5,      0xD,      0x1D,      0x3D,      0x7D,      0xFD,     0x1FD,
127756 -                                               0x3FD,   0x7FD,    0xFFD,    0x1FFD,   0x3FFD,   0x7FFD,    0xFFFD,    0x1FFFD,   0x3FFFD,  0x7FFFD,
127757 -                                               0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
127759 -       /* sequence */
127760 -       {
127761 -               size_t offset;
127762 -               if (!ofCode)
127763 -                       offset = 0;
127764 -               else {
127765 -                       if (longOffsets) {
127766 -                               int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN);
127767 -                               offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
127768 -                               if (ZSTD_32bits() || extraBits)
127769 -                                       BIT_reloadDStream(&seqState->DStream);
127770 -                               if (extraBits)
127771 -                                       offset += BIT_readBitsFast(&seqState->DStream, extraBits);
127772 -                       } else {
127773 -                               offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
127774 -                               if (ZSTD_32bits())
127775 -                                       BIT_reloadDStream(&seqState->DStream);
127776 -                       }
127777 -               }
127779 -               if (ofCode <= 1) {
127780 -                       offset += (llCode == 0);
127781 -                       if (offset) {
127782 -                               size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
127783 -                               temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
127784 -                               if (offset != 1)
127785 -                                       seqState->prevOffset[2] = seqState->prevOffset[1];
127786 -                               seqState->prevOffset[1] = seqState->prevOffset[0];
127787 -                               seqState->prevOffset[0] = offset = temp;
127788 -                       } else {
127789 -                               offset = seqState->prevOffset[0];
127790 -                       }
127791 -               } else {
127792 -                       seqState->prevOffset[2] = seqState->prevOffset[1];
127793 -                       seqState->prevOffset[1] = seqState->prevOffset[0];
127794 -                       seqState->prevOffset[0] = offset;
127795 -               }
127796 -               seq.offset = offset;
127797 -       }
127799 -       seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <=  16 bits */
127800 -       if (ZSTD_32bits() && (mlBits + llBits > 24))
127801 -               BIT_reloadDStream(&seqState->DStream);
127803 -       seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <=  16 bits */
127804 -       if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
127805 -               BIT_reloadDStream(&seqState->DStream);
127807 -       {
127808 -               size_t const pos = seqState->pos + seq.litLength;
127809 -               seq.match = seqState->base + pos - seq.offset; /* single memory segment */
127810 -               if (seq.offset > pos)
127811 -                       seq.match += seqState->gotoDict; /* separate memory segment */
127812 -               seqState->pos = pos + seq.matchLength;
127813 -       }
127815 -       /* ANS state update */
127816 -       FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <=  9 bits */
127817 -       FSE_updateState(&seqState->stateML, &seqState->DStream); /* <=  9 bits */
127818 -       if (ZSTD_32bits())
127819 -               BIT_reloadDStream(&seqState->DStream);             /* <= 18 bits */
127820 -       FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <=  8 bits */
127822 -       return seq;
127825 -static seq_t ZSTD_decodeSequenceLong(seqState_t *seqState, unsigned const windowSize)
127827 -       if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) {
127828 -               return ZSTD_decodeSequenceLong_generic(seqState, 1);
127829 -       } else {
127830 -               return ZSTD_decodeSequenceLong_generic(seqState, 0);
127831 -       }
127834 -FORCE_INLINE
127835 -size_t ZSTD_execSequenceLong(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
127836 -                            const BYTE *const vBase, const BYTE *const dictEnd)
127838 -       BYTE *const oLitEnd = op + sequence.litLength;
127839 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
127840 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
127841 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
127842 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
127843 -       const BYTE *match = sequence.match;
127845 -       /* check */
127846 -       if (oMatchEnd > oend)
127847 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
127848 -       if (iLitEnd > litLimit)
127849 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
127850 -       if (oLitEnd > oend_w)
127851 -               return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
127853 -       /* copy Literals */
127854 -       ZSTD_copy8(op, *litPtr);
127855 -       if (sequence.litLength > 8)
127856 -               ZSTD_wildcopy(op + 8, (*litPtr) + 8,
127857 -                             sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
127858 -       op = oLitEnd;
127859 -       *litPtr = iLitEnd; /* update for next sequence */
127861 -       /* copy Match */
127862 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
127863 -               /* offset beyond prefix */
127864 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
127865 -                       return ERROR(corruption_detected);
127866 -               if (match + sequence.matchLength <= dictEnd) {
127867 -                       memmove(oLitEnd, match, sequence.matchLength);
127868 -                       return sequenceLength;
127869 -               }
127870 -               /* span extDict & currPrefixSegment */
127871 -               {
127872 -                       size_t const length1 = dictEnd - match;
127873 -                       memmove(oLitEnd, match, length1);
127874 -                       op = oLitEnd + length1;
127875 -                       sequence.matchLength -= length1;
127876 -                       match = base;
127877 -                       if (op > oend_w || sequence.matchLength < MINMATCH) {
127878 -                               U32 i;
127879 -                               for (i = 0; i < sequence.matchLength; ++i)
127880 -                                       op[i] = match[i];
127881 -                               return sequenceLength;
127882 -                       }
127883 -               }
127884 -       }
127885 -       /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
127887 -       /* match within prefix */
127888 -       if (sequence.offset < 8) {
127889 -               /* close range match, overlap */
127890 -               static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
127891 -               static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
127892 -               int const sub2 = dec64table[sequence.offset];
127893 -               op[0] = match[0];
127894 -               op[1] = match[1];
127895 -               op[2] = match[2];
127896 -               op[3] = match[3];
127897 -               match += dec32table[sequence.offset];
127898 -               ZSTD_copy4(op + 4, match);
127899 -               match -= sub2;
127900 -       } else {
127901 -               ZSTD_copy8(op, match);
127902 -       }
127903 -       op += 8;
127904 -       match += 8;
127906 -       if (oMatchEnd > oend - (16 - MINMATCH)) {
127907 -               if (op < oend_w) {
127908 -                       ZSTD_wildcopy(op, match, oend_w - op);
127909 -                       match += oend_w - op;
127910 -                       op = oend_w;
127911 -               }
127912 -               while (op < oMatchEnd)
127913 -                       *op++ = *match++;
127914 -       } else {
127915 -               ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
127916 -       }
127917 -       return sequenceLength;
127920 -static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
127922 -       const BYTE *ip = (const BYTE *)seqStart;
127923 -       const BYTE *const iend = ip + seqSize;
127924 -       BYTE *const ostart = (BYTE * const)dst;
127925 -       BYTE *const oend = ostart + maxDstSize;
127926 -       BYTE *op = ostart;
127927 -       const BYTE *litPtr = dctx->litPtr;
127928 -       const BYTE *const litEnd = litPtr + dctx->litSize;
127929 -       const BYTE *const base = (const BYTE *)(dctx->base);
127930 -       const BYTE *const vBase = (const BYTE *)(dctx->vBase);
127931 -       const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
127932 -       unsigned const windowSize = dctx->fParams.windowSize;
127933 -       int nbSeq;
127935 -       /* Build Decoding Tables */
127936 -       {
127937 -               size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
127938 -               if (ZSTD_isError(seqHSize))
127939 -                       return seqHSize;
127940 -               ip += seqHSize;
127941 -       }
127943 -       /* Regen sequences */
127944 -       if (nbSeq) {
127945 -#define STORED_SEQS 4
127946 -#define STOSEQ_MASK (STORED_SEQS - 1)
127947 -#define ADVANCED_SEQS 4
127948 -               seq_t *sequences = (seq_t *)dctx->entropy.workspace;
127949 -               int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
127950 -               seqState_t seqState;
127951 -               int seqNb;
127952 -               ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.workspace) >= sizeof(seq_t) * STORED_SEQS);
127953 -               dctx->fseEntropy = 1;
127954 -               {
127955 -                       U32 i;
127956 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
127957 -                               seqState.prevOffset[i] = dctx->entropy.rep[i];
127958 -               }
127959 -               seqState.base = base;
127960 -               seqState.pos = (size_t)(op - base);
127961 -               seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */
127962 -               CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
127963 -               FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
127964 -               FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
127965 -               FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
127967 -               /* prepare in advance */
127968 -               for (seqNb = 0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb < seqAdvance; seqNb++) {
127969 -                       sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize);
127970 -               }
127971 -               if (seqNb < seqAdvance)
127972 -                       return ERROR(corruption_detected);
127974 -               /* decode and decompress */
127975 -               for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb < nbSeq; seqNb++) {
127976 -                       seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize);
127977 -                       size_t const oneSeqSize =
127978 -                           ZSTD_execSequenceLong(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
127979 -                       if (ZSTD_isError(oneSeqSize))
127980 -                               return oneSeqSize;
127981 -                       ZSTD_PREFETCH(sequence.match);
127982 -                       sequences[seqNb & STOSEQ_MASK] = sequence;
127983 -                       op += oneSeqSize;
127984 -               }
127985 -               if (seqNb < nbSeq)
127986 -                       return ERROR(corruption_detected);
127988 -               /* finish queue */
127989 -               seqNb -= seqAdvance;
127990 -               for (; seqNb < nbSeq; seqNb++) {
127991 -                       size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
127992 -                       if (ZSTD_isError(oneSeqSize))
127993 -                               return oneSeqSize;
127994 -                       op += oneSeqSize;
127995 -               }
127997 -               /* save reps for next block */
127998 -               {
127999 -                       U32 i;
128000 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
128001 -                               dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
128002 -               }
128003 -       }
128005 -       /* last literal segment */
128006 -       {
128007 -               size_t const lastLLSize = litEnd - litPtr;
128008 -               if (lastLLSize > (size_t)(oend - op))
128009 -                       return ERROR(dstSize_tooSmall);
128010 -               memcpy(op, litPtr, lastLLSize);
128011 -               op += lastLLSize;
128012 -       }
128014 -       return op - ostart;
128017 -static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
128018 -{ /* blockType == blockCompressed */
128019 -       const BYTE *ip = (const BYTE *)src;
128021 -       if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX)
128022 -               return ERROR(srcSize_wrong);
128024 -       /* Decode literals section */
128025 -       {
128026 -               size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
128027 -               if (ZSTD_isError(litCSize))
128028 -                       return litCSize;
128029 -               ip += litCSize;
128030 -               srcSize -= litCSize;
128031 -       }
128032 -       if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bits x86, as it's performance detrimental */
128033 -                               /* likely because of register pressure */
128034 -                               /* if that's the correct cause, then 32-bits ARM should be affected differently */
128035 -                               /* it would be good to test this on ARM real hardware, to see if prefetch version improves speed */
128036 -               if (dctx->fParams.windowSize > (1 << 23))
128037 -                       return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
128038 -       return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
128041 -static void ZSTD_checkContinuity(ZSTD_DCtx *dctx, const void *dst)
128043 -       if (dst != dctx->previousDstEnd) { /* not contiguous */
128044 -               dctx->dictEnd = dctx->previousDstEnd;
128045 -               dctx->vBase = (const char *)dst - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
128046 -               dctx->base = dst;
128047 -               dctx->previousDstEnd = dst;
128048 -       }
128051 -size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
128053 -       size_t dSize;
128054 -       ZSTD_checkContinuity(dctx, dst);
128055 -       dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
128056 -       dctx->previousDstEnd = (char *)dst + dSize;
128057 -       return dSize;
128060 -/** ZSTD_insertBlock() :
128061 -       insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
128062 -size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize)
128064 -       ZSTD_checkContinuity(dctx, blockStart);
128065 -       dctx->previousDstEnd = (const char *)blockStart + blockSize;
128066 -       return blockSize;
128069 -size_t ZSTD_generateNxBytes(void *dst, size_t dstCapacity, BYTE byte, size_t length)
128071 -       if (length > dstCapacity)
128072 -               return ERROR(dstSize_tooSmall);
128073 -       memset(dst, byte, length);
128074 -       return length;
128077 -/** ZSTD_findFrameCompressedSize() :
128078 - *  compatible with legacy mode
128079 - *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
128080 - *  `srcSize` must be at least as large as the frame contained
128081 - *  @return : the compressed size of the frame starting at `src` */
128082 -size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
128084 -       if (srcSize >= ZSTD_skippableHeaderSize && (ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
128085 -               return ZSTD_skippableHeaderSize + ZSTD_readLE32((const BYTE *)src + 4);
128086 -       } else {
128087 -               const BYTE *ip = (const BYTE *)src;
128088 -               const BYTE *const ipstart = ip;
128089 -               size_t remainingSize = srcSize;
128090 -               ZSTD_frameParams fParams;
128092 -               size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize);
128093 -               if (ZSTD_isError(headerSize))
128094 -                       return headerSize;
128096 -               /* Frame Header */
128097 -               {
128098 -                       size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize);
128099 -                       if (ZSTD_isError(ret))
128100 -                               return ret;
128101 -                       if (ret > 0)
128102 -                               return ERROR(srcSize_wrong);
128103 -               }
128105 -               ip += headerSize;
128106 -               remainingSize -= headerSize;
128108 -               /* Loop on each block */
128109 -               while (1) {
128110 -                       blockProperties_t blockProperties;
128111 -                       size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
128112 -                       if (ZSTD_isError(cBlockSize))
128113 -                               return cBlockSize;
128115 -                       if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
128116 -                               return ERROR(srcSize_wrong);
128118 -                       ip += ZSTD_blockHeaderSize + cBlockSize;
128119 -                       remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
128121 -                       if (blockProperties.lastBlock)
128122 -                               break;
128123 -               }
128125 -               if (fParams.checksumFlag) { /* Frame content checksum */
128126 -                       if (remainingSize < 4)
128127 -                               return ERROR(srcSize_wrong);
128128 -                       ip += 4;
128129 -                       remainingSize -= 4;
128130 -               }
128132 -               return ip - ipstart;
128133 -       }
128136 -/*! ZSTD_decompressFrame() :
128137 -*   @dctx must be properly initialized */
128138 -static size_t ZSTD_decompressFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void **srcPtr, size_t *srcSizePtr)
128140 -       const BYTE *ip = (const BYTE *)(*srcPtr);
128141 -       BYTE *const ostart = (BYTE * const)dst;
128142 -       BYTE *const oend = ostart + dstCapacity;
128143 -       BYTE *op = ostart;
128144 -       size_t remainingSize = *srcSizePtr;
128146 -       /* check */
128147 -       if (remainingSize < ZSTD_frameHeaderSize_min + ZSTD_blockHeaderSize)
128148 -               return ERROR(srcSize_wrong);
128150 -       /* Frame Header */
128151 -       {
128152 -               size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
128153 -               if (ZSTD_isError(frameHeaderSize))
128154 -                       return frameHeaderSize;
128155 -               if (remainingSize < frameHeaderSize + ZSTD_blockHeaderSize)
128156 -                       return ERROR(srcSize_wrong);
128157 -               CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize));
128158 -               ip += frameHeaderSize;
128159 -               remainingSize -= frameHeaderSize;
128160 -       }
128162 -       /* Loop on each block */
128163 -       while (1) {
128164 -               size_t decodedSize;
128165 -               blockProperties_t blockProperties;
128166 -               size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
128167 -               if (ZSTD_isError(cBlockSize))
128168 -                       return cBlockSize;
128170 -               ip += ZSTD_blockHeaderSize;
128171 -               remainingSize -= ZSTD_blockHeaderSize;
128172 -               if (cBlockSize > remainingSize)
128173 -                       return ERROR(srcSize_wrong);
128175 -               switch (blockProperties.blockType) {
128176 -               case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend - op, ip, cBlockSize); break;
128177 -               case bt_raw: decodedSize = ZSTD_copyRawBlock(op, oend - op, ip, cBlockSize); break;
128178 -               case bt_rle: decodedSize = ZSTD_generateNxBytes(op, oend - op, *ip, blockProperties.origSize); break;
128179 -               case bt_reserved:
128180 -               default: return ERROR(corruption_detected);
128181 -               }
128183 -               if (ZSTD_isError(decodedSize))
128184 -                       return decodedSize;
128185 -               if (dctx->fParams.checksumFlag)
128186 -                       xxh64_update(&dctx->xxhState, op, decodedSize);
128187 -               op += decodedSize;
128188 -               ip += cBlockSize;
128189 -               remainingSize -= cBlockSize;
128190 -               if (blockProperties.lastBlock)
128191 -                       break;
128192 -       }
128194 -       if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
128195 -               U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
128196 -               U32 checkRead;
128197 -               if (remainingSize < 4)
128198 -                       return ERROR(checksum_wrong);
128199 -               checkRead = ZSTD_readLE32(ip);
128200 -               if (checkRead != checkCalc)
128201 -                       return ERROR(checksum_wrong);
128202 -               ip += 4;
128203 -               remainingSize -= 4;
128204 -       }
128206 -       /* Allow caller to get size read */
128207 -       *srcPtr = ip;
128208 -       *srcSizePtr = remainingSize;
128209 -       return op - ostart;
128212 -static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict);
128213 -static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict);
128215 -static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
128216 -                                       const ZSTD_DDict *ddict)
128218 -       void *const dststart = dst;
128220 -       if (ddict) {
128221 -               if (dict) {
128222 -                       /* programmer error, these two cases should be mutually exclusive */
128223 -                       return ERROR(GENERIC);
128224 -               }
128226 -               dict = ZSTD_DDictDictContent(ddict);
128227 -               dictSize = ZSTD_DDictDictSize(ddict);
128228 -       }
128230 -       while (srcSize >= ZSTD_frameHeaderSize_prefix) {
128231 -               U32 magicNumber;
128233 -               magicNumber = ZSTD_readLE32(src);
128234 -               if (magicNumber != ZSTD_MAGICNUMBER) {
128235 -                       if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
128236 -                               size_t skippableSize;
128237 -                               if (srcSize < ZSTD_skippableHeaderSize)
128238 -                                       return ERROR(srcSize_wrong);
128239 -                               skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
128240 -                               if (srcSize < skippableSize) {
128241 -                                       return ERROR(srcSize_wrong);
128242 -                               }
128244 -                               src = (const BYTE *)src + skippableSize;
128245 -                               srcSize -= skippableSize;
128246 -                               continue;
128247 -                       } else {
128248 -                               return ERROR(prefix_unknown);
128249 -                       }
128250 -               }
128252 -               if (ddict) {
128253 -                       /* we were called from ZSTD_decompress_usingDDict */
128254 -                       ZSTD_refDDict(dctx, ddict);
128255 -               } else {
128256 -                       /* this will initialize correctly with no dict if dict == NULL, so
128257 -                        * use this in all cases but ddict */
128258 -                       CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
128259 -               }
128260 -               ZSTD_checkContinuity(dctx, dst);
128262 -               {
128263 -                       const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize);
128264 -                       if (ZSTD_isError(res))
128265 -                               return res;
128266 -                       /* don't need to bounds check this, ZSTD_decompressFrame will have
128267 -                        * already */
128268 -                       dst = (BYTE *)dst + res;
128269 -                       dstCapacity -= res;
128270 -               }
128271 -       }
128273 -       if (srcSize)
128274 -               return ERROR(srcSize_wrong); /* input not entirely consumed */
128276 -       return (BYTE *)dst - (BYTE *)dststart;
128279 -size_t ZSTD_decompress_usingDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize)
128281 -       return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
128284 -size_t ZSTD_decompressDCtx(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
128286 -       return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
128289 -/*-**************************************
128290 -*   Advanced Streaming Decompression API
128291 -*   Bufferless and synchronous
128292 -****************************************/
128293 -size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx) { return dctx->expected; }
128295 -ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx)
128297 -       switch (dctx->stage) {
128298 -       default: /* should not happen */
128299 -       case ZSTDds_getFrameHeaderSize:
128300 -       case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader;
128301 -       case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader;
128302 -       case ZSTDds_decompressBlock: return ZSTDnit_block;
128303 -       case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock;
128304 -       case ZSTDds_checkChecksum: return ZSTDnit_checksum;
128305 -       case ZSTDds_decodeSkippableHeader:
128306 -       case ZSTDds_skipFrame: return ZSTDnit_skippableFrame;
128307 -       }
128310 -int ZSTD_isSkipFrame(ZSTD_DCtx *dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */
128312 -/** ZSTD_decompressContinue() :
128313 -*   @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity)
128314 -*             or an error code, which can be tested using ZSTD_isError() */
128315 -size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
128317 -       /* Sanity check */
128318 -       if (srcSize != dctx->expected)
128319 -               return ERROR(srcSize_wrong);
128320 -       if (dstCapacity)
128321 -               ZSTD_checkContinuity(dctx, dst);
128323 -       switch (dctx->stage) {
128324 -       case ZSTDds_getFrameHeaderSize:
128325 -               if (srcSize != ZSTD_frameHeaderSize_prefix)
128326 -                       return ERROR(srcSize_wrong);                                    /* impossible */
128327 -               if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
128328 -                       memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
128329 -                       dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */
128330 -                       dctx->stage = ZSTDds_decodeSkippableHeader;
128331 -                       return 0;
128332 -               }
128333 -               dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix);
128334 -               if (ZSTD_isError(dctx->headerSize))
128335 -                       return dctx->headerSize;
128336 -               memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
128337 -               if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) {
128338 -                       dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix;
128339 -                       dctx->stage = ZSTDds_decodeFrameHeader;
128340 -                       return 0;
128341 -               }
128342 -               dctx->expected = 0; /* not necessary to copy more */
128343 -               fallthrough;
128345 -       case ZSTDds_decodeFrameHeader:
128346 -               memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
128347 -               CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
128348 -               dctx->expected = ZSTD_blockHeaderSize;
128349 -               dctx->stage = ZSTDds_decodeBlockHeader;
128350 -               return 0;
128352 -       case ZSTDds_decodeBlockHeader: {
128353 -               blockProperties_t bp;
128354 -               size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
128355 -               if (ZSTD_isError(cBlockSize))
128356 -                       return cBlockSize;
128357 -               dctx->expected = cBlockSize;
128358 -               dctx->bType = bp.blockType;
128359 -               dctx->rleSize = bp.origSize;
128360 -               if (cBlockSize) {
128361 -                       dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
128362 -                       return 0;
128363 -               }
128364 -               /* empty block */
128365 -               if (bp.lastBlock) {
128366 -                       if (dctx->fParams.checksumFlag) {
128367 -                               dctx->expected = 4;
128368 -                               dctx->stage = ZSTDds_checkChecksum;
128369 -                       } else {
128370 -                               dctx->expected = 0; /* end of frame */
128371 -                               dctx->stage = ZSTDds_getFrameHeaderSize;
128372 -                       }
128373 -               } else {
128374 -                       dctx->expected = 3; /* go directly to next header */
128375 -                       dctx->stage = ZSTDds_decodeBlockHeader;
128376 -               }
128377 -               return 0;
128378 -       }
128379 -       case ZSTDds_decompressLastBlock:
128380 -       case ZSTDds_decompressBlock: {
128381 -               size_t rSize;
128382 -               switch (dctx->bType) {
128383 -               case bt_compressed: rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break;
128384 -               case bt_raw: rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); break;
128385 -               case bt_rle: rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); break;
128386 -               case bt_reserved: /* should never happen */
128387 -               default: return ERROR(corruption_detected);
128388 -               }
128389 -               if (ZSTD_isError(rSize))
128390 -                       return rSize;
128391 -               if (dctx->fParams.checksumFlag)
128392 -                       xxh64_update(&dctx->xxhState, dst, rSize);
128394 -               if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
128395 -                       if (dctx->fParams.checksumFlag) {       /* another round for frame checksum */
128396 -                               dctx->expected = 4;
128397 -                               dctx->stage = ZSTDds_checkChecksum;
128398 -                       } else {
128399 -                               dctx->expected = 0; /* ends here */
128400 -                               dctx->stage = ZSTDds_getFrameHeaderSize;
128401 -                       }
128402 -               } else {
128403 -                       dctx->stage = ZSTDds_decodeBlockHeader;
128404 -                       dctx->expected = ZSTD_blockHeaderSize;
128405 -                       dctx->previousDstEnd = (char *)dst + rSize;
128406 -               }
128407 -               return rSize;
128408 -       }
128409 -       case ZSTDds_checkChecksum: {
128410 -               U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
128411 -               U32 const check32 = ZSTD_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */
128412 -               if (check32 != h32)
128413 -                       return ERROR(checksum_wrong);
128414 -               dctx->expected = 0;
128415 -               dctx->stage = ZSTDds_getFrameHeaderSize;
128416 -               return 0;
128417 -       }
128418 -       case ZSTDds_decodeSkippableHeader: {
128419 -               memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
128420 -               dctx->expected = ZSTD_readLE32(dctx->headerBuffer + 4);
128421 -               dctx->stage = ZSTDds_skipFrame;
128422 -               return 0;
128423 -       }
128424 -       case ZSTDds_skipFrame: {
128425 -               dctx->expected = 0;
128426 -               dctx->stage = ZSTDds_getFrameHeaderSize;
128427 -               return 0;
128428 -       }
128429 -       default:
128430 -               return ERROR(GENERIC); /* impossible */
128431 -       }
128434 -static size_t ZSTD_refDictContent(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
128436 -       dctx->dictEnd = dctx->previousDstEnd;
128437 -       dctx->vBase = (const char *)dict - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
128438 -       dctx->base = dict;
128439 -       dctx->previousDstEnd = (const char *)dict + dictSize;
128440 -       return 0;
128443 -/* ZSTD_loadEntropy() :
128444 - * dict : must point at beginning of a valid zstd dictionary
128445 - * @return : size of entropy tables read */
128446 -static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t *entropy, const void *const dict, size_t const dictSize)
128448 -       const BYTE *dictPtr = (const BYTE *)dict;
128449 -       const BYTE *const dictEnd = dictPtr + dictSize;
128451 -       if (dictSize <= 8)
128452 -               return ERROR(dictionary_corrupted);
128453 -       dictPtr += 8; /* skip header = magic + dictID */
128455 -       {
128456 -               size_t const hSize = HUF_readDTableX4_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, entropy->workspace, sizeof(entropy->workspace));
128457 -               if (HUF_isError(hSize))
128458 -                       return ERROR(dictionary_corrupted);
128459 -               dictPtr += hSize;
128460 -       }
128462 -       {
128463 -               short offcodeNCount[MaxOff + 1];
128464 -               U32 offcodeMaxValue = MaxOff, offcodeLog;
128465 -               size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
128466 -               if (FSE_isError(offcodeHeaderSize))
128467 -                       return ERROR(dictionary_corrupted);
128468 -               if (offcodeLog > OffFSELog)
128469 -                       return ERROR(dictionary_corrupted);
128470 -               CHECK_E(FSE_buildDTable_wksp(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
128471 -               dictPtr += offcodeHeaderSize;
128472 -       }
128474 -       {
128475 -               short matchlengthNCount[MaxML + 1];
128476 -               unsigned matchlengthMaxValue = MaxML, matchlengthLog;
128477 -               size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
128478 -               if (FSE_isError(matchlengthHeaderSize))
128479 -                       return ERROR(dictionary_corrupted);
128480 -               if (matchlengthLog > MLFSELog)
128481 -                       return ERROR(dictionary_corrupted);
128482 -               CHECK_E(FSE_buildDTable_wksp(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
128483 -               dictPtr += matchlengthHeaderSize;
128484 -       }
128486 -       {
128487 -               short litlengthNCount[MaxLL + 1];
128488 -               unsigned litlengthMaxValue = MaxLL, litlengthLog;
128489 -               size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
128490 -               if (FSE_isError(litlengthHeaderSize))
128491 -                       return ERROR(dictionary_corrupted);
128492 -               if (litlengthLog > LLFSELog)
128493 -                       return ERROR(dictionary_corrupted);
128494 -               CHECK_E(FSE_buildDTable_wksp(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
128495 -               dictPtr += litlengthHeaderSize;
128496 -       }
128498 -       if (dictPtr + 12 > dictEnd)
128499 -               return ERROR(dictionary_corrupted);
128500 -       {
128501 -               int i;
128502 -               size_t const dictContentSize = (size_t)(dictEnd - (dictPtr + 12));
128503 -               for (i = 0; i < 3; i++) {
128504 -                       U32 const rep = ZSTD_readLE32(dictPtr);
128505 -                       dictPtr += 4;
128506 -                       if (rep == 0 || rep >= dictContentSize)
128507 -                               return ERROR(dictionary_corrupted);
128508 -                       entropy->rep[i] = rep;
128509 -               }
128510 -       }
128512 -       return dictPtr - (const BYTE *)dict;
128515 -static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
128517 -       if (dictSize < 8)
128518 -               return ZSTD_refDictContent(dctx, dict, dictSize);
128519 -       {
128520 -               U32 const magic = ZSTD_readLE32(dict);
128521 -               if (magic != ZSTD_DICT_MAGIC) {
128522 -                       return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
128523 -               }
128524 -       }
128525 -       dctx->dictID = ZSTD_readLE32((const char *)dict + 4);
128527 -       /* load entropy tables */
128528 -       {
128529 -               size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
128530 -               if (ZSTD_isError(eSize))
128531 -                       return ERROR(dictionary_corrupted);
128532 -               dict = (const char *)dict + eSize;
128533 -               dictSize -= eSize;
128534 -       }
128535 -       dctx->litEntropy = dctx->fseEntropy = 1;
128537 -       /* reference dictionary content */
128538 -       return ZSTD_refDictContent(dctx, dict, dictSize);
128541 -size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
128543 -       CHECK_F(ZSTD_decompressBegin(dctx));
128544 -       if (dict && dictSize)
128545 -               CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
128546 -       return 0;
128549 -/* ======   ZSTD_DDict   ====== */
128551 -struct ZSTD_DDict_s {
128552 -       void *dictBuffer;
128553 -       const void *dictContent;
128554 -       size_t dictSize;
128555 -       ZSTD_entropyTables_t entropy;
128556 -       U32 dictID;
128557 -       U32 entropyPresent;
128558 -       ZSTD_customMem cMem;
128559 -}; /* typedef'd to ZSTD_DDict within "zstd.h" */
128561 -size_t ZSTD_DDictWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); }
128563 -static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict) { return ddict->dictContent; }
128565 -static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict) { return ddict->dictSize; }
128567 -static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict)
128569 -       ZSTD_decompressBegin(dstDCtx); /* init */
128570 -       if (ddict) {                   /* support refDDict on NULL */
128571 -               dstDCtx->dictID = ddict->dictID;
128572 -               dstDCtx->base = ddict->dictContent;
128573 -               dstDCtx->vBase = ddict->dictContent;
128574 -               dstDCtx->dictEnd = (const BYTE *)ddict->dictContent + ddict->dictSize;
128575 -               dstDCtx->previousDstEnd = dstDCtx->dictEnd;
128576 -               if (ddict->entropyPresent) {
128577 -                       dstDCtx->litEntropy = 1;
128578 -                       dstDCtx->fseEntropy = 1;
128579 -                       dstDCtx->LLTptr = ddict->entropy.LLTable;
128580 -                       dstDCtx->MLTptr = ddict->entropy.MLTable;
128581 -                       dstDCtx->OFTptr = ddict->entropy.OFTable;
128582 -                       dstDCtx->HUFptr = ddict->entropy.hufTable;
128583 -                       dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
128584 -                       dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
128585 -                       dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
128586 -               } else {
128587 -                       dstDCtx->litEntropy = 0;
128588 -                       dstDCtx->fseEntropy = 0;
128589 -               }
128590 -       }
128593 -static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict *ddict)
128595 -       ddict->dictID = 0;
128596 -       ddict->entropyPresent = 0;
128597 -       if (ddict->dictSize < 8)
128598 -               return 0;
128599 -       {
128600 -               U32 const magic = ZSTD_readLE32(ddict->dictContent);
128601 -               if (magic != ZSTD_DICT_MAGIC)
128602 -                       return 0; /* pure content mode */
128603 -       }
128604 -       ddict->dictID = ZSTD_readLE32((const char *)ddict->dictContent + 4);
128606 -       /* load entropy tables */
128607 -       CHECK_E(ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted);
128608 -       ddict->entropyPresent = 1;
128609 -       return 0;
128612 -static ZSTD_DDict *ZSTD_createDDict_advanced(const void *dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem)
128614 -       if (!customMem.customAlloc || !customMem.customFree)
128615 -               return NULL;
128617 -       {
128618 -               ZSTD_DDict *const ddict = (ZSTD_DDict *)ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
128619 -               if (!ddict)
128620 -                       return NULL;
128621 -               ddict->cMem = customMem;
128623 -               if ((byReference) || (!dict) || (!dictSize)) {
128624 -                       ddict->dictBuffer = NULL;
128625 -                       ddict->dictContent = dict;
128626 -               } else {
128627 -                       void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
128628 -                       if (!internalBuffer) {
128629 -                               ZSTD_freeDDict(ddict);
128630 -                               return NULL;
128631 -                       }
128632 -                       memcpy(internalBuffer, dict, dictSize);
128633 -                       ddict->dictBuffer = internalBuffer;
128634 -                       ddict->dictContent = internalBuffer;
128635 -               }
128636 -               ddict->dictSize = dictSize;
128637 -               ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
128638 -               /* parse dictionary content */
128639 -               {
128640 -                       size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict);
128641 -                       if (ZSTD_isError(errorCode)) {
128642 -                               ZSTD_freeDDict(ddict);
128643 -                               return NULL;
128644 -                       }
128645 -               }
128647 -               return ddict;
128648 -       }
128651 -/*! ZSTD_initDDict() :
128652 -*   Create a digested dictionary, to start decompression without startup delay.
128653 -*   `dict` content is copied inside DDict.
128654 -*   Consequently, `dict` can be released after `ZSTD_DDict` creation */
128655 -ZSTD_DDict *ZSTD_initDDict(const void *dict, size_t dictSize, void *workspace, size_t workspaceSize)
128657 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
128658 -       return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem);
128661 -size_t ZSTD_freeDDict(ZSTD_DDict *ddict)
128663 -       if (ddict == NULL)
128664 -               return 0; /* support free on NULL */
128665 -       {
128666 -               ZSTD_customMem const cMem = ddict->cMem;
128667 -               ZSTD_free(ddict->dictBuffer, cMem);
128668 -               ZSTD_free(ddict, cMem);
128669 -               return 0;
128670 -       }
128673 -/*! ZSTD_getDictID_fromDict() :
128674 - *  Provides the dictID stored within dictionary.
128675 - *  if @return == 0, the dictionary is not conformant with Zstandard specification.
128676 - *  It can still be loaded, but as a content-only dictionary. */
128677 -unsigned ZSTD_getDictID_fromDict(const void *dict, size_t dictSize)
128679 -       if (dictSize < 8)
128680 -               return 0;
128681 -       if (ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC)
128682 -               return 0;
128683 -       return ZSTD_readLE32((const char *)dict + 4);
128686 -/*! ZSTD_getDictID_fromDDict() :
128687 - *  Provides the dictID of the dictionary loaded into `ddict`.
128688 - *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
128689 - *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
128690 -unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict)
128692 -       if (ddict == NULL)
128693 -               return 0;
128694 -       return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
128697 -/*! ZSTD_getDictID_fromFrame() :
128698 - *  Provides the dictID required to decompressed the frame stored within `src`.
128699 - *  If @return == 0, the dictID could not be decoded.
128700 - *  This could for one of the following reasons :
128701 - *  - The frame does not require a dictionary to be decoded (most common case).
128702 - *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
128703 - *    Note : this use case also happens when using a non-conformant dictionary.
128704 - *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
128705 - *  - This is not a Zstandard frame.
128706 - *  When identifying the exact failure cause, it's possible to used ZSTD_getFrameParams(), which will provide a more precise error code. */
128707 -unsigned ZSTD_getDictID_fromFrame(const void *src, size_t srcSize)
128709 -       ZSTD_frameParams zfp = {0, 0, 0, 0};
128710 -       size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize);
128711 -       if (ZSTD_isError(hError))
128712 -               return 0;
128713 -       return zfp.dictID;
128716 -/*! ZSTD_decompress_usingDDict() :
128717 -*   Decompression using a pre-digested Dictionary
128718 -*   Use dictionary without significant overhead. */
128719 -size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict)
128721 -       /* pass content and size in case legacy frames are encountered */
128722 -       return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict);
128725 -/*=====================================
128726 -*   Streaming decompression
128727 -*====================================*/
128729 -typedef enum { zdss_init, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
128731 -/* *** Resource management *** */
128732 -struct ZSTD_DStream_s {
128733 -       ZSTD_DCtx *dctx;
128734 -       ZSTD_DDict *ddictLocal;
128735 -       const ZSTD_DDict *ddict;
128736 -       ZSTD_frameParams fParams;
128737 -       ZSTD_dStreamStage stage;
128738 -       char *inBuff;
128739 -       size_t inBuffSize;
128740 -       size_t inPos;
128741 -       size_t maxWindowSize;
128742 -       char *outBuff;
128743 -       size_t outBuffSize;
128744 -       size_t outStart;
128745 -       size_t outEnd;
128746 -       size_t blockSize;
128747 -       BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */
128748 -       size_t lhSize;
128749 -       ZSTD_customMem customMem;
128750 -       void *legacyContext;
128751 -       U32 previousLegacyVersion;
128752 -       U32 legacyVersion;
128753 -       U32 hostageByte;
128754 -}; /* typedef'd to ZSTD_DStream within "zstd.h" */
128756 -size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize)
128758 -       size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
128759 -       size_t const inBuffSize = blockSize;
128760 -       size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
128761 -       return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
128764 -static ZSTD_DStream *ZSTD_createDStream_advanced(ZSTD_customMem customMem)
128766 -       ZSTD_DStream *zds;
128768 -       if (!customMem.customAlloc || !customMem.customFree)
128769 -               return NULL;
128771 -       zds = (ZSTD_DStream *)ZSTD_malloc(sizeof(ZSTD_DStream), customMem);
128772 -       if (zds == NULL)
128773 -               return NULL;
128774 -       memset(zds, 0, sizeof(ZSTD_DStream));
128775 -       memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem));
128776 -       zds->dctx = ZSTD_createDCtx_advanced(customMem);
128777 -       if (zds->dctx == NULL) {
128778 -               ZSTD_freeDStream(zds);
128779 -               return NULL;
128780 -       }
128781 -       zds->stage = zdss_init;
128782 -       zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
128783 -       return zds;
128786 -ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, size_t workspaceSize)
128788 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
128789 -       ZSTD_DStream *zds = ZSTD_createDStream_advanced(stackMem);
128790 -       if (!zds) {
128791 -               return NULL;
128792 -       }
128794 -       zds->maxWindowSize = maxWindowSize;
128795 -       zds->stage = zdss_loadHeader;
128796 -       zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
128797 -       ZSTD_freeDDict(zds->ddictLocal);
128798 -       zds->ddictLocal = NULL;
128799 -       zds->ddict = zds->ddictLocal;
128800 -       zds->legacyVersion = 0;
128801 -       zds->hostageByte = 0;
128803 -       {
128804 -               size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
128805 -               size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
128807 -               zds->inBuff = (char *)ZSTD_malloc(blockSize, zds->customMem);
128808 -               zds->inBuffSize = blockSize;
128809 -               zds->outBuff = (char *)ZSTD_malloc(neededOutSize, zds->customMem);
128810 -               zds->outBuffSize = neededOutSize;
128811 -               if (zds->inBuff == NULL || zds->outBuff == NULL) {
128812 -                       ZSTD_freeDStream(zds);
128813 -                       return NULL;
128814 -               }
128815 -       }
128816 -       return zds;
128819 -ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize)
128821 -       ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize);
128822 -       if (zds) {
128823 -               zds->ddict = ddict;
128824 -       }
128825 -       return zds;
128828 -size_t ZSTD_freeDStream(ZSTD_DStream *zds)
128830 -       if (zds == NULL)
128831 -               return 0; /* support free on null */
128832 -       {
128833 -               ZSTD_customMem const cMem = zds->customMem;
128834 -               ZSTD_freeDCtx(zds->dctx);
128835 -               zds->dctx = NULL;
128836 -               ZSTD_freeDDict(zds->ddictLocal);
128837 -               zds->ddictLocal = NULL;
128838 -               ZSTD_free(zds->inBuff, cMem);
128839 -               zds->inBuff = NULL;
128840 -               ZSTD_free(zds->outBuff, cMem);
128841 -               zds->outBuff = NULL;
128842 -               ZSTD_free(zds, cMem);
128843 -               return 0;
128844 -       }
128847 -/* *** Initialization *** */
128849 -size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; }
128850 -size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
128852 -size_t ZSTD_resetDStream(ZSTD_DStream *zds)
128854 -       zds->stage = zdss_loadHeader;
128855 -       zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
128856 -       zds->legacyVersion = 0;
128857 -       zds->hostageByte = 0;
128858 -       return ZSTD_frameHeaderSize_prefix;
128861 -/* *****   Decompression   ***** */
128863 -ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
128865 -       size_t const length = MIN(dstCapacity, srcSize);
128866 -       memcpy(dst, src, length);
128867 -       return length;
128870 -size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
128872 -       const char *const istart = (const char *)(input->src) + input->pos;
128873 -       const char *const iend = (const char *)(input->src) + input->size;
128874 -       const char *ip = istart;
128875 -       char *const ostart = (char *)(output->dst) + output->pos;
128876 -       char *const oend = (char *)(output->dst) + output->size;
128877 -       char *op = ostart;
128878 -       U32 someMoreWork = 1;
128880 -       while (someMoreWork) {
128881 -               switch (zds->stage) {
128882 -               case zdss_init:
128883 -                       ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
128884 -                       fallthrough;
128886 -               case zdss_loadHeader: {
128887 -                       size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
128888 -                       if (ZSTD_isError(hSize))
128889 -                               return hSize;
128890 -                       if (hSize != 0) {                                  /* need more input */
128891 -                               size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
128892 -                               if (toLoad > (size_t)(iend - ip)) {     /* not enough input to load full header */
128893 -                                       memcpy(zds->headerBuffer + zds->lhSize, ip, iend - ip);
128894 -                                       zds->lhSize += iend - ip;
128895 -                                       input->pos = input->size;
128896 -                                       return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) +
128897 -                                              ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
128898 -                               }
128899 -                               memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad);
128900 -                               zds->lhSize = hSize;
128901 -                               ip += toLoad;
128902 -                               break;
128903 -                       }
128905 -                       /* check for single-pass mode opportunity */
128906 -                       if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
128907 -                           && (U64)(size_t)(oend - op) >= zds->fParams.frameContentSize) {
128908 -                               size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend - istart);
128909 -                               if (cSize <= (size_t)(iend - istart)) {
128910 -                                       size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend - op, istart, cSize, zds->ddict);
128911 -                                       if (ZSTD_isError(decompressedSize))
128912 -                                               return decompressedSize;
128913 -                                       ip = istart + cSize;
128914 -                                       op += decompressedSize;
128915 -                                       zds->dctx->expected = 0;
128916 -                                       zds->stage = zdss_init;
128917 -                                       someMoreWork = 0;
128918 -                                       break;
128919 -                               }
128920 -                       }
128922 -                       /* Consume header */
128923 -                       ZSTD_refDDict(zds->dctx, zds->ddict);
128924 -                       {
128925 -                               size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */
128926 -                               CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size));
128927 -                               {
128928 -                                       size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);
128929 -                                       CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer + h1Size, h2Size));
128930 -                               }
128931 -                       }
128933 -                       zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
128934 -                       if (zds->fParams.windowSize > zds->maxWindowSize)
128935 -                               return ERROR(frameParameter_windowTooLarge);
128937 -                       /* Buffers are preallocated, but double check */
128938 -                       {
128939 -                               size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
128940 -                               size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
128941 -                               if (zds->inBuffSize < blockSize) {
128942 -                                       return ERROR(GENERIC);
128943 -                               }
128944 -                               if (zds->outBuffSize < neededOutSize) {
128945 -                                       return ERROR(GENERIC);
128946 -                               }
128947 -                               zds->blockSize = blockSize;
128948 -                       }
128949 -                       zds->stage = zdss_read;
128950 -               }
128951 -                       fallthrough;
128953 -               case zdss_read: {
128954 -                       size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
128955 -                       if (neededInSize == 0) { /* end of frame */
128956 -                               zds->stage = zdss_init;
128957 -                               someMoreWork = 0;
128958 -                               break;
128959 -                       }
128960 -                       if ((size_t)(iend - ip) >= neededInSize) { /* decode directly from src */
128961 -                               const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
128962 -                               size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart,
128963 -                                                                                  (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), ip, neededInSize);
128964 -                               if (ZSTD_isError(decodedSize))
128965 -                                       return decodedSize;
128966 -                               ip += neededInSize;
128967 -                               if (!decodedSize && !isSkipFrame)
128968 -                                       break; /* this was just a header */
128969 -                               zds->outEnd = zds->outStart + decodedSize;
128970 -                               zds->stage = zdss_flush;
128971 -                               break;
128972 -                       }
128973 -                       if (ip == iend) {
128974 -                               someMoreWork = 0;
128975 -                               break;
128976 -                       } /* no more input */
128977 -                       zds->stage = zdss_load;
128978 -                       /* pass-through */
128979 -               }
128980 -                       fallthrough;
128982 -               case zdss_load: {
128983 -                       size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
128984 -                       size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */
128985 -                       size_t loadedSize;
128986 -                       if (toLoad > zds->inBuffSize - zds->inPos)
128987 -                               return ERROR(corruption_detected); /* should never happen */
128988 -                       loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend - ip);
128989 -                       ip += loadedSize;
128990 -                       zds->inPos += loadedSize;
128991 -                       if (loadedSize < toLoad) {
128992 -                               someMoreWork = 0;
128993 -                               break;
128994 -                       } /* not enough input, wait for more */
128996 -                       /* decode loaded input */
128997 -                       {
128998 -                               const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
128999 -                               size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
129000 -                                                                                  zds->inBuff, neededInSize);
129001 -                               if (ZSTD_isError(decodedSize))
129002 -                                       return decodedSize;
129003 -                               zds->inPos = 0; /* input is consumed */
129004 -                               if (!decodedSize && !isSkipFrame) {
129005 -                                       zds->stage = zdss_read;
129006 -                                       break;
129007 -                               } /* this was just a header */
129008 -                               zds->outEnd = zds->outStart + decodedSize;
129009 -                               zds->stage = zdss_flush;
129010 -                               /* pass-through */
129011 -                       }
129012 -               }
129013 -                       fallthrough;
129015 -               case zdss_flush: {
129016 -                       size_t const toFlushSize = zds->outEnd - zds->outStart;
129017 -                       size_t const flushedSize = ZSTD_limitCopy(op, oend - op, zds->outBuff + zds->outStart, toFlushSize);
129018 -                       op += flushedSize;
129019 -                       zds->outStart += flushedSize;
129020 -                       if (flushedSize == toFlushSize) { /* flush completed */
129021 -                               zds->stage = zdss_read;
129022 -                               if (zds->outStart + zds->blockSize > zds->outBuffSize)
129023 -                                       zds->outStart = zds->outEnd = 0;
129024 -                               break;
129025 -                       }
129026 -                       /* cannot complete flush */
129027 -                       someMoreWork = 0;
129028 -                       break;
129029 -               }
129030 -               default:
129031 -                       return ERROR(GENERIC); /* impossible */
129032 -               }
129033 -       }
129035 -       /* result */
129036 -       input->pos += (size_t)(ip - istart);
129037 -       output->pos += (size_t)(op - ostart);
129038 -       {
129039 -               size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx);
129040 -               if (!nextSrcSizeHint) {                     /* frame fully decoded */
129041 -                       if (zds->outEnd == zds->outStart) { /* output fully flushed */
129042 -                               if (zds->hostageByte) {
129043 -                                       if (input->pos >= input->size) {
129044 -                                               zds->stage = zdss_read;
129045 -                                               return 1;
129046 -                                       }            /* can't release hostage (not present) */
129047 -                                       input->pos++; /* release hostage */
129048 -                               }
129049 -                               return 0;
129050 -                       }
129051 -                       if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
129052 -                               input->pos--;    /* note : pos > 0, otherwise, impossible to finish reading last block */
129053 -                               zds->hostageByte = 1;
129054 -                       }
129055 -                       return 1;
129056 -               }
129057 -               nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */
129058 -               if (zds->inPos > nextSrcSizeHint)
129059 -                       return ERROR(GENERIC); /* should never happen */
129060 -               nextSrcSizeHint -= zds->inPos; /* already loaded*/
129061 -               return nextSrcSizeHint;
129062 -       }
129065 -EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound);
129066 -EXPORT_SYMBOL(ZSTD_initDCtx);
129067 -EXPORT_SYMBOL(ZSTD_decompressDCtx);
129068 -EXPORT_SYMBOL(ZSTD_decompress_usingDict);
129070 -EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound);
129071 -EXPORT_SYMBOL(ZSTD_initDDict);
129072 -EXPORT_SYMBOL(ZSTD_decompress_usingDDict);
129074 -EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound);
129075 -EXPORT_SYMBOL(ZSTD_initDStream);
129076 -EXPORT_SYMBOL(ZSTD_initDStream_usingDDict);
129077 -EXPORT_SYMBOL(ZSTD_resetDStream);
129078 -EXPORT_SYMBOL(ZSTD_decompressStream);
129079 -EXPORT_SYMBOL(ZSTD_DStreamInSize);
129080 -EXPORT_SYMBOL(ZSTD_DStreamOutSize);
129082 -EXPORT_SYMBOL(ZSTD_findFrameCompressedSize);
129083 -EXPORT_SYMBOL(ZSTD_getFrameContentSize);
129084 -EXPORT_SYMBOL(ZSTD_findDecompressedSize);
129086 -EXPORT_SYMBOL(ZSTD_isFrame);
129087 -EXPORT_SYMBOL(ZSTD_getDictID_fromDict);
129088 -EXPORT_SYMBOL(ZSTD_getDictID_fromDDict);
129089 -EXPORT_SYMBOL(ZSTD_getDictID_fromFrame);
129091 -EXPORT_SYMBOL(ZSTD_getFrameParams);
129092 -EXPORT_SYMBOL(ZSTD_decompressBegin);
129093 -EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict);
129094 -EXPORT_SYMBOL(ZSTD_copyDCtx);
129095 -EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress);
129096 -EXPORT_SYMBOL(ZSTD_decompressContinue);
129097 -EXPORT_SYMBOL(ZSTD_nextInputType);
129099 -EXPORT_SYMBOL(ZSTD_decompressBlock);
129100 -EXPORT_SYMBOL(ZSTD_insertBlock);
129102 -MODULE_LICENSE("Dual BSD/GPL");
129103 -MODULE_DESCRIPTION("Zstd Decompressor");
129104 diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c
129105 new file mode 100644
129106 index 000000000000..dee939434873
129107 --- /dev/null
129108 +++ b/lib/zstd/decompress/huf_decompress.c
129109 @@ -0,0 +1,1205 @@
129110 +/* ******************************************************************
129111 + * huff0 huffman decoder,
129112 + * part of Finite State Entropy library
129113 + * Copyright (c) Yann Collet, Facebook, Inc.
129115 + *  You can contact the author at :
129116 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
129118 + * This source code is licensed under both the BSD-style license (found in the
129119 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
129120 + * in the COPYING file in the root directory of this source tree).
129121 + * You may select, at your option, one of the above-listed licenses.
129122 +****************************************************************** */
129124 +/* **************************************************************
129125 +*  Dependencies
129126 +****************************************************************/
129127 +#include "../common/zstd_deps.h"  /* ZSTD_memcpy, ZSTD_memset */
129128 +#include "../common/compiler.h"
129129 +#include "../common/bitstream.h"  /* BIT_* */
129130 +#include "../common/fse.h"        /* to compress headers */
129131 +#define HUF_STATIC_LINKING_ONLY
129132 +#include "../common/huf.h"
129133 +#include "../common/error_private.h"
129135 +/* **************************************************************
129136 +*  Macros
129137 +****************************************************************/
129139 +/* These two optional macros force the use one way or another of the two
129140 + * Huffman decompression implementations. You can't force in both directions
129141 + * at the same time.
129142 + */
129143 +#if defined(HUF_FORCE_DECOMPRESS_X1) && \
129144 +    defined(HUF_FORCE_DECOMPRESS_X2)
129145 +#error "Cannot force the use of the X1 and X2 decoders at the same time!"
129146 +#endif
129149 +/* **************************************************************
129150 +*  Error Management
129151 +****************************************************************/
129152 +#define HUF_isError ERR_isError
129155 +/* **************************************************************
129156 +*  Byte alignment for workSpace management
129157 +****************************************************************/
129158 +#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)
129159 +#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
129162 +/* **************************************************************
129163 +*  BMI2 Variant Wrappers
129164 +****************************************************************/
129165 +#if DYNAMIC_BMI2
129167 +#define HUF_DGEN(fn)                                                        \
129168 +                                                                            \
129169 +    static size_t fn##_default(                                             \
129170 +                  void* dst,  size_t dstSize,                               \
129171 +            const void* cSrc, size_t cSrcSize,                              \
129172 +            const HUF_DTable* DTable)                                       \
129173 +    {                                                                       \
129174 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
129175 +    }                                                                       \
129176 +                                                                            \
129177 +    static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2(                       \
129178 +                  void* dst,  size_t dstSize,                               \
129179 +            const void* cSrc, size_t cSrcSize,                              \
129180 +            const HUF_DTable* DTable)                                       \
129181 +    {                                                                       \
129182 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
129183 +    }                                                                       \
129184 +                                                                            \
129185 +    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
129186 +                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
129187 +    {                                                                       \
129188 +        if (bmi2) {                                                         \
129189 +            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);         \
129190 +        }                                                                   \
129191 +        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable);          \
129192 +    }
129194 +#else
129196 +#define HUF_DGEN(fn)                                                        \
129197 +    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
129198 +                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
129199 +    {                                                                       \
129200 +        (void)bmi2;                                                         \
129201 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
129202 +    }
129204 +#endif
129207 +/*-***************************/
129208 +/*  generic DTableDesc       */
129209 +/*-***************************/
129210 +typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
129212 +static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
129214 +    DTableDesc dtd;
129215 +    ZSTD_memcpy(&dtd, table, sizeof(dtd));
129216 +    return dtd;
129220 +#ifndef HUF_FORCE_DECOMPRESS_X2
129222 +/*-***************************/
129223 +/*  single-symbol decoding   */
129224 +/*-***************************/
129225 +typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1;   /* single-symbol decoding */
129228 + * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
129229 + * a time.
129230 + */
129231 +static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
129232 +    U64 D4;
129233 +    if (MEM_isLittleEndian()) {
129234 +        D4 = symbol + (nbBits << 8);
129235 +    } else {
129236 +        D4 = (symbol << 8) + nbBits;
129237 +    }
129238 +    D4 *= 0x0001000100010001ULL;
129239 +    return D4;
129242 +typedef struct {
129243 +        U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
129244 +        U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
129245 +        U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
129246 +        BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
129247 +        BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
129248 +} HUF_ReadDTableX1_Workspace;
129251 +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
129253 +    return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
129256 +size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
129258 +    U32 tableLog = 0;
129259 +    U32 nbSymbols = 0;
129260 +    size_t iSize;
129261 +    void* const dtPtr = DTable + 1;
129262 +    HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
129263 +    HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;
129265 +    DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
129266 +    if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
129268 +    DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
129269 +    /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzer complain ... */
129271 +    iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
129272 +    if (HUF_isError(iSize)) return iSize;
129274 +    /* Table header */
129275 +    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
129276 +        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit in */
129277 +        dtd.tableType = 0;
129278 +        dtd.tableLog = (BYTE)tableLog;
129279 +        ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
129280 +    }
129282 +    /* Compute symbols and rankStart given rankVal:
129283 +     *
129284 +     * rankVal already contains the number of values of each weight.
129285 +     *
129286 +     * symbols contains the symbols ordered by weight. First are the rankVal[0]
129287 +     * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
129288 +     * symbols[0] is filled (but unused) to avoid a branch.
129289 +     *
129290 +     * rankStart contains the offset where each rank belongs in the DTable.
129291 +     * rankStart[0] is not filled because there are no entries in the table for
129292 +     * weight 0.
129293 +     */
129294 +    {
129295 +        int n;
129296 +        int nextRankStart = 0;
129297 +        int const unroll = 4;
129298 +        int const nLimit = (int)nbSymbols - unroll + 1;
129299 +        for (n=0; n<(int)tableLog+1; n++) {
129300 +            U32 const curr = nextRankStart;
129301 +            nextRankStart += wksp->rankVal[n];
129302 +            wksp->rankStart[n] = curr;
129303 +        }
129304 +        for (n=0; n < nLimit; n += unroll) {
129305 +            int u;
129306 +            for (u=0; u < unroll; ++u) {
129307 +                size_t const w = wksp->huffWeight[n+u];
129308 +                wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
129309 +            }
129310 +        }
129311 +        for (; n < (int)nbSymbols; ++n) {
129312 +            size_t const w = wksp->huffWeight[n];
129313 +            wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
129314 +        }
129315 +    }
129317 +    /* fill DTable
129318 +     * We fill all entries of each weight in order.
129319 +     * That way length is a constant for each iteration of the outter loop.
129320 +     * We can switch based on the length to a different inner loop which is
129321 +     * optimized for that particular case.
129322 +     */
129323 +    {
129324 +        U32 w;
129325 +        int symbol=wksp->rankVal[0];
129326 +        int rankStart=0;
129327 +        for (w=1; w<tableLog+1; ++w) {
129328 +            int const symbolCount = wksp->rankVal[w];
129329 +            int const length = (1 << w) >> 1;
129330 +            int uStart = rankStart;
129331 +            BYTE const nbBits = (BYTE)(tableLog + 1 - w);
129332 +            int s;
129333 +            int u;
129334 +            switch (length) {
129335 +            case 1:
129336 +                for (s=0; s<symbolCount; ++s) {
129337 +                    HUF_DEltX1 D;
129338 +                    D.byte = wksp->symbols[symbol + s];
129339 +                    D.nbBits = nbBits;
129340 +                    dt[uStart] = D;
129341 +                    uStart += 1;
129342 +                }
129343 +                break;
129344 +            case 2:
129345 +                for (s=0; s<symbolCount; ++s) {
129346 +                    HUF_DEltX1 D;
129347 +                    D.byte = wksp->symbols[symbol + s];
129348 +                    D.nbBits = nbBits;
129349 +                    dt[uStart+0] = D;
129350 +                    dt[uStart+1] = D;
129351 +                    uStart += 2;
129352 +                }
129353 +                break;
129354 +            case 4:
129355 +                for (s=0; s<symbolCount; ++s) {
129356 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
129357 +                    MEM_write64(dt + uStart, D4);
129358 +                    uStart += 4;
129359 +                }
129360 +                break;
129361 +            case 8:
129362 +                for (s=0; s<symbolCount; ++s) {
129363 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
129364 +                    MEM_write64(dt + uStart, D4);
129365 +                    MEM_write64(dt + uStart + 4, D4);
129366 +                    uStart += 8;
129367 +                }
129368 +                break;
129369 +            default:
129370 +                for (s=0; s<symbolCount; ++s) {
129371 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
129372 +                    for (u=0; u < length; u += 16) {
129373 +                        MEM_write64(dt + uStart + u + 0, D4);
129374 +                        MEM_write64(dt + uStart + u + 4, D4);
129375 +                        MEM_write64(dt + uStart + u + 8, D4);
129376 +                        MEM_write64(dt + uStart + u + 12, D4);
129377 +                    }
129378 +                    assert(u == length);
129379 +                    uStart += length;
129380 +                }
129381 +                break;
129382 +            }
129383 +            symbol += symbolCount;
129384 +            rankStart += symbolCount * length;
129385 +        }
129386 +    }
129387 +    return iSize;
129390 +FORCE_INLINE_TEMPLATE BYTE
129391 +HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
129393 +    size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
129394 +    BYTE const c = dt[val].byte;
129395 +    BIT_skipBits(Dstream, dt[val].nbBits);
129396 +    return c;
129399 +#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
129400 +    *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
129402 +#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr)  \
129403 +    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
129404 +        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
129406 +#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
129407 +    if (MEM_64bits()) \
129408 +        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
129410 +HINT_INLINE size_t
129411 +HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
129413 +    BYTE* const pStart = p;
129415 +    /* up to 4 symbols at a time */
129416 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
129417 +        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
129418 +        HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
129419 +        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
129420 +        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
129421 +    }
129423 +    /* [0-3] symbols remaining */
129424 +    if (MEM_32bits())
129425 +        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
129426 +            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
129428 +    /* no more data to retrieve from bitstream, no need to reload */
129429 +    while (p < pEnd)
129430 +        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
129432 +    return pEnd-pStart;
129435 +FORCE_INLINE_TEMPLATE size_t
129436 +HUF_decompress1X1_usingDTable_internal_body(
129437 +          void* dst,  size_t dstSize,
129438 +    const void* cSrc, size_t cSrcSize,
129439 +    const HUF_DTable* DTable)
129441 +    BYTE* op = (BYTE*)dst;
129442 +    BYTE* const oend = op + dstSize;
129443 +    const void* dtPtr = DTable + 1;
129444 +    const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
129445 +    BIT_DStream_t bitD;
129446 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
129447 +    U32 const dtLog = dtd.tableLog;
129449 +    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
129451 +    HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
129453 +    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
129455 +    return dstSize;
129458 +FORCE_INLINE_TEMPLATE size_t
129459 +HUF_decompress4X1_usingDTable_internal_body(
129460 +          void* dst,  size_t dstSize,
129461 +    const void* cSrc, size_t cSrcSize,
129462 +    const HUF_DTable* DTable)
129464 +    /* Check */
129465 +    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */
129467 +    {   const BYTE* const istart = (const BYTE*) cSrc;
129468 +        BYTE* const ostart = (BYTE*) dst;
129469 +        BYTE* const oend = ostart + dstSize;
129470 +        BYTE* const olimit = oend - 3;
129471 +        const void* const dtPtr = DTable + 1;
129472 +        const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
129474 +        /* Init */
129475 +        BIT_DStream_t bitD1;
129476 +        BIT_DStream_t bitD2;
129477 +        BIT_DStream_t bitD3;
129478 +        BIT_DStream_t bitD4;
129479 +        size_t const length1 = MEM_readLE16(istart);
129480 +        size_t const length2 = MEM_readLE16(istart+2);
129481 +        size_t const length3 = MEM_readLE16(istart+4);
129482 +        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
129483 +        const BYTE* const istart1 = istart + 6;  /* jumpTable */
129484 +        const BYTE* const istart2 = istart1 + length1;
129485 +        const BYTE* const istart3 = istart2 + length2;
129486 +        const BYTE* const istart4 = istart3 + length3;
129487 +        const size_t segmentSize = (dstSize+3) / 4;
129488 +        BYTE* const opStart2 = ostart + segmentSize;
129489 +        BYTE* const opStart3 = opStart2 + segmentSize;
129490 +        BYTE* const opStart4 = opStart3 + segmentSize;
129491 +        BYTE* op1 = ostart;
129492 +        BYTE* op2 = opStart2;
129493 +        BYTE* op3 = opStart3;
129494 +        BYTE* op4 = opStart4;
129495 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
129496 +        U32 const dtLog = dtd.tableLog;
129497 +        U32 endSignal = 1;
129499 +        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
129500 +        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
129501 +        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
129502 +        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
129503 +        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
129505 +        /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
129506 +        for ( ; (endSignal) & (op4 < olimit) ; ) {
129507 +            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
129508 +            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
129509 +            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
129510 +            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
129511 +            HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
129512 +            HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
129513 +            HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
129514 +            HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
129515 +            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
129516 +            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
129517 +            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
129518 +            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
129519 +            HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
129520 +            HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
129521 +            HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
129522 +            HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
129523 +            endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
129524 +            endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
129525 +            endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
129526 +            endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
129527 +        }
129529 +        /* check corruption */
129530 +        /* note : should not be necessary : op# advance in lock step, and we control op4.
129531 +         *        but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */
129532 +        if (op1 > opStart2) return ERROR(corruption_detected);
129533 +        if (op2 > opStart3) return ERROR(corruption_detected);
129534 +        if (op3 > opStart4) return ERROR(corruption_detected);
129535 +        /* note : op4 supposed already verified within main loop */
129537 +        /* finish bitStreams one by one */
129538 +        HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
129539 +        HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
129540 +        HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
129541 +        HUF_decodeStreamX1(op4, &bitD4, oend,     dt, dtLog);
129543 +        /* check */
129544 +        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
129545 +          if (!endCheck) return ERROR(corruption_detected); }
129547 +        /* decoded size */
129548 +        return dstSize;
129549 +    }
129553 +typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
129554 +                                               const void *cSrc,
129555 +                                               size_t cSrcSize,
129556 +                                               const HUF_DTable *DTable);
129558 +HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
129559 +HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
129563 +size_t HUF_decompress1X1_usingDTable(
129564 +          void* dst,  size_t dstSize,
129565 +    const void* cSrc, size_t cSrcSize,
129566 +    const HUF_DTable* DTable)
129568 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
129569 +    if (dtd.tableType != 0) return ERROR(GENERIC);
129570 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
129573 +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
129574 +                                   const void* cSrc, size_t cSrcSize,
129575 +                                   void* workSpace, size_t wkspSize)
129577 +    const BYTE* ip = (const BYTE*) cSrc;
129579 +    size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
129580 +    if (HUF_isError(hSize)) return hSize;
129581 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
129582 +    ip += hSize; cSrcSize -= hSize;
129584 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
129588 +size_t HUF_decompress4X1_usingDTable(
129589 +          void* dst,  size_t dstSize,
129590 +    const void* cSrc, size_t cSrcSize,
129591 +    const HUF_DTable* DTable)
129593 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
129594 +    if (dtd.tableType != 0) return ERROR(GENERIC);
129595 +    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
129598 +static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
129599 +                                   const void* cSrc, size_t cSrcSize,
129600 +                                   void* workSpace, size_t wkspSize, int bmi2)
129602 +    const BYTE* ip = (const BYTE*) cSrc;
129604 +    size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
129605 +    if (HUF_isError(hSize)) return hSize;
129606 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
129607 +    ip += hSize; cSrcSize -= hSize;
129609 +    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
129612 +size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
129613 +                                   const void* cSrc, size_t cSrcSize,
129614 +                                   void* workSpace, size_t wkspSize)
129616 +    return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
129620 +#endif /* HUF_FORCE_DECOMPRESS_X2 */
129623 +#ifndef HUF_FORCE_DECOMPRESS_X1
129625 +/* *************************/
129626 +/* double-symbols decoding */
129627 +/* *************************/
129629 +typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2;  /* double-symbols decoding */
129630 +typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
129631 +typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
129632 +typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
129635 +/* HUF_fillDTableX2Level2() :
129636 + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
129637 +static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
129638 +                           const U32* rankValOrigin, const int minWeight,
129639 +                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
129640 +                           U32 nbBitsBaseline, U16 baseSeq, U32* wksp, size_t wkspSize)
129642 +    HUF_DEltX2 DElt;
129643 +    U32* rankVal = wksp;
129645 +    assert(wkspSize >= HUF_TABLELOG_MAX + 1);
129646 +    (void)wkspSize;
129647 +    /* get pre-calculated rankVal */
129648 +    ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
129650 +    /* fill skipped values */
129651 +    if (minWeight>1) {
129652 +        U32 i, skipSize = rankVal[minWeight];
129653 +        MEM_writeLE16(&(DElt.sequence), baseSeq);
129654 +        DElt.nbBits   = (BYTE)(consumed);
129655 +        DElt.length   = 1;
129656 +        for (i = 0; i < skipSize; i++)
129657 +            DTable[i] = DElt;
129658 +    }
129660 +    /* fill DTable */
129661 +    {   U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
129662 +            const U32 symbol = sortedSymbols[s].symbol;
129663 +            const U32 weight = sortedSymbols[s].weight;
129664 +            const U32 nbBits = nbBitsBaseline - weight;
129665 +            const U32 length = 1 << (sizeLog-nbBits);
129666 +            const U32 start = rankVal[weight];
129667 +            U32 i = start;
129668 +            const U32 end = start + length;
129670 +            MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
129671 +            DElt.nbBits = (BYTE)(nbBits + consumed);
129672 +            DElt.length = 2;
129673 +            do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
129675 +            rankVal[weight] += length;
129676 +    }   }
129680 +static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
129681 +                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
129682 +                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
129683 +                           const U32 nbBitsBaseline, U32* wksp, size_t wkspSize)
129685 +    U32* rankVal = wksp;
129686 +    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
129687 +    const U32 minBits  = nbBitsBaseline - maxWeight;
129688 +    U32 s;
129690 +    assert(wkspSize >= HUF_TABLELOG_MAX + 1);
129691 +    wksp += HUF_TABLELOG_MAX + 1;
129692 +    wkspSize -= HUF_TABLELOG_MAX + 1;
129694 +    ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
129696 +    /* fill DTable */
129697 +    for (s=0; s<sortedListSize; s++) {
129698 +        const U16 symbol = sortedList[s].symbol;
129699 +        const U32 weight = sortedList[s].weight;
129700 +        const U32 nbBits = nbBitsBaseline - weight;
129701 +        const U32 start = rankVal[weight];
129702 +        const U32 length = 1 << (targetLog-nbBits);
129704 +        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
129705 +            U32 sortedRank;
129706 +            int minWeight = nbBits + scaleLog;
129707 +            if (minWeight < 1) minWeight = 1;
129708 +            sortedRank = rankStart[minWeight];
129709 +            HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
129710 +                           rankValOrigin[nbBits], minWeight,
129711 +                           sortedList+sortedRank, sortedListSize-sortedRank,
129712 +                           nbBitsBaseline, symbol, wksp, wkspSize);
129713 +        } else {
129714 +            HUF_DEltX2 DElt;
129715 +            MEM_writeLE16(&(DElt.sequence), symbol);
129716 +            DElt.nbBits = (BYTE)(nbBits);
129717 +            DElt.length = 1;
129718 +            {   U32 const end = start + length;
129719 +                U32 u;
129720 +                for (u = start; u < end; u++) DTable[u] = DElt;
129721 +        }   }
129722 +        rankVal[weight] += length;
129723 +    }
129726 +typedef struct {
129727 +    rankValCol_t rankVal[HUF_TABLELOG_MAX];
129728 +    U32 rankStats[HUF_TABLELOG_MAX + 1];
129729 +    U32 rankStart0[HUF_TABLELOG_MAX + 2];
129730 +    sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
129731 +    BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
129732 +    U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
129733 +} HUF_ReadDTableX2_Workspace;
129735 +size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
129736 +                       const void* src, size_t srcSize,
129737 +                             void* workSpace, size_t wkspSize)
129739 +    U32 tableLog, maxW, sizeOfSort, nbSymbols;
129740 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
129741 +    U32 const maxTableLog = dtd.maxTableLog;
129742 +    size_t iSize;
129743 +    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
129744 +    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
129745 +    U32 *rankStart;
129747 +    HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
129749 +    if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
129751 +    rankStart = wksp->rankStart0 + 1;
129752 +    ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
129753 +    ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
129755 +    DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
129756 +    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
129757 +    /* ZSTD_memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzer complain ... */
129759 +    iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), /* bmi2 */ 0);
129760 +    if (HUF_isError(iSize)) return iSize;
129762 +    /* check result */
129763 +    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
129765 +    /* find maxWeight */
129766 +    for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
129768 +    /* Get start index of each weight */
129769 +    {   U32 w, nextRankStart = 0;
129770 +        for (w=1; w<maxW+1; w++) {
129771 +            U32 curr = nextRankStart;
129772 +            nextRankStart += wksp->rankStats[w];
129773 +            rankStart[w] = curr;
129774 +        }
129775 +        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
129776 +        sizeOfSort = nextRankStart;
129777 +    }
129779 +    /* sort symbols by weight */
129780 +    {   U32 s;
129781 +        for (s=0; s<nbSymbols; s++) {
129782 +            U32 const w = wksp->weightList[s];
129783 +            U32 const r = rankStart[w]++;
129784 +            wksp->sortedSymbol[r].symbol = (BYTE)s;
129785 +            wksp->sortedSymbol[r].weight = (BYTE)w;
129786 +        }
129787 +        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
129788 +    }
129790 +    /* Build rankVal */
129791 +    {   U32* const rankVal0 = wksp->rankVal[0];
129792 +        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
129793 +            U32 nextRankVal = 0;
129794 +            U32 w;
129795 +            for (w=1; w<maxW+1; w++) {
129796 +                U32 curr = nextRankVal;
129797 +                nextRankVal += wksp->rankStats[w] << (w+rescale);
129798 +                rankVal0[w] = curr;
129799 +        }   }
129800 +        {   U32 const minBits = tableLog+1 - maxW;
129801 +            U32 consumed;
129802 +            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
129803 +                U32* const rankValPtr = wksp->rankVal[consumed];
129804 +                U32 w;
129805 +                for (w = 1; w < maxW+1; w++) {
129806 +                    rankValPtr[w] = rankVal0[w] >> consumed;
129807 +    }   }   }   }
129809 +    HUF_fillDTableX2(dt, maxTableLog,
129810 +                   wksp->sortedSymbol, sizeOfSort,
129811 +                   wksp->rankStart0, wksp->rankVal, maxW,
129812 +                   tableLog+1,
129813 +                   wksp->calleeWksp, sizeof(wksp->calleeWksp) / sizeof(U32));
129815 +    dtd.tableLog = (BYTE)maxTableLog;
129816 +    dtd.tableType = 1;
129817 +    ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
129818 +    return iSize;
129822 +FORCE_INLINE_TEMPLATE U32
129823 +HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
129825 +    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
129826 +    ZSTD_memcpy(op, dt+val, 2);
129827 +    BIT_skipBits(DStream, dt[val].nbBits);
129828 +    return dt[val].length;
129831 +FORCE_INLINE_TEMPLATE U32
129832 +HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
129834 +    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
129835 +    ZSTD_memcpy(op, dt+val, 1);
129836 +    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
129837 +    else {
129838 +        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
129839 +            BIT_skipBits(DStream, dt[val].nbBits);
129840 +            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
129841 +                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
129842 +                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
129843 +    }   }
129844 +    return 1;
129847 +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
129848 +    ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
129850 +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
129851 +    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
129852 +        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
129854 +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
129855 +    if (MEM_64bits()) \
129856 +        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
129858 +HINT_INLINE size_t
129859 +HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
129860 +                const HUF_DEltX2* const dt, const U32 dtLog)
129862 +    BYTE* const pStart = p;
129864 +    /* up to 8 symbols at a time */
129865 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
129866 +        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
129867 +        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
129868 +        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
129869 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
129870 +    }
129872 +    /* closer to end : up to 2 symbols at a time */
129873 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
129874 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
129876 +    while (p <= pEnd-2)
129877 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
129879 +    if (p < pEnd)
129880 +        p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
129882 +    return p-pStart;
129885 +FORCE_INLINE_TEMPLATE size_t
129886 +HUF_decompress1X2_usingDTable_internal_body(
129887 +          void* dst,  size_t dstSize,
129888 +    const void* cSrc, size_t cSrcSize,
129889 +    const HUF_DTable* DTable)
129891 +    BIT_DStream_t bitD;
129893 +    /* Init */
129894 +    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
129896 +    /* decode */
129897 +    {   BYTE* const ostart = (BYTE*) dst;
129898 +        BYTE* const oend = ostart + dstSize;
129899 +        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
129900 +        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
129901 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
129902 +        HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
129903 +    }
129905 +    /* check */
129906 +    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
129908 +    /* decoded size */
129909 +    return dstSize;
129912 +FORCE_INLINE_TEMPLATE size_t
129913 +HUF_decompress4X2_usingDTable_internal_body(
129914 +          void* dst,  size_t dstSize,
129915 +    const void* cSrc, size_t cSrcSize,
129916 +    const HUF_DTable* DTable)
129918 +    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
129920 +    {   const BYTE* const istart = (const BYTE*) cSrc;
129921 +        BYTE* const ostart = (BYTE*) dst;
129922 +        BYTE* const oend = ostart + dstSize;
129923 +        BYTE* const olimit = oend - (sizeof(size_t)-1);
129924 +        const void* const dtPtr = DTable+1;
129925 +        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
129927 +        /* Init */
129928 +        BIT_DStream_t bitD1;
129929 +        BIT_DStream_t bitD2;
129930 +        BIT_DStream_t bitD3;
129931 +        BIT_DStream_t bitD4;
129932 +        size_t const length1 = MEM_readLE16(istart);
129933 +        size_t const length2 = MEM_readLE16(istart+2);
129934 +        size_t const length3 = MEM_readLE16(istart+4);
129935 +        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
129936 +        const BYTE* const istart1 = istart + 6;  /* jumpTable */
129937 +        const BYTE* const istart2 = istart1 + length1;
129938 +        const BYTE* const istart3 = istart2 + length2;
129939 +        const BYTE* const istart4 = istart3 + length3;
129940 +        size_t const segmentSize = (dstSize+3) / 4;
129941 +        BYTE* const opStart2 = ostart + segmentSize;
129942 +        BYTE* const opStart3 = opStart2 + segmentSize;
129943 +        BYTE* const opStart4 = opStart3 + segmentSize;
129944 +        BYTE* op1 = ostart;
129945 +        BYTE* op2 = opStart2;
129946 +        BYTE* op3 = opStart3;
129947 +        BYTE* op4 = opStart4;
129948 +        U32 endSignal = 1;
129949 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
129950 +        U32 const dtLog = dtd.tableLog;
129952 +        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
129953 +        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
129954 +        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
129955 +        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
129956 +        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
129958 +        /* 16-32 symbols per loop (4-8 symbols per stream) */
129959 +        for ( ; (endSignal) & (op4 < olimit); ) {
129960 +#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
129961 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
129962 +            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
129963 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
129964 +            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
129965 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
129966 +            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
129967 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
129968 +            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
129969 +            endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
129970 +            endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
129971 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
129972 +            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
129973 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
129974 +            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
129975 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
129976 +            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
129977 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
129978 +            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
129979 +            endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
129980 +            endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
129981 +#else
129982 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
129983 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
129984 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
129985 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
129986 +            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
129987 +            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
129988 +            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
129989 +            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
129990 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
129991 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
129992 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
129993 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
129994 +            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
129995 +            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
129996 +            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
129997 +            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
129998 +            endSignal = (U32)LIKELY(
129999 +                        (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
130000 +                      & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
130001 +                      & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
130002 +                      & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
130003 +#endif
130004 +        }
130006 +        /* check corruption */
130007 +        if (op1 > opStart2) return ERROR(corruption_detected);
130008 +        if (op2 > opStart3) return ERROR(corruption_detected);
130009 +        if (op3 > opStart4) return ERROR(corruption_detected);
130010 +        /* note : op4 already verified within main loop */
130012 +        /* finish bitStreams one by one */
130013 +        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
130014 +        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
130015 +        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
130016 +        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
130018 +        /* check */
130019 +        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
130020 +          if (!endCheck) return ERROR(corruption_detected); }
130022 +        /* decoded size */
130023 +        return dstSize;
130024 +    }
130027 +HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
130028 +HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
130030 +size_t HUF_decompress1X2_usingDTable(
130031 +          void* dst,  size_t dstSize,
130032 +    const void* cSrc, size_t cSrcSize,
130033 +    const HUF_DTable* DTable)
130035 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
130036 +    if (dtd.tableType != 1) return ERROR(GENERIC);
130037 +    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
130040 +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
130041 +                                   const void* cSrc, size_t cSrcSize,
130042 +                                   void* workSpace, size_t wkspSize)
130044 +    const BYTE* ip = (const BYTE*) cSrc;
130046 +    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
130047 +                                               workSpace, wkspSize);
130048 +    if (HUF_isError(hSize)) return hSize;
130049 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
130050 +    ip += hSize; cSrcSize -= hSize;
130052 +    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
130056 +size_t HUF_decompress4X2_usingDTable(
130057 +          void* dst,  size_t dstSize,
130058 +    const void* cSrc, size_t cSrcSize,
130059 +    const HUF_DTable* DTable)
130061 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
130062 +    if (dtd.tableType != 1) return ERROR(GENERIC);
130063 +    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
130066 +static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
130067 +                                   const void* cSrc, size_t cSrcSize,
130068 +                                   void* workSpace, size_t wkspSize, int bmi2)
130070 +    const BYTE* ip = (const BYTE*) cSrc;
130072 +    size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
130073 +                                         workSpace, wkspSize);
130074 +    if (HUF_isError(hSize)) return hSize;
130075 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
130076 +    ip += hSize; cSrcSize -= hSize;
130078 +    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
130081 +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
130082 +                                   const void* cSrc, size_t cSrcSize,
130083 +                                   void* workSpace, size_t wkspSize)
130085 +    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
130089 +#endif /* HUF_FORCE_DECOMPRESS_X1 */
130092 +/* ***********************************/
130093 +/* Universal decompression selectors */
130094 +/* ***********************************/
130096 +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
130097 +                                    const void* cSrc, size_t cSrcSize,
130098 +                                    const HUF_DTable* DTable)
130100 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
130101 +#if defined(HUF_FORCE_DECOMPRESS_X1)
130102 +    (void)dtd;
130103 +    assert(dtd.tableType == 0);
130104 +    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
130105 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
130106 +    (void)dtd;
130107 +    assert(dtd.tableType == 1);
130108 +    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
130109 +#else
130110 +    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
130111 +                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
130112 +#endif
130115 +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
130116 +                                    const void* cSrc, size_t cSrcSize,
130117 +                                    const HUF_DTable* DTable)
130119 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
130120 +#if defined(HUF_FORCE_DECOMPRESS_X1)
130121 +    (void)dtd;
130122 +    assert(dtd.tableType == 0);
130123 +    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
130124 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
130125 +    (void)dtd;
130126 +    assert(dtd.tableType == 1);
130127 +    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
130128 +#else
130129 +    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
130130 +                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
130131 +#endif
130135 +#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
130136 +typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
130137 +static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
130139 +    /* single, double, quad */
130140 +    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
130141 +    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
130142 +    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
130143 +    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
130144 +    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
130145 +    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
130146 +    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
130147 +    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
130148 +    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
130149 +    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
130150 +    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
130151 +    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
130152 +    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
130153 +    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
130154 +    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
130155 +    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
130157 +#endif
130159 +/** HUF_selectDecoder() :
130160 + *  Tells which decoder is likely to decode faster,
130161 + *  based on a set of pre-computed metrics.
130162 + * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
130163 + *  Assumption : 0 < dstSize <= 128 KB */
130164 +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
130166 +    assert(dstSize > 0);
130167 +    assert(dstSize <= 128*1024);
130168 +#if defined(HUF_FORCE_DECOMPRESS_X1)
130169 +    (void)dstSize;
130170 +    (void)cSrcSize;
130171 +    return 0;
130172 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
130173 +    (void)dstSize;
130174 +    (void)cSrcSize;
130175 +    return 1;
130176 +#else
130177 +    /* decoder timing evaluation */
130178 +    {   U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
130179 +        U32 const D256 = (U32)(dstSize >> 8);
130180 +        U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
130181 +        U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
130182 +        DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, to reduce cache eviction */
130183 +        return DTime1 < DTime0;
130184 +    }
130185 +#endif
130189 +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
130190 +                                     size_t dstSize, const void* cSrc,
130191 +                                     size_t cSrcSize, void* workSpace,
130192 +                                     size_t wkspSize)
130194 +    /* validation checks */
130195 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
130196 +    if (cSrcSize == 0) return ERROR(corruption_detected);
130198 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
130199 +#if defined(HUF_FORCE_DECOMPRESS_X1)
130200 +        (void)algoNb;
130201 +        assert(algoNb == 0);
130202 +        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
130203 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
130204 +        (void)algoNb;
130205 +        assert(algoNb == 1);
130206 +        return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
130207 +#else
130208 +        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
130209 +                            cSrcSize, workSpace, wkspSize):
130210 +                        HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
130211 +#endif
130212 +    }
130215 +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
130216 +                                  const void* cSrc, size_t cSrcSize,
130217 +                                  void* workSpace, size_t wkspSize)
130219 +    /* validation checks */
130220 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
130221 +    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
130222 +    if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
130223 +    if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
130225 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
130226 +#if defined(HUF_FORCE_DECOMPRESS_X1)
130227 +        (void)algoNb;
130228 +        assert(algoNb == 0);
130229 +        return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
130230 +                                cSrcSize, workSpace, wkspSize);
130231 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
130232 +        (void)algoNb;
130233 +        assert(algoNb == 1);
130234 +        return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
130235 +                                cSrcSize, workSpace, wkspSize);
130236 +#else
130237 +        return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
130238 +                                cSrcSize, workSpace, wkspSize):
130239 +                        HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
130240 +                                cSrcSize, workSpace, wkspSize);
130241 +#endif
130242 +    }
130246 +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
130248 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
130249 +#if defined(HUF_FORCE_DECOMPRESS_X1)
130250 +    (void)dtd;
130251 +    assert(dtd.tableType == 0);
130252 +    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
130253 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
130254 +    (void)dtd;
130255 +    assert(dtd.tableType == 1);
130256 +    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
130257 +#else
130258 +    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
130259 +                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
130260 +#endif
130263 +#ifndef HUF_FORCE_DECOMPRESS_X2
130264 +size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
130266 +    const BYTE* ip = (const BYTE*) cSrc;
130268 +    size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
130269 +    if (HUF_isError(hSize)) return hSize;
130270 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
130271 +    ip += hSize; cSrcSize -= hSize;
130273 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
130275 +#endif
130277 +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
130279 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
130280 +#if defined(HUF_FORCE_DECOMPRESS_X1)
130281 +    (void)dtd;
130282 +    assert(dtd.tableType == 0);
130283 +    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
130284 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
130285 +    (void)dtd;
130286 +    assert(dtd.tableType == 1);
130287 +    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
130288 +#else
130289 +    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
130290 +                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
130291 +#endif
130294 +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
130296 +    /* validation checks */
130297 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
130298 +    if (cSrcSize == 0) return ERROR(corruption_detected);
130300 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
130301 +#if defined(HUF_FORCE_DECOMPRESS_X1)
130302 +        (void)algoNb;
130303 +        assert(algoNb == 0);
130304 +        return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
130305 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
130306 +        (void)algoNb;
130307 +        assert(algoNb == 1);
130308 +        return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
130309 +#else
130310 +        return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
130311 +                        HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
130312 +#endif
130313 +    }
130315 diff --git a/lib/zstd/decompress/zstd_ddict.c b/lib/zstd/decompress/zstd_ddict.c
130316 new file mode 100644
130317 index 000000000000..dbbc7919de53
130318 --- /dev/null
130319 +++ b/lib/zstd/decompress/zstd_ddict.c
130320 @@ -0,0 +1,241 @@
130322 + * Copyright (c) Yann Collet, Facebook, Inc.
130323 + * All rights reserved.
130325 + * This source code is licensed under both the BSD-style license (found in the
130326 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
130327 + * in the COPYING file in the root directory of this source tree).
130328 + * You may select, at your option, one of the above-listed licenses.
130329 + */
130331 +/* zstd_ddict.c :
130332 + * concentrates all logic that needs to know the internals of ZSTD_DDict object */
130334 +/*-*******************************************************
130335 +*  Dependencies
130336 +*********************************************************/
130337 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
130338 +#include "../common/cpu.h"         /* bmi2 */
130339 +#include "../common/mem.h"         /* low level memory routines */
130340 +#define FSE_STATIC_LINKING_ONLY
130341 +#include "../common/fse.h"
130342 +#define HUF_STATIC_LINKING_ONLY
130343 +#include "../common/huf.h"
130344 +#include "zstd_decompress_internal.h"
130345 +#include "zstd_ddict.h"
130350 +/*-*******************************************************
130351 +*  Types
130352 +*********************************************************/
130353 +struct ZSTD_DDict_s {
130354 +    void* dictBuffer;
130355 +    const void* dictContent;
130356 +    size_t dictSize;
130357 +    ZSTD_entropyDTables_t entropy;
130358 +    U32 dictID;
130359 +    U32 entropyPresent;
130360 +    ZSTD_customMem cMem;
130361 +};  /* typedef'd to ZSTD_DDict within "zstd.h" */
130363 +const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
130365 +    assert(ddict != NULL);
130366 +    return ddict->dictContent;
130369 +size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
130371 +    assert(ddict != NULL);
130372 +    return ddict->dictSize;
130375 +void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
130377 +    DEBUGLOG(4, "ZSTD_copyDDictParameters");
130378 +    assert(dctx != NULL);
130379 +    assert(ddict != NULL);
130380 +    dctx->dictID = ddict->dictID;
130381 +    dctx->prefixStart = ddict->dictContent;
130382 +    dctx->virtualStart = ddict->dictContent;
130383 +    dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
130384 +    dctx->previousDstEnd = dctx->dictEnd;
130385 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
130386 +    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
130387 +    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
130388 +#endif
130389 +    if (ddict->entropyPresent) {
130390 +        dctx->litEntropy = 1;
130391 +        dctx->fseEntropy = 1;
130392 +        dctx->LLTptr = ddict->entropy.LLTable;
130393 +        dctx->MLTptr = ddict->entropy.MLTable;
130394 +        dctx->OFTptr = ddict->entropy.OFTable;
130395 +        dctx->HUFptr = ddict->entropy.hufTable;
130396 +        dctx->entropy.rep[0] = ddict->entropy.rep[0];
130397 +        dctx->entropy.rep[1] = ddict->entropy.rep[1];
130398 +        dctx->entropy.rep[2] = ddict->entropy.rep[2];
130399 +    } else {
130400 +        dctx->litEntropy = 0;
130401 +        dctx->fseEntropy = 0;
130402 +    }
130406 +static size_t
130407 +ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
130408 +                           ZSTD_dictContentType_e dictContentType)
130410 +    ddict->dictID = 0;
130411 +    ddict->entropyPresent = 0;
130412 +    if (dictContentType == ZSTD_dct_rawContent) return 0;
130414 +    if (ddict->dictSize < 8) {
130415 +        if (dictContentType == ZSTD_dct_fullDict)
130416 +            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
130417 +        return 0;   /* pure content mode */
130418 +    }
130419 +    {   U32 const magic = MEM_readLE32(ddict->dictContent);
130420 +        if (magic != ZSTD_MAGIC_DICTIONARY) {
130421 +            if (dictContentType == ZSTD_dct_fullDict)
130422 +                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
130423 +            return 0;   /* pure content mode */
130424 +        }
130425 +    }
130426 +    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
130428 +    /* load entropy tables */
130429 +    RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
130430 +            &ddict->entropy, ddict->dictContent, ddict->dictSize)),
130431 +        dictionary_corrupted, "");
130432 +    ddict->entropyPresent = 1;
130433 +    return 0;
130437 +static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
130438 +                                      const void* dict, size_t dictSize,
130439 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
130440 +                                      ZSTD_dictContentType_e dictContentType)
130442 +    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
130443 +        ddict->dictBuffer = NULL;
130444 +        ddict->dictContent = dict;
130445 +        if (!dict) dictSize = 0;
130446 +    } else {
130447 +        void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem);
130448 +        ddict->dictBuffer = internalBuffer;
130449 +        ddict->dictContent = internalBuffer;
130450 +        if (!internalBuffer) return ERROR(memory_allocation);
130451 +        ZSTD_memcpy(internalBuffer, dict, dictSize);
130452 +    }
130453 +    ddict->dictSize = dictSize;
130454 +    ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
130456 +    /* parse dictionary content */
130457 +    FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");
130459 +    return 0;
130462 +ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
130463 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
130464 +                                      ZSTD_dictContentType_e dictContentType,
130465 +                                      ZSTD_customMem customMem)
130467 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
130469 +    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem);
130470 +        if (ddict == NULL) return NULL;
130471 +        ddict->cMem = customMem;
130472 +        {   size_t const initResult = ZSTD_initDDict_internal(ddict,
130473 +                                            dict, dictSize,
130474 +                                            dictLoadMethod, dictContentType);
130475 +            if (ZSTD_isError(initResult)) {
130476 +                ZSTD_freeDDict(ddict);
130477 +                return NULL;
130478 +        }   }
130479 +        return ddict;
130480 +    }
130483 +/*! ZSTD_createDDict() :
130484 +*   Create a digested dictionary, to start decompression without startup delay.
130485 +*   `dict` content is copied inside DDict.
130486 +*   Consequently, `dict` can be released after `ZSTD_DDict` creation */
130487 +ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
130489 +    ZSTD_customMem const allocator = { NULL, NULL, NULL };
130490 +    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
130493 +/*! ZSTD_createDDict_byReference() :
130494 + *  Create a digested dictionary, to start decompression without startup delay.
130495 + *  Dictionary content is simply referenced, it will be accessed during decompression.
130496 + *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
130497 +ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
130499 +    ZSTD_customMem const allocator = { NULL, NULL, NULL };
130500 +    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
130504 +const ZSTD_DDict* ZSTD_initStaticDDict(
130505 +                                void* sBuffer, size_t sBufferSize,
130506 +                                const void* dict, size_t dictSize,
130507 +                                ZSTD_dictLoadMethod_e dictLoadMethod,
130508 +                                ZSTD_dictContentType_e dictContentType)
130510 +    size_t const neededSpace = sizeof(ZSTD_DDict)
130511 +                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
130512 +    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
130513 +    assert(sBuffer != NULL);
130514 +    assert(dict != NULL);
130515 +    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */
130516 +    if (sBufferSize < neededSpace) return NULL;
130517 +    if (dictLoadMethod == ZSTD_dlm_byCopy) {
130518 +        ZSTD_memcpy(ddict+1, dict, dictSize);  /* local copy */
130519 +        dict = ddict+1;
130520 +    }
130521 +    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
130522 +                                              dict, dictSize,
130523 +                                              ZSTD_dlm_byRef, dictContentType) ))
130524 +        return NULL;
130525 +    return ddict;
130529 +size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
130531 +    if (ddict==NULL) return 0;   /* support free on NULL */
130532 +    {   ZSTD_customMem const cMem = ddict->cMem;
130533 +        ZSTD_customFree(ddict->dictBuffer, cMem);
130534 +        ZSTD_customFree(ddict, cMem);
130535 +        return 0;
130536 +    }
130539 +/*! ZSTD_estimateDDictSize() :
130540 + *  Estimate amount of memory that will be needed to create a dictionary for decompression.
130541 + *  Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */
130542 +size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
130544 +    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
130547 +size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
130549 +    if (ddict==NULL) return 0;   /* support sizeof on NULL */
130550 +    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
130553 +/*! ZSTD_getDictID_fromDDict() :
130554 + *  Provides the dictID of the dictionary loaded into `ddict`.
130555 + *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
130556 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
130557 +unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
130559 +    if (ddict==NULL) return 0;
130560 +    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
130562 diff --git a/lib/zstd/decompress/zstd_ddict.h b/lib/zstd/decompress/zstd_ddict.h
130563 new file mode 100644
130564 index 000000000000..8c1a79d666f8
130565 --- /dev/null
130566 +++ b/lib/zstd/decompress/zstd_ddict.h
130567 @@ -0,0 +1,44 @@
130569 + * Copyright (c) Yann Collet, Facebook, Inc.
130570 + * All rights reserved.
130572 + * This source code is licensed under both the BSD-style license (found in the
130573 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
130574 + * in the COPYING file in the root directory of this source tree).
130575 + * You may select, at your option, one of the above-listed licenses.
130576 + */
130579 +#ifndef ZSTD_DDICT_H
130580 +#define ZSTD_DDICT_H
130582 +/*-*******************************************************
130583 + *  Dependencies
130584 + *********************************************************/
130585 +#include "../common/zstd_deps.h"   /* size_t */
130586 +#include <linux/zstd.h>     /* ZSTD_DDict, and several public functions */
130589 +/*-*******************************************************
130590 + *  Interface
130591 + *********************************************************/
130593 +/* note: several prototypes are already published in `zstd.h` :
130594 + * ZSTD_createDDict()
130595 + * ZSTD_createDDict_byReference()
130596 + * ZSTD_createDDict_advanced()
130597 + * ZSTD_freeDDict()
130598 + * ZSTD_initStaticDDict()
130599 + * ZSTD_sizeof_DDict()
130600 + * ZSTD_estimateDDictSize()
130601 + * ZSTD_getDictID_fromDict()
130602 + */
130604 +const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
130605 +size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);
130607 +void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
130611 +#endif /* ZSTD_DDICT_H */
130612 diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
130613 new file mode 100644
130614 index 000000000000..16b4ea795a7e
130615 --- /dev/null
130616 +++ b/lib/zstd/decompress/zstd_decompress.c
130617 @@ -0,0 +1,2075 @@
130619 + * Copyright (c) Yann Collet, Facebook, Inc.
130620 + * All rights reserved.
130622 + * This source code is licensed under both the BSD-style license (found in the
130623 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
130624 + * in the COPYING file in the root directory of this source tree).
130625 + * You may select, at your option, one of the above-listed licenses.
130626 + */
130629 +/* ***************************************************************
130630 +*  Tuning parameters
130631 +*****************************************************************/
130633 + * HEAPMODE :
130634 + * Select how default decompression function ZSTD_decompress() allocates its context,
130635 + * on stack (0), or into heap (1, default; requires malloc()).
130636 + * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
130637 + */
130638 +#ifndef ZSTD_HEAPMODE
130639 +#  define ZSTD_HEAPMODE 1
130640 +#endif
130643 +*  LEGACY_SUPPORT :
130644 +*  if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
130648 + *  MAXWINDOWSIZE_DEFAULT :
130649 + *  maximum window size accepted by DStream __by default__.
130650 + *  Frames requiring more memory will be rejected.
130651 + *  It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
130652 + */
130653 +#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
130654 +#  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
130655 +#endif
130658 + *  NO_FORWARD_PROGRESS_MAX :
130659 + *  maximum allowed nb of calls to ZSTD_decompressStream()
130660 + *  without any forward progress
130661 + *  (defined as: no byte read from input, and no byte flushed to output)
130662 + *  before triggering an error.
130663 + */
130664 +#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
130665 +#  define ZSTD_NO_FORWARD_PROGRESS_MAX 16
130666 +#endif
130669 +/*-*******************************************************
130670 +*  Dependencies
130671 +*********************************************************/
130672 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
130673 +#include "../common/cpu.h"         /* bmi2 */
130674 +#include "../common/mem.h"         /* low level memory routines */
130675 +#define FSE_STATIC_LINKING_ONLY
130676 +#include "../common/fse.h"
130677 +#define HUF_STATIC_LINKING_ONLY
130678 +#include "../common/huf.h"
130679 +#include <linux/xxhash.h> /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */
130680 +#include "../common/zstd_internal.h"  /* blockProperties_t */
130681 +#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
130682 +#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
130683 +#include "zstd_decompress_block.h"   /* ZSTD_decompressBlock_internal */
130688 +/*************************************
130689 + * Multiple DDicts Hashset internals *
130690 + *************************************/
130692 +#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
130693 +#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3   /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
130694 +                                                     * Currently, that means a 0.75 load factor.
130695 +                                                     * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
130696 +                                                     * the load factor of the ddict hash set.
130697 +                                                     */
130699 +#define DDICT_HASHSET_TABLE_BASE_SIZE 64
130700 +#define DDICT_HASHSET_RESIZE_FACTOR 2
130702 +/* Hash function to determine starting position of dict insertion within the table
130703 + * Returns an index between [0, hashSet->ddictPtrTableSize]
130704 + */
130705 +static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
130706 +    const U64 hash = xxh64(&dictID, sizeof(U32), 0);
130707 +    /* DDict ptr table size is a multiple of 2, use size - 1 as mask to get index within [0, hashSet->ddictPtrTableSize) */
130708 +    return hash & (hashSet->ddictPtrTableSize - 1);
130711 +/* Adds DDict to a hashset without resizing it.
130712 + * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
130713 + * Returns 0 if successful, or a zstd error code if something went wrong.
130714 + */
130715 +static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
130716 +    const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
130717 +    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
130718 +    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
130719 +    RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
130720 +    DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
130721 +    while (hashSet->ddictPtrTable[idx] != NULL) {
130722 +        /* Replace existing ddict if inserting ddict with same dictID */
130723 +        if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
130724 +            DEBUGLOG(4, "DictID already exists, replacing rather than adding");
130725 +            hashSet->ddictPtrTable[idx] = ddict;
130726 +            return 0;
130727 +        }
130728 +        idx &= idxRangeMask;
130729 +        idx++;
130730 +    }
130731 +    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
130732 +    hashSet->ddictPtrTable[idx] = ddict;
130733 +    hashSet->ddictPtrCount++;
130734 +    return 0;
130737 +/* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and
130738 + * rehashes all values, allocates new table, frees old table.
130739 + * Returns 0 on success, otherwise a zstd error code.
130740 + */
130741 +static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
130742 +    size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
130743 +    const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
130744 +    const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
130745 +    size_t oldTableSize = hashSet->ddictPtrTableSize;
130746 +    size_t i;
130748 +    DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
130749 +    RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
130750 +    hashSet->ddictPtrTable = newTable;
130751 +    hashSet->ddictPtrTableSize = newTableSize;
130752 +    hashSet->ddictPtrCount = 0;
130753 +    for (i = 0; i < oldTableSize; ++i) {
130754 +        if (oldTable[i] != NULL) {
130755 +            FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
130756 +        }
130757 +    }
130758 +    ZSTD_customFree((void*)oldTable, customMem);
130759 +    DEBUGLOG(4, "Finished re-hash");
130760 +    return 0;
130763 +/* Fetches a DDict with the given dictID
130764 + * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
130765 + */
130766 +static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
130767 +    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
130768 +    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
130769 +    DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
130770 +    for (;;) {
130771 +        size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
130772 +        if (currDictID == dictID || currDictID == 0) {
130773 +            /* currDictID == 0 implies a NULL ddict entry */
130774 +            break;
130775 +        } else {
130776 +            idx &= idxRangeMask;    /* Goes to start of table when we reach the end */
130777 +            idx++;
130778 +        }
130779 +    }
130780 +    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
130781 +    return hashSet->ddictPtrTable[idx];
130784 +/* Allocates space for and returns a ddict hash set
130785 + * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
130786 + * Returns NULL if allocation failed.
130787 + */
130788 +static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
130789 +    ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
130790 +    DEBUGLOG(4, "Allocating new hash set");
130791 +    ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
130792 +    ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
130793 +    ret->ddictPtrCount = 0;
130794 +    if (!ret || !ret->ddictPtrTable) {
130795 +        return NULL;
130796 +    }
130797 +    return ret;
130800 +/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
130801 + * Note: The ZSTD_DDict* within the table are NOT freed.
130802 + */
130803 +static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
130804 +    DEBUGLOG(4, "Freeing ddict hash set");
130805 +    if (hashSet && hashSet->ddictPtrTable) {
130806 +        ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
130807 +    }
130808 +    if (hashSet) {
130809 +        ZSTD_customFree(hashSet, customMem);
130810 +    }
130813 +/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
130814 + * Returns 0 on success, or a ZSTD error.
130815 + */
130816 +static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
130817 +    DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
130818 +    if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT / hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT != 0) {
130819 +        FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
130820 +    }
130821 +    FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
130822 +    return 0;
130825 +/*-*************************************************************
130826 +*   Context management
130827 +***************************************************************/
130828 +size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
130830 +    if (dctx==NULL) return 0;   /* support sizeof NULL */
130831 +    return sizeof(*dctx)
130832 +           + ZSTD_sizeof_DDict(dctx->ddictLocal)
130833 +           + dctx->inBuffSize + dctx->outBuffSize;
130836 +size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
130839 +static size_t ZSTD_startingInputLength(ZSTD_format_e format)
130841 +    size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
130842 +    /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
130843 +    assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
130844 +    return startingInputLength;
130847 +static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
130849 +    assert(dctx->streamStage == zdss_init);
130850 +    dctx->format = ZSTD_f_zstd1;
130851 +    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
130852 +    dctx->outBufferMode = ZSTD_bm_buffered;
130853 +    dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
130854 +    dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
130857 +static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
130859 +    dctx->staticSize  = 0;
130860 +    dctx->ddict       = NULL;
130861 +    dctx->ddictLocal  = NULL;
130862 +    dctx->dictEnd     = NULL;
130863 +    dctx->ddictIsCold = 0;
130864 +    dctx->dictUses = ZSTD_dont_use;
130865 +    dctx->inBuff      = NULL;
130866 +    dctx->inBuffSize  = 0;
130867 +    dctx->outBuffSize = 0;
130868 +    dctx->streamStage = zdss_init;
130869 +    dctx->legacyContext = NULL;
130870 +    dctx->previousLegacyVersion = 0;
130871 +    dctx->noForwardProgress = 0;
130872 +    dctx->oversizedDuration = 0;
130873 +    dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
130874 +    dctx->ddictSet = NULL;
130875 +    ZSTD_DCtx_resetParameters(dctx);
130876 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
130877 +    dctx->dictContentEndForFuzzing = NULL;
130878 +#endif
130881 +ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
130883 +    ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
130885 +    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
130886 +    if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL;  /* minimum size */
130888 +    ZSTD_initDCtx_internal(dctx);
130889 +    dctx->staticSize = workspaceSize;
130890 +    dctx->inBuff = (char*)(dctx+1);
130891 +    return dctx;
130894 +ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
130896 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
130898 +    {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
130899 +        if (!dctx) return NULL;
130900 +        dctx->customMem = customMem;
130901 +        ZSTD_initDCtx_internal(dctx);
130902 +        return dctx;
130903 +    }
130906 +ZSTD_DCtx* ZSTD_createDCtx(void)
130908 +    DEBUGLOG(3, "ZSTD_createDCtx");
130909 +    return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
130912 +static void ZSTD_clearDict(ZSTD_DCtx* dctx)
130914 +    ZSTD_freeDDict(dctx->ddictLocal);
130915 +    dctx->ddictLocal = NULL;
130916 +    dctx->ddict = NULL;
130917 +    dctx->dictUses = ZSTD_dont_use;
130920 +size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
130922 +    if (dctx==NULL) return 0;   /* support free on NULL */
130923 +    RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
130924 +    {   ZSTD_customMem const cMem = dctx->customMem;
130925 +        ZSTD_clearDict(dctx);
130926 +        ZSTD_customFree(dctx->inBuff, cMem);
130927 +        dctx->inBuff = NULL;
130928 +        if (dctx->ddictSet) {
130929 +            ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
130930 +            dctx->ddictSet = NULL;
130931 +        }
130932 +        ZSTD_customFree(dctx, cMem);
130933 +        return 0;
130934 +    }
130937 +/* no longer useful */
130938 +void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
130940 +    size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
130941 +    ZSTD_memcpy(dstDCtx, srcDCtx, toCopy);  /* no need to copy workspace */
130944 +/* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on
130945 + * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then
130946 + * accordingly sets the ddict to be used to decompress the frame.
130948 + * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
130950 + * ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
130951 + */
130952 +static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
130953 +    assert(dctx->refMultipleDDicts && dctx->ddictSet);
130954 +    DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
130955 +    if (dctx->ddict) {
130956 +        const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
130957 +        if (frameDDict) {
130958 +            DEBUGLOG(4, "DDict found!");
130959 +            ZSTD_clearDict(dctx);
130960 +            dctx->dictID = dctx->fParams.dictID;
130961 +            dctx->ddict = frameDDict;
130962 +            dctx->dictUses = ZSTD_use_indefinitely;
130963 +        }
130964 +    }
130968 +/*-*************************************************************
130969 + *   Frame header decoding
130970 + ***************************************************************/
130972 +/*! ZSTD_isFrame() :
130973 + *  Tells if the content of `buffer` starts with a valid Frame Identifier.
130974 + *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
130975 + *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
130976 + *  Note 3 : Skippable Frame Identifiers are considered valid. */
130977 +unsigned ZSTD_isFrame(const void* buffer, size_t size)
130979 +    if (size < ZSTD_FRAMEIDSIZE) return 0;
130980 +    {   U32 const magic = MEM_readLE32(buffer);
130981 +        if (magic == ZSTD_MAGICNUMBER) return 1;
130982 +        if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
130983 +    }
130984 +    return 0;
130987 +/** ZSTD_frameHeaderSize_internal() :
130988 + *  srcSize must be large enough to reach header size fields.
130989 + *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
130990 + * @return : size of the Frame Header
130991 + *           or an error code, which can be tested with ZSTD_isError() */
130992 +static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
130994 +    size_t const minInputSize = ZSTD_startingInputLength(format);
130995 +    RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, "");
130997 +    {   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
130998 +        U32 const dictID= fhd & 3;
130999 +        U32 const singleSegment = (fhd >> 5) & 1;
131000 +        U32 const fcsId = fhd >> 6;
131001 +        return minInputSize + !singleSegment
131002 +             + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
131003 +             + (singleSegment && !fcsId);
131004 +    }
131007 +/** ZSTD_frameHeaderSize() :
131008 + *  srcSize must be >= ZSTD_frameHeaderSize_prefix.
131009 + * @return : size of the Frame Header,
131010 + *           or an error code (if srcSize is too small) */
131011 +size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
131013 +    return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
131017 +/** ZSTD_getFrameHeader_advanced() :
131018 + *  decode Frame Header, or require larger `srcSize`.
131019 + *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
131020 + * @return : 0, `zfhPtr` is correctly filled,
131021 + *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
131022 + *           or an error code, which can be tested using ZSTD_isError() */
131023 +size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
131025 +    const BYTE* ip = (const BYTE*)src;
131026 +    size_t const minInputSize = ZSTD_startingInputLength(format);
131028 +    ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */
131029 +    if (srcSize < minInputSize) return minInputSize;
131030 +    RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
131032 +    if ( (format != ZSTD_f_zstd1_magicless)
131033 +      && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
131034 +        if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
131035 +            /* skippable frame */
131036 +            if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
131037 +                return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
131038 +            ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
131039 +            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
131040 +            zfhPtr->frameType = ZSTD_skippableFrame;
131041 +            return 0;
131042 +        }
131043 +        RETURN_ERROR(prefix_unknown, "");
131044 +    }
131046 +    /* ensure there is enough `srcSize` to fully read/decode frame header */
131047 +    {   size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
131048 +        if (srcSize < fhsize) return fhsize;
131049 +        zfhPtr->headerSize = (U32)fhsize;
131050 +    }
131052 +    {   BYTE const fhdByte = ip[minInputSize-1];
131053 +        size_t pos = minInputSize;
131054 +        U32 const dictIDSizeCode = fhdByte&3;
131055 +        U32 const checksumFlag = (fhdByte>>2)&1;
131056 +        U32 const singleSegment = (fhdByte>>5)&1;
131057 +        U32 const fcsID = fhdByte>>6;
131058 +        U64 windowSize = 0;
131059 +        U32 dictID = 0;
131060 +        U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
131061 +        RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
131062 +                        "reserved bits, must be zero");
131064 +        if (!singleSegment) {
131065 +            BYTE const wlByte = ip[pos++];
131066 +            U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
131067 +            RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, "");
131068 +            windowSize = (1ULL << windowLog);
131069 +            windowSize += (windowSize >> 3) * (wlByte&7);
131070 +        }
131071 +        switch(dictIDSizeCode)
131072 +        {
131073 +            default: assert(0);  /* impossible */
131074 +            case 0 : break;
131075 +            case 1 : dictID = ip[pos]; pos++; break;
131076 +            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
131077 +            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
131078 +        }
131079 +        switch(fcsID)
131080 +        {
131081 +            default: assert(0);  /* impossible */
131082 +            case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
131083 +            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
131084 +            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
131085 +            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
131086 +        }
131087 +        if (singleSegment) windowSize = frameContentSize;
131089 +        zfhPtr->frameType = ZSTD_frame;
131090 +        zfhPtr->frameContentSize = frameContentSize;
131091 +        zfhPtr->windowSize = windowSize;
131092 +        zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
131093 +        zfhPtr->dictID = dictID;
131094 +        zfhPtr->checksumFlag = checksumFlag;
131095 +    }
131096 +    return 0;
131099 +/** ZSTD_getFrameHeader() :
131100 + *  decode Frame Header, or require larger `srcSize`.
131101 + *  note : this function does not consume input, it only reads it.
131102 + * @return : 0, `zfhPtr` is correctly filled,
131103 + *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
131104 + *           or an error code, which can be tested using ZSTD_isError() */
131105 +size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
131107 +    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
131111 +/** ZSTD_getFrameContentSize() :
131112 + *  compatible with legacy mode
131113 + * @return : decompressed size of the single frame pointed to be `src` if known, otherwise
131114 + *         - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
131115 + *         - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
131116 +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
131118 +    {   ZSTD_frameHeader zfh;
131119 +        if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
131120 +            return ZSTD_CONTENTSIZE_ERROR;
131121 +        if (zfh.frameType == ZSTD_skippableFrame) {
131122 +            return 0;
131123 +        } else {
131124 +            return zfh.frameContentSize;
131125 +    }   }
131128 +static size_t readSkippableFrameSize(void const* src, size_t srcSize)
131130 +    size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
131131 +    U32 sizeU32;
131133 +    RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");
131135 +    sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
131136 +    RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
131137 +                    frameParameter_unsupported, "");
131138 +    {
131139 +        size_t const skippableSize = skippableHeaderSize + sizeU32;
131140 +        RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
131141 +        return skippableSize;
131142 +    }
131145 +/** ZSTD_findDecompressedSize() :
131146 + *  compatible with legacy mode
131147 + *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
131148 + *      skippable frames
131149 + *  @return : decompressed size of the frames contained */
131150 +unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
131152 +    unsigned long long totalDstSize = 0;
131154 +    while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
131155 +        U32 const magicNumber = MEM_readLE32(src);
131157 +        if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
131158 +            size_t const skippableSize = readSkippableFrameSize(src, srcSize);
131159 +            if (ZSTD_isError(skippableSize)) {
131160 +                return ZSTD_CONTENTSIZE_ERROR;
131161 +            }
131162 +            assert(skippableSize <= srcSize);
131164 +            src = (const BYTE *)src + skippableSize;
131165 +            srcSize -= skippableSize;
131166 +            continue;
131167 +        }
131169 +        {   unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
131170 +            if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
131172 +            /* check for overflow */
131173 +            if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
131174 +            totalDstSize += ret;
131175 +        }
131176 +        {   size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
131177 +            if (ZSTD_isError(frameSrcSize)) {
131178 +                return ZSTD_CONTENTSIZE_ERROR;
131179 +            }
131181 +            src = (const BYTE *)src + frameSrcSize;
131182 +            srcSize -= frameSrcSize;
131183 +        }
131184 +    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
131186 +    if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
131188 +    return totalDstSize;
131191 +/** ZSTD_getDecompressedSize() :
131192 + *  compatible with legacy mode
131193 + * @return : decompressed size if known, 0 otherwise
131194 +             note : 0 can mean any of the following :
131195 +                   - frame content is empty
131196 +                   - decompressed size field is not present in frame header
131197 +                   - frame header unknown / not supported
131198 +                   - frame header not complete (`srcSize` too small) */
131199 +unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
131201 +    unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
131202 +    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
131203 +    return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
131207 +/** ZSTD_decodeFrameHeader() :
131208 + * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
131209 + * If multiple DDict references are enabled, also will choose the correct DDict to use.
131210 + * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
131211 +static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
131213 +    size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
131214 +    if (ZSTD_isError(result)) return result;    /* invalid header */
131215 +    RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
131217 +    /* Reference DDict requested by frame if dctx references multiple ddicts */
131218 +    if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) {
131219 +        ZSTD_DCtx_selectFrameDDict(dctx);
131220 +    }
131222 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
131223 +    /* Skip the dictID check in fuzzing mode, because it makes the search
131224 +     * harder.
131225 +     */
131226 +    RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
131227 +                    dictionary_wrong, "");
131228 +#endif
131229 +    dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0;
131230 +    if (dctx->validateChecksum) xxh64_reset(&dctx->xxhState, 0);
131231 +    dctx->processedCSize += headerSize;
131232 +    return 0;
131235 +static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
131237 +    ZSTD_frameSizeInfo frameSizeInfo;
131238 +    frameSizeInfo.compressedSize = ret;
131239 +    frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
131240 +    return frameSizeInfo;
131243 +static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
131245 +    ZSTD_frameSizeInfo frameSizeInfo;
131246 +    ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
131249 +    if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
131250 +        && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
131251 +        frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
131252 +        assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
131253 +               frameSizeInfo.compressedSize <= srcSize);
131254 +        return frameSizeInfo;
131255 +    } else {
131256 +        const BYTE* ip = (const BYTE*)src;
131257 +        const BYTE* const ipstart = ip;
131258 +        size_t remainingSize = srcSize;
131259 +        size_t nbBlocks = 0;
131260 +        ZSTD_frameHeader zfh;
131262 +        /* Extract Frame Header */
131263 +        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
131264 +            if (ZSTD_isError(ret))
131265 +                return ZSTD_errorFrameSizeInfo(ret);
131266 +            if (ret > 0)
131267 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
131268 +        }
131270 +        ip += zfh.headerSize;
131271 +        remainingSize -= zfh.headerSize;
131273 +        /* Iterate over each block */
131274 +        while (1) {
131275 +            blockProperties_t blockProperties;
131276 +            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
131277 +            if (ZSTD_isError(cBlockSize))
131278 +                return ZSTD_errorFrameSizeInfo(cBlockSize);
131280 +            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
131281 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
131283 +            ip += ZSTD_blockHeaderSize + cBlockSize;
131284 +            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
131285 +            nbBlocks++;
131287 +            if (blockProperties.lastBlock) break;
131288 +        }
131290 +        /* Final frame content checksum */
131291 +        if (zfh.checksumFlag) {
131292 +            if (remainingSize < 4)
131293 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
131294 +            ip += 4;
131295 +        }
131297 +        frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
131298 +        frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
131299 +                                        ? zfh.frameContentSize
131300 +                                        : nbBlocks * zfh.blockSizeMax;
131301 +        return frameSizeInfo;
131302 +    }
131305 +/** ZSTD_findFrameCompressedSize() :
131306 + *  compatible with legacy mode
131307 + *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
131308 + *  `srcSize` must be at least as large as the frame contained
131309 + *  @return : the compressed size of the frame starting at `src` */
131310 +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
131312 +    ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
131313 +    return frameSizeInfo.compressedSize;
131316 +/** ZSTD_decompressBound() :
131317 + *  compatible with legacy mode
131318 + *  `src` must point to the start of a ZSTD frame or a skippeable frame
131319 + *  `srcSize` must be at least as large as the frame contained
131320 + *  @return : the maximum decompressed size of the compressed source
131321 + */
131322 +unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
131324 +    unsigned long long bound = 0;
131325 +    /* Iterate over each frame */
131326 +    while (srcSize > 0) {
131327 +        ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
131328 +        size_t const compressedSize = frameSizeInfo.compressedSize;
131329 +        unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
131330 +        if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
131331 +            return ZSTD_CONTENTSIZE_ERROR;
131332 +        assert(srcSize >= compressedSize);
131333 +        src = (const BYTE*)src + compressedSize;
131334 +        srcSize -= compressedSize;
131335 +        bound += decompressedBound;
131336 +    }
131337 +    return bound;
131341 +/*-*************************************************************
131342 + *   Frame decoding
131343 + ***************************************************************/
131345 +/** ZSTD_insertBlock() :
131346 + *  insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
131347 +size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
131349 +    DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
131350 +    ZSTD_checkContinuity(dctx, blockStart, blockSize);
131351 +    dctx->previousDstEnd = (const char*)blockStart + blockSize;
131352 +    return blockSize;
131356 +static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
131357 +                          const void* src, size_t srcSize)
131359 +    DEBUGLOG(5, "ZSTD_copyRawBlock");
131360 +    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
131361 +    if (dst == NULL) {
131362 +        if (srcSize == 0) return 0;
131363 +        RETURN_ERROR(dstBuffer_null, "");
131364 +    }
131365 +    ZSTD_memcpy(dst, src, srcSize);
131366 +    return srcSize;
131369 +static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
131370 +                               BYTE b,
131371 +                               size_t regenSize)
131373 +    RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
131374 +    if (dst == NULL) {
131375 +        if (regenSize == 0) return 0;
131376 +        RETURN_ERROR(dstBuffer_null, "");
131377 +    }
131378 +    ZSTD_memset(dst, b, regenSize);
131379 +    return regenSize;
131382 +static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
131384 +    (void)dctx;
131385 +    (void)uncompressedSize;
131386 +    (void)compressedSize;
131387 +    (void)streaming;
131391 +/*! ZSTD_decompressFrame() :
131392 + * @dctx must be properly initialized
131393 + *  will update *srcPtr and *srcSizePtr,
131394 + *  to make *srcPtr progress by one frame. */
131395 +static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
131396 +                                   void* dst, size_t dstCapacity,
131397 +                             const void** srcPtr, size_t *srcSizePtr)
131399 +    const BYTE* const istart = (const BYTE*)(*srcPtr);
131400 +    const BYTE* ip = istart;
131401 +    BYTE* const ostart = (BYTE*)dst;
131402 +    BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart;
131403 +    BYTE* op = ostart;
131404 +    size_t remainingSrcSize = *srcSizePtr;
131406 +    DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
131408 +    /* check */
131409 +    RETURN_ERROR_IF(
131410 +        remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
131411 +        srcSize_wrong, "");
131413 +    /* Frame Header */
131414 +    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
131415 +                ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
131416 +        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
131417 +        RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
131418 +                        srcSize_wrong, "");
131419 +        FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , "");
131420 +        ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
131421 +    }
131423 +    /* Loop on each block */
131424 +    while (1) {
131425 +        size_t decodedSize;
131426 +        blockProperties_t blockProperties;
131427 +        size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
131428 +        if (ZSTD_isError(cBlockSize)) return cBlockSize;
131430 +        ip += ZSTD_blockHeaderSize;
131431 +        remainingSrcSize -= ZSTD_blockHeaderSize;
131432 +        RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
131434 +        switch(blockProperties.blockType)
131435 +        {
131436 +        case bt_compressed:
131437 +            decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1);
131438 +            break;
131439 +        case bt_raw :
131440 +            decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
131441 +            break;
131442 +        case bt_rle :
131443 +            decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize);
131444 +            break;
131445 +        case bt_reserved :
131446 +        default:
131447 +            RETURN_ERROR(corruption_detected, "invalid block type");
131448 +        }
131450 +        if (ZSTD_isError(decodedSize)) return decodedSize;
131451 +        if (dctx->validateChecksum)
131452 +            xxh64_update(&dctx->xxhState, op, decodedSize);
131453 +        if (decodedSize != 0)
131454 +            op += decodedSize;
131455 +        assert(ip != NULL);
131456 +        ip += cBlockSize;
131457 +        remainingSrcSize -= cBlockSize;
131458 +        if (blockProperties.lastBlock) break;
131459 +    }
131461 +    if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
131462 +        RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
131463 +                        corruption_detected, "");
131464 +    }
131465 +    if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
131466 +        RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
131467 +        if (!dctx->forceIgnoreChecksum) {
131468 +            U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
131469 +            U32 checkRead;
131470 +            checkRead = MEM_readLE32(ip);
131471 +            RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
131472 +        }
131473 +        ip += 4;
131474 +        remainingSrcSize -= 4;
131475 +    }
131476 +    ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
131477 +    /* Allow caller to get size read */
131478 +    *srcPtr = ip;
131479 +    *srcSizePtr = remainingSrcSize;
131480 +    return (size_t)(op-ostart);
131483 +static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
131484 +                                        void* dst, size_t dstCapacity,
131485 +                                  const void* src, size_t srcSize,
131486 +                                  const void* dict, size_t dictSize,
131487 +                                  const ZSTD_DDict* ddict)
131489 +    void* const dststart = dst;
131490 +    int moreThan1Frame = 0;
131492 +    DEBUGLOG(5, "ZSTD_decompressMultiFrame");
131493 +    assert(dict==NULL || ddict==NULL);  /* either dict or ddict set, not both */
131495 +    if (ddict) {
131496 +        dict = ZSTD_DDict_dictContent(ddict);
131497 +        dictSize = ZSTD_DDict_dictSize(ddict);
131498 +    }
131500 +    while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
131503 +        {   U32 const magicNumber = MEM_readLE32(src);
131504 +            DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
131505 +                        (unsigned)magicNumber, ZSTD_MAGICNUMBER);
131506 +            if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
131507 +                size_t const skippableSize = readSkippableFrameSize(src, srcSize);
131508 +                FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
131509 +                assert(skippableSize <= srcSize);
131511 +                src = (const BYTE *)src + skippableSize;
131512 +                srcSize -= skippableSize;
131513 +                continue;
131514 +        }   }
131516 +        if (ddict) {
131517 +            /* we were called from ZSTD_decompress_usingDDict */
131518 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), "");
131519 +        } else {
131520 +            /* this will initialize correctly with no dict if dict == NULL, so
131521 +             * use this in all cases but ddict */
131522 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), "");
131523 +        }
131524 +        ZSTD_checkContinuity(dctx, dst, dstCapacity);
131526 +        {   const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
131527 +                                                    &src, &srcSize);
131528 +            RETURN_ERROR_IF(
131529 +                (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
131530 +             && (moreThan1Frame==1),
131531 +                srcSize_wrong,
131532 +                "At least one frame successfully completed, "
131533 +                "but following bytes are garbage: "
131534 +                "it's more likely to be a srcSize error, "
131535 +                "specifying more input bytes than size of frame(s). "
131536 +                "Note: one could be unlucky, it might be a corruption error instead, "
131537 +                "happening right at the place where we expect zstd magic bytes. "
131538 +                "But this is _much_ less likely than a srcSize field error.");
131539 +            if (ZSTD_isError(res)) return res;
131540 +            assert(res <= dstCapacity);
131541 +            if (res != 0)
131542 +                dst = (BYTE*)dst + res;
131543 +            dstCapacity -= res;
131544 +        }
131545 +        moreThan1Frame = 1;
131546 +    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
131548 +    RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
131550 +    return (size_t)((BYTE*)dst - (BYTE*)dststart);
131553 +size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
131554 +                                 void* dst, size_t dstCapacity,
131555 +                           const void* src, size_t srcSize,
131556 +                           const void* dict, size_t dictSize)
131558 +    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
131562 +static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
131564 +    switch (dctx->dictUses) {
131565 +    default:
131566 +        assert(0 /* Impossible */);
131567 +        /* fall-through */
131568 +    case ZSTD_dont_use:
131569 +        ZSTD_clearDict(dctx);
131570 +        return NULL;
131571 +    case ZSTD_use_indefinitely:
131572 +        return dctx->ddict;
131573 +    case ZSTD_use_once:
131574 +        dctx->dictUses = ZSTD_dont_use;
131575 +        return dctx->ddict;
131576 +    }
131579 +size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
131581 +    return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
131585 +size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
131587 +#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
131588 +    size_t regenSize;
131589 +    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
131590 +    RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
131591 +    regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
131592 +    ZSTD_freeDCtx(dctx);
131593 +    return regenSize;
131594 +#else   /* stack mode */
131595 +    ZSTD_DCtx dctx;
131596 +    ZSTD_initDCtx_internal(&dctx);
131597 +    return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
131598 +#endif
131602 +/*-**************************************
131603 +*   Advanced Streaming Decompression API
131604 +*   Bufferless and synchronous
131605 +****************************************/
131606 +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
131609 + * Similar to ZSTD_nextSrcSizeToDecompress(), but when when a block input can be streamed,
131610 + * we allow taking a partial block as the input. Currently only raw uncompressed blocks can
131611 + * be streamed.
131613 + * For blocks that can be streamed, this allows us to reduce the latency until we produce
131614 + * output, and avoid copying the input.
131616 + * @param inputSize - The total amount of input that the caller currently has.
131617 + */
131618 +static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) {
131619 +    if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock))
131620 +        return dctx->expected;
131621 +    if (dctx->bType != bt_raw)
131622 +        return dctx->expected;
131623 +    return MIN(MAX(inputSize, 1), dctx->expected);
131626 +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
131627 +    switch(dctx->stage)
131628 +    {
131629 +    default:   /* should not happen */
131630 +        assert(0);
131631 +    case ZSTDds_getFrameHeaderSize:
131632 +    case ZSTDds_decodeFrameHeader:
131633 +        return ZSTDnit_frameHeader;
131634 +    case ZSTDds_decodeBlockHeader:
131635 +        return ZSTDnit_blockHeader;
131636 +    case ZSTDds_decompressBlock:
131637 +        return ZSTDnit_block;
131638 +    case ZSTDds_decompressLastBlock:
131639 +        return ZSTDnit_lastBlock;
131640 +    case ZSTDds_checkChecksum:
131641 +        return ZSTDnit_checksum;
131642 +    case ZSTDds_decodeSkippableHeader:
131643 +    case ZSTDds_skipFrame:
131644 +        return ZSTDnit_skippableFrame;
131645 +    }
131648 +static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
131650 +/** ZSTD_decompressContinue() :
131651 + *  srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
131652 + *  @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity)
131653 + *            or an error code, which can be tested using ZSTD_isError() */
131654 +size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
131656 +    DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
131657 +    /* Sanity check */
131658 +    RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed");
131659 +    ZSTD_checkContinuity(dctx, dst, dstCapacity);
131661 +    dctx->processedCSize += srcSize;
131663 +    switch (dctx->stage)
131664 +    {
131665 +    case ZSTDds_getFrameHeaderSize :
131666 +        assert(src != NULL);
131667 +        if (dctx->format == ZSTD_f_zstd1) {  /* allows header */
131668 +            assert(srcSize >= ZSTD_FRAMEIDSIZE);  /* to read skippable magic number */
131669 +            if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */
131670 +                ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
131671 +                dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize;  /* remaining to load to get full skippable frame header */
131672 +                dctx->stage = ZSTDds_decodeSkippableHeader;
131673 +                return 0;
131674 +        }   }
131675 +        dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
131676 +        if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
131677 +        ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
131678 +        dctx->expected = dctx->headerSize - srcSize;
131679 +        dctx->stage = ZSTDds_decodeFrameHeader;
131680 +        return 0;
131682 +    case ZSTDds_decodeFrameHeader:
131683 +        assert(src != NULL);
131684 +        ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
131685 +        FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
131686 +        dctx->expected = ZSTD_blockHeaderSize;
131687 +        dctx->stage = ZSTDds_decodeBlockHeader;
131688 +        return 0;
131690 +    case ZSTDds_decodeBlockHeader:
131691 +        {   blockProperties_t bp;
131692 +            size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
131693 +            if (ZSTD_isError(cBlockSize)) return cBlockSize;
131694 +            RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum");
131695 +            dctx->expected = cBlockSize;
131696 +            dctx->bType = bp.blockType;
131697 +            dctx->rleSize = bp.origSize;
131698 +            if (cBlockSize) {
131699 +                dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
131700 +                return 0;
131701 +            }
131702 +            /* empty block */
131703 +            if (bp.lastBlock) {
131704 +                if (dctx->fParams.checksumFlag) {
131705 +                    dctx->expected = 4;
131706 +                    dctx->stage = ZSTDds_checkChecksum;
131707 +                } else {
131708 +                    dctx->expected = 0; /* end of frame */
131709 +                    dctx->stage = ZSTDds_getFrameHeaderSize;
131710 +                }
131711 +            } else {
131712 +                dctx->expected = ZSTD_blockHeaderSize;  /* jump to next header */
131713 +                dctx->stage = ZSTDds_decodeBlockHeader;
131714 +            }
131715 +            return 0;
131716 +        }
131718 +    case ZSTDds_decompressLastBlock:
131719 +    case ZSTDds_decompressBlock:
131720 +        DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
131721 +        {   size_t rSize;
131722 +            switch(dctx->bType)
131723 +            {
131724 +            case bt_compressed:
131725 +                DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
131726 +                rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
131727 +                dctx->expected = 0;  /* Streaming not supported */
131728 +                break;
131729 +            case bt_raw :
131730 +                assert(srcSize <= dctx->expected);
131731 +                rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
131732 +                FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed");
131733 +                assert(rSize == srcSize);
131734 +                dctx->expected -= rSize;
131735 +                break;
131736 +            case bt_rle :
131737 +                rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
131738 +                dctx->expected = 0;  /* Streaming not supported */
131739 +                break;
131740 +            case bt_reserved :   /* should never happen */
131741 +            default:
131742 +                RETURN_ERROR(corruption_detected, "invalid block type");
131743 +            }
131744 +            FORWARD_IF_ERROR(rSize, "");
131745 +            RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
131746 +            DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
131747 +            dctx->decodedSize += rSize;
131748 +            if (dctx->validateChecksum) xxh64_update(&dctx->xxhState, dst, rSize);
131749 +            dctx->previousDstEnd = (char*)dst + rSize;
131751 +            /* Stay on the same stage until we are finished streaming the block. */
131752 +            if (dctx->expected > 0) {
131753 +                return rSize;
131754 +            }
131756 +            if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
131757 +                DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
131758 +                RETURN_ERROR_IF(
131759 +                    dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
131760 +                 && dctx->decodedSize != dctx->fParams.frameContentSize,
131761 +                    corruption_detected, "");
131762 +                if (dctx->fParams.checksumFlag) {  /* another round for frame checksum */
131763 +                    dctx->expected = 4;
131764 +                    dctx->stage = ZSTDds_checkChecksum;
131765 +                } else {
131766 +                    ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
131767 +                    dctx->expected = 0;   /* ends here */
131768 +                    dctx->stage = ZSTDds_getFrameHeaderSize;
131769 +                }
131770 +            } else {
131771 +                dctx->stage = ZSTDds_decodeBlockHeader;
131772 +                dctx->expected = ZSTD_blockHeaderSize;
131773 +            }
131774 +            return rSize;
131775 +        }
131777 +    case ZSTDds_checkChecksum:
131778 +        assert(srcSize == 4);  /* guaranteed by dctx->expected */
131779 +        {
131780 +            if (dctx->validateChecksum) {
131781 +                U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
131782 +                U32 const check32 = MEM_readLE32(src);
131783 +                DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
131784 +                RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
131785 +            }
131786 +            ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
131787 +            dctx->expected = 0;
131788 +            dctx->stage = ZSTDds_getFrameHeaderSize;
131789 +            return 0;
131790 +        }
131792 +    case ZSTDds_decodeSkippableHeader:
131793 +        assert(src != NULL);
131794 +        assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
131795 +        ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize);   /* complete skippable header */
131796 +        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE);   /* note : dctx->expected can grow seriously large, beyond local buffer size */
131797 +        dctx->stage = ZSTDds_skipFrame;
131798 +        return 0;
131800 +    case ZSTDds_skipFrame:
131801 +        dctx->expected = 0;
131802 +        dctx->stage = ZSTDds_getFrameHeaderSize;
131803 +        return 0;
131805 +    default:
131806 +        assert(0);   /* impossible */
131807 +        RETURN_ERROR(GENERIC, "impossible to reach");   /* some compiler require default to do something */
131808 +    }
131812 +static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
131814 +    dctx->dictEnd = dctx->previousDstEnd;
131815 +    dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
131816 +    dctx->prefixStart = dict;
131817 +    dctx->previousDstEnd = (const char*)dict + dictSize;
131818 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
131819 +    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
131820 +    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
131821 +#endif
131822 +    return 0;
131825 +/*! ZSTD_loadDEntropy() :
131826 + *  dict : must point at beginning of a valid zstd dictionary.
131827 + * @return : size of entropy tables read */
131828 +size_t
131829 +ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
131830 +                  const void* const dict, size_t const dictSize)
131832 +    const BYTE* dictPtr = (const BYTE*)dict;
131833 +    const BYTE* const dictEnd = dictPtr + dictSize;
131835 +    RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small");
131836 +    assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
131837 +    dictPtr += 8;   /* skip header = magic + dictID */
131839 +    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
131840 +    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
131841 +    ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
131842 +    {   void* const workspace = &entropy->LLTable;   /* use fse tables as temporary workspace; implies fse tables are grouped together */
131843 +        size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
131844 +#ifdef HUF_FORCE_DECOMPRESS_X1
131845 +        /* in minimal huffman, we always use X1 variants */
131846 +        size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
131847 +                                                dictPtr, dictEnd - dictPtr,
131848 +                                                workspace, workspaceSize);
131849 +#else
131850 +        size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
131851 +                                                dictPtr, (size_t)(dictEnd - dictPtr),
131852 +                                                workspace, workspaceSize);
131853 +#endif
131854 +        RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
131855 +        dictPtr += hSize;
131856 +    }
131858 +    {   short offcodeNCount[MaxOff+1];
131859 +        unsigned offcodeMaxValue = MaxOff, offcodeLog;
131860 +        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
131861 +        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
131862 +        RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
131863 +        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
131864 +        ZSTD_buildFSETable( entropy->OFTable,
131865 +                            offcodeNCount, offcodeMaxValue,
131866 +                            OF_base, OF_bits,
131867 +                            offcodeLog,
131868 +                            entropy->workspace, sizeof(entropy->workspace),
131869 +                            /* bmi2 */0);
131870 +        dictPtr += offcodeHeaderSize;
131871 +    }
131873 +    {   short matchlengthNCount[MaxML+1];
131874 +        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
131875 +        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
131876 +        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
131877 +        RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
131878 +        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
131879 +        ZSTD_buildFSETable( entropy->MLTable,
131880 +                            matchlengthNCount, matchlengthMaxValue,
131881 +                            ML_base, ML_bits,
131882 +                            matchlengthLog,
131883 +                            entropy->workspace, sizeof(entropy->workspace),
131884 +                            /* bmi2 */ 0);
131885 +        dictPtr += matchlengthHeaderSize;
131886 +    }
131888 +    {   short litlengthNCount[MaxLL+1];
131889 +        unsigned litlengthMaxValue = MaxLL, litlengthLog;
131890 +        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
131891 +        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
131892 +        RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, "");
131893 +        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
131894 +        ZSTD_buildFSETable( entropy->LLTable,
131895 +                            litlengthNCount, litlengthMaxValue,
131896 +                            LL_base, LL_bits,
131897 +                            litlengthLog,
131898 +                            entropy->workspace, sizeof(entropy->workspace),
131899 +                            /* bmi2 */ 0);
131900 +        dictPtr += litlengthHeaderSize;
131901 +    }
131903 +    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
131904 +    {   int i;
131905 +        size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
131906 +        for (i=0; i<3; i++) {
131907 +            U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
131908 +            RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
131909 +                            dictionary_corrupted, "");
131910 +            entropy->rep[i] = rep;
131911 +    }   }
131913 +    return (size_t)(dictPtr - (const BYTE*)dict);
131916 +static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
131918 +    if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
131919 +    {   U32 const magic = MEM_readLE32(dict);
131920 +        if (magic != ZSTD_MAGIC_DICTIONARY) {
131921 +            return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
131922 +    }   }
131923 +    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
131925 +    /* load entropy tables */
131926 +    {   size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
131927 +        RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, "");
131928 +        dict = (const char*)dict + eSize;
131929 +        dictSize -= eSize;
131930 +    }
131931 +    dctx->litEntropy = dctx->fseEntropy = 1;
131933 +    /* reference dictionary content */
131934 +    return ZSTD_refDictContent(dctx, dict, dictSize);
131937 +size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
131939 +    assert(dctx != NULL);
131940 +    dctx->expected = ZSTD_startingInputLength(dctx->format);  /* dctx->format must be properly set */
131941 +    dctx->stage = ZSTDds_getFrameHeaderSize;
131942 +    dctx->processedCSize = 0;
131943 +    dctx->decodedSize = 0;
131944 +    dctx->previousDstEnd = NULL;
131945 +    dctx->prefixStart = NULL;
131946 +    dctx->virtualStart = NULL;
131947 +    dctx->dictEnd = NULL;
131948 +    dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
131949 +    dctx->litEntropy = dctx->fseEntropy = 0;
131950 +    dctx->dictID = 0;
131951 +    dctx->bType = bt_reserved;
131952 +    ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
131953 +    ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue));  /* initial repcodes */
131954 +    dctx->LLTptr = dctx->entropy.LLTable;
131955 +    dctx->MLTptr = dctx->entropy.MLTable;
131956 +    dctx->OFTptr = dctx->entropy.OFTable;
131957 +    dctx->HUFptr = dctx->entropy.hufTable;
131958 +    return 0;
131961 +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
131963 +    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
131964 +    if (dict && dictSize)
131965 +        RETURN_ERROR_IF(
131966 +            ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
131967 +            dictionary_corrupted, "");
131968 +    return 0;
131972 +/* ======   ZSTD_DDict   ====== */
131974 +size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
131976 +    DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
131977 +    assert(dctx != NULL);
131978 +    if (ddict) {
131979 +        const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
131980 +        size_t const dictSize = ZSTD_DDict_dictSize(ddict);
131981 +        const void* const dictEnd = dictStart + dictSize;
131982 +        dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
131983 +        DEBUGLOG(4, "DDict is %s",
131984 +                    dctx->ddictIsCold ? "~cold~" : "hot!");
131985 +    }
131986 +    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
131987 +    if (ddict) {   /* NULL ddict is equivalent to no dictionary */
131988 +        ZSTD_copyDDictParameters(dctx, ddict);
131989 +    }
131990 +    return 0;
131993 +/*! ZSTD_getDictID_fromDict() :
131994 + *  Provides the dictID stored within dictionary.
131995 + *  if @return == 0, the dictionary is not conformant with Zstandard specification.
131996 + *  It can still be loaded, but as a content-only dictionary. */
131997 +unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
131999 +    if (dictSize < 8) return 0;
132000 +    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
132001 +    return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
132004 +/*! ZSTD_getDictID_fromFrame() :
132005 + *  Provides the dictID required to decompress frame stored within `src`.
132006 + *  If @return == 0, the dictID could not be decoded.
132007 + *  This could for one of the following reasons :
132008 + *  - The frame does not require a dictionary (most common case).
132009 + *  - The frame was built with dictID intentionally removed.
132010 + *    Needed dictionary is a hidden information.
132011 + *    Note : this use case also happens when using a non-conformant dictionary.
132012 + *  - `srcSize` is too small, and as a result, frame header could not be decoded.
132013 + *    Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
132014 + *  - This is not a Zstandard frame.
132015 + *  When identifying the exact failure cause, it's possible to use
132016 + *  ZSTD_getFrameHeader(), which will provide a more precise error code. */
132017 +unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
132019 +    ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
132020 +    size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
132021 +    if (ZSTD_isError(hError)) return 0;
132022 +    return zfp.dictID;
132026 +/*! ZSTD_decompress_usingDDict() :
132027 +*   Decompression using a pre-digested Dictionary
132028 +*   Use dictionary without significant overhead. */
132029 +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
132030 +                                  void* dst, size_t dstCapacity,
132031 +                            const void* src, size_t srcSize,
132032 +                            const ZSTD_DDict* ddict)
132034 +    /* pass content and size in case legacy frames are encountered */
132035 +    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
132036 +                                     NULL, 0,
132037 +                                     ddict);
132041 +/*=====================================
132042 +*   Streaming decompression
132043 +*====================================*/
132045 +ZSTD_DStream* ZSTD_createDStream(void)
132047 +    DEBUGLOG(3, "ZSTD_createDStream");
132048 +    return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
132051 +ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
132053 +    return ZSTD_initStaticDCtx(workspace, workspaceSize);
132056 +ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
132058 +    return ZSTD_createDCtx_advanced(customMem);
132061 +size_t ZSTD_freeDStream(ZSTD_DStream* zds)
132063 +    return ZSTD_freeDCtx(zds);
132067 +/* ***  Initialization  *** */
132069 +size_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
132070 +size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
132072 +size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
132073 +                                   const void* dict, size_t dictSize,
132074 +                                         ZSTD_dictLoadMethod_e dictLoadMethod,
132075 +                                         ZSTD_dictContentType_e dictContentType)
132077 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
132078 +    ZSTD_clearDict(dctx);
132079 +    if (dict && dictSize != 0) {
132080 +        dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
132081 +        RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
132082 +        dctx->ddict = dctx->ddictLocal;
132083 +        dctx->dictUses = ZSTD_use_indefinitely;
132084 +    }
132085 +    return 0;
132088 +size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
132090 +    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
132093 +size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
132095 +    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
132098 +size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
132100 +    FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), "");
132101 +    dctx->dictUses = ZSTD_use_once;
132102 +    return 0;
132105 +size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
132107 +    return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
132111 +/* ZSTD_initDStream_usingDict() :
132112 + * return : expected size, aka ZSTD_startingInputLength().
132113 + * this function cannot fail */
132114 +size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
132116 +    DEBUGLOG(4, "ZSTD_initDStream_usingDict");
132117 +    FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , "");
132118 +    FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , "");
132119 +    return ZSTD_startingInputLength(zds->format);
132122 +/* note : this variant can't fail */
132123 +size_t ZSTD_initDStream(ZSTD_DStream* zds)
132125 +    DEBUGLOG(4, "ZSTD_initDStream");
132126 +    return ZSTD_initDStream_usingDDict(zds, NULL);
132129 +/* ZSTD_initDStream_usingDDict() :
132130 + * ddict will just be referenced, and must outlive decompression session
132131 + * this function cannot fail */
132132 +size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
132134 +    FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
132135 +    FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
132136 +    return ZSTD_startingInputLength(dctx->format);
132139 +/* ZSTD_resetDStream() :
132140 + * return : expected size, aka ZSTD_startingInputLength().
132141 + * this function cannot fail */
132142 +size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
132144 +    FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
132145 +    return ZSTD_startingInputLength(dctx->format);
132149 +size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
132151 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
132152 +    ZSTD_clearDict(dctx);
132153 +    if (ddict) {
132154 +        dctx->ddict = ddict;
132155 +        dctx->dictUses = ZSTD_use_indefinitely;
132156 +        if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
132157 +            if (dctx->ddictSet == NULL) {
132158 +                dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
132159 +                if (!dctx->ddictSet) {
132160 +                    RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
132161 +                }
132162 +            }
132163 +            assert(!dctx->staticSize);  /* Impossible: ddictSet cannot have been allocated if static dctx */
132164 +            FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
132165 +        }
132166 +    }
132167 +    return 0;
132170 +/* ZSTD_DCtx_setMaxWindowSize() :
132171 + * note : no direct equivalence in ZSTD_DCtx_setParameter,
132172 + * since this version sets windowSize, and the other sets windowLog */
132173 +size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
132175 +    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
132176 +    size_t const min = (size_t)1 << bounds.lowerBound;
132177 +    size_t const max = (size_t)1 << bounds.upperBound;
132178 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
132179 +    RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, "");
132180 +    RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, "");
132181 +    dctx->maxWindowSize = maxWindowSize;
132182 +    return 0;
132185 +size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
132187 +    return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format);
132190 +ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
132192 +    ZSTD_bounds bounds = { 0, 0, 0 };
132193 +    switch(dParam) {
132194 +        case ZSTD_d_windowLogMax:
132195 +            bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
132196 +            bounds.upperBound = ZSTD_WINDOWLOG_MAX;
132197 +            return bounds;
132198 +        case ZSTD_d_format:
132199 +            bounds.lowerBound = (int)ZSTD_f_zstd1;
132200 +            bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
132201 +            ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
132202 +            return bounds;
132203 +        case ZSTD_d_stableOutBuffer:
132204 +            bounds.lowerBound = (int)ZSTD_bm_buffered;
132205 +            bounds.upperBound = (int)ZSTD_bm_stable;
132206 +            return bounds;
132207 +        case ZSTD_d_forceIgnoreChecksum:
132208 +            bounds.lowerBound = (int)ZSTD_d_validateChecksum;
132209 +            bounds.upperBound = (int)ZSTD_d_ignoreChecksum;
132210 +            return bounds;
132211 +        case ZSTD_d_refMultipleDDicts:
132212 +            bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
132213 +            bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
132214 +            return bounds;
132215 +        default:;
132216 +    }
132217 +    bounds.error = ERROR(parameter_unsupported);
132218 +    return bounds;
132221 +/* ZSTD_dParam_withinBounds:
132222 + * @return 1 if value is within dParam bounds,
132223 + * 0 otherwise */
132224 +static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
132226 +    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
132227 +    if (ZSTD_isError(bounds.error)) return 0;
132228 +    if (value < bounds.lowerBound) return 0;
132229 +    if (value > bounds.upperBound) return 0;
132230 +    return 1;
132233 +#define CHECK_DBOUNDS(p,v) {                \
132234 +    RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \
132237 +size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value)
132239 +    switch (param) {
132240 +        case ZSTD_d_windowLogMax:
132241 +            *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize);
132242 +            return 0;
132243 +        case ZSTD_d_format:
132244 +            *value = (int)dctx->format;
132245 +            return 0;
132246 +        case ZSTD_d_stableOutBuffer:
132247 +            *value = (int)dctx->outBufferMode;
132248 +            return 0;
132249 +        case ZSTD_d_forceIgnoreChecksum:
132250 +            *value = (int)dctx->forceIgnoreChecksum;
132251 +            return 0;
132252 +        case ZSTD_d_refMultipleDDicts:
132253 +            *value = (int)dctx->refMultipleDDicts;
132254 +            return 0;
132255 +        default:;
132256 +    }
132257 +    RETURN_ERROR(parameter_unsupported, "");
132260 +size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
132262 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
132263 +    switch(dParam) {
132264 +        case ZSTD_d_windowLogMax:
132265 +            if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
132266 +            CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
132267 +            dctx->maxWindowSize = ((size_t)1) << value;
132268 +            return 0;
132269 +        case ZSTD_d_format:
132270 +            CHECK_DBOUNDS(ZSTD_d_format, value);
132271 +            dctx->format = (ZSTD_format_e)value;
132272 +            return 0;
132273 +        case ZSTD_d_stableOutBuffer:
132274 +            CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value);
132275 +            dctx->outBufferMode = (ZSTD_bufferMode_e)value;
132276 +            return 0;
132277 +        case ZSTD_d_forceIgnoreChecksum:
132278 +            CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value);
132279 +            dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value;
132280 +            return 0;
132281 +        case ZSTD_d_refMultipleDDicts:
132282 +            CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value);
132283 +            if (dctx->staticSize != 0) {
132284 +                RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
132285 +            }
132286 +            dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
132287 +            return 0;
132288 +        default:;
132289 +    }
132290 +    RETURN_ERROR(parameter_unsupported, "");
132293 +size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
132295 +    if ( (reset == ZSTD_reset_session_only)
132296 +      || (reset == ZSTD_reset_session_and_parameters) ) {
132297 +        dctx->streamStage = zdss_init;
132298 +        dctx->noForwardProgress = 0;
132299 +    }
132300 +    if ( (reset == ZSTD_reset_parameters)
132301 +      || (reset == ZSTD_reset_session_and_parameters) ) {
132302 +        RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
132303 +        ZSTD_clearDict(dctx);
132304 +        ZSTD_DCtx_resetParameters(dctx);
132305 +    }
132306 +    return 0;
132310 +size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
132312 +    return ZSTD_sizeof_DCtx(dctx);
132315 +size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
132317 +    size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
132318 +    unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
132319 +    unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
132320 +    size_t const minRBSize = (size_t) neededSize;
132321 +    RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
132322 +                    frameParameter_windowTooLarge, "");
132323 +    return minRBSize;
132326 +size_t ZSTD_estimateDStreamSize(size_t windowSize)
132328 +    size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
132329 +    size_t const inBuffSize = blockSize;  /* no block can be larger */
132330 +    size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
132331 +    return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
132334 +size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
132336 +    U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;   /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
132337 +    ZSTD_frameHeader zfh;
132338 +    size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
132339 +    if (ZSTD_isError(err)) return err;
132340 +    RETURN_ERROR_IF(err>0, srcSize_wrong, "");
132341 +    RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
132342 +                    frameParameter_windowTooLarge, "");
132343 +    return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
132347 +/* *****   Decompression   ***** */
132349 +static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
132351 +    return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR;
132354 +static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
132356 +    if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize))
132357 +        zds->oversizedDuration++;
132358 +    else
132359 +        zds->oversizedDuration = 0;
132362 +static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds)
132364 +    return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION;
132367 +/* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */
132368 +static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output)
132370 +    ZSTD_outBuffer const expect = zds->expectedOutBuffer;
132371 +    /* No requirement when ZSTD_obm_stable is not enabled. */
132372 +    if (zds->outBufferMode != ZSTD_bm_stable)
132373 +        return 0;
132374 +    /* Any buffer is allowed in zdss_init, this must be the same for every other call until
132375 +     * the context is reset.
132376 +     */
132377 +    if (zds->streamStage == zdss_init)
132378 +        return 0;
132379 +    /* The buffer must match our expectation exactly. */
132380 +    if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size)
132381 +        return 0;
132382 +    RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!");
132385 +/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream()
132386 + * and updates the stage and the output buffer state. This call is extracted so it can be
132387 + * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode.
132388 + * NOTE: You must break after calling this function since the streamStage is modified.
132389 + */
132390 +static size_t ZSTD_decompressContinueStream(
132391 +            ZSTD_DStream* zds, char** op, char* oend,
132392 +            void const* src, size_t srcSize) {
132393 +    int const isSkipFrame = ZSTD_isSkipFrame(zds);
132394 +    if (zds->outBufferMode == ZSTD_bm_buffered) {
132395 +        size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart;
132396 +        size_t const decodedSize = ZSTD_decompressContinue(zds,
132397 +                zds->outBuff + zds->outStart, dstSize, src, srcSize);
132398 +        FORWARD_IF_ERROR(decodedSize, "");
132399 +        if (!decodedSize && !isSkipFrame) {
132400 +            zds->streamStage = zdss_read;
132401 +        } else {
132402 +            zds->outEnd = zds->outStart + decodedSize;
132403 +            zds->streamStage = zdss_flush;
132404 +        }
132405 +    } else {
132406 +        /* Write directly into the output buffer */
132407 +        size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op);
132408 +        size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize);
132409 +        FORWARD_IF_ERROR(decodedSize, "");
132410 +        *op += decodedSize;
132411 +        /* Flushing is not needed. */
132412 +        zds->streamStage = zdss_read;
132413 +        assert(*op <= oend);
132414 +        assert(zds->outBufferMode == ZSTD_bm_stable);
132415 +    }
132416 +    return 0;
132419 +size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
132421 +    const char* const src = (const char*)input->src;
132422 +    const char* const istart = input->pos != 0 ? src + input->pos : src;
132423 +    const char* const iend = input->size != 0 ? src + input->size : src;
132424 +    const char* ip = istart;
132425 +    char* const dst = (char*)output->dst;
132426 +    char* const ostart = output->pos != 0 ? dst + output->pos : dst;
132427 +    char* const oend = output->size != 0 ? dst + output->size : dst;
132428 +    char* op = ostart;
132429 +    U32 someMoreWork = 1;
132431 +    DEBUGLOG(5, "ZSTD_decompressStream");
132432 +    RETURN_ERROR_IF(
132433 +        input->pos > input->size,
132434 +        srcSize_wrong,
132435 +        "forbidden. in: pos: %u   vs size: %u",
132436 +        (U32)input->pos, (U32)input->size);
132437 +    RETURN_ERROR_IF(
132438 +        output->pos > output->size,
132439 +        dstSize_tooSmall,
132440 +        "forbidden. out: pos: %u   vs size: %u",
132441 +        (U32)output->pos, (U32)output->size);
132442 +    DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
132443 +    FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), "");
132445 +    while (someMoreWork) {
132446 +        switch(zds->streamStage)
132447 +        {
132448 +        case zdss_init :
132449 +            DEBUGLOG(5, "stage zdss_init => transparent reset ");
132450 +            zds->streamStage = zdss_loadHeader;
132451 +            zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
132452 +            zds->legacyVersion = 0;
132453 +            zds->hostageByte = 0;
132454 +            zds->expectedOutBuffer = *output;
132455 +            /* fall-through */
132457 +        case zdss_loadHeader :
132458 +            DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
132459 +            {   size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
132460 +                if (zds->refMultipleDDicts && zds->ddictSet) {
132461 +                    ZSTD_DCtx_selectFrameDDict(zds);
132462 +                }
132463 +                DEBUGLOG(5, "header size : %u", (U32)hSize);
132464 +                if (ZSTD_isError(hSize)) {
132465 +                    return hSize;   /* error */
132466 +                }
132467 +                if (hSize != 0) {   /* need more input */
132468 +                    size_t const toLoad = hSize - zds->lhSize;   /* if hSize!=0, hSize > zds->lhSize */
132469 +                    size_t const remainingInput = (size_t)(iend-ip);
132470 +                    assert(iend >= ip);
132471 +                    if (toLoad > remainingInput) {   /* not enough input to load full header */
132472 +                        if (remainingInput > 0) {
132473 +                            ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
132474 +                            zds->lhSize += remainingInput;
132475 +                        }
132476 +                        input->pos = input->size;
132477 +                        return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
132478 +                    }
132479 +                    assert(ip != NULL);
132480 +                    ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
132481 +                    break;
132482 +            }   }
132484 +            /* check for single-pass mode opportunity */
132485 +            if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
132486 +                && zds->fParams.frameType != ZSTD_skippableFrame
132487 +                && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
132488 +                size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
132489 +                if (cSize <= (size_t)(iend-istart)) {
132490 +                    /* shortcut : using single-pass mode */
132491 +                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
132492 +                    if (ZSTD_isError(decompressedSize)) return decompressedSize;
132493 +                    DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
132494 +                    ip = istart + cSize;
132495 +                    op += decompressedSize;
132496 +                    zds->expected = 0;
132497 +                    zds->streamStage = zdss_init;
132498 +                    someMoreWork = 0;
132499 +                    break;
132500 +            }   }
132502 +            /* Check output buffer is large enough for ZSTD_odm_stable. */
132503 +            if (zds->outBufferMode == ZSTD_bm_stable
132504 +                && zds->fParams.frameType != ZSTD_skippableFrame
132505 +                && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
132506 +                && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) {
132507 +                RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small");
132508 +            }
132510 +            /* Consume header (see ZSTDds_decodeFrameHeader) */
132511 +            DEBUGLOG(4, "Consume header");
132512 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
132514 +            if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
132515 +                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
132516 +                zds->stage = ZSTDds_skipFrame;
132517 +            } else {
132518 +                FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), "");
132519 +                zds->expected = ZSTD_blockHeaderSize;
132520 +                zds->stage = ZSTDds_decodeBlockHeader;
132521 +            }
132523 +            /* control buffer memory usage */
132524 +            DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
132525 +                        (U32)(zds->fParams.windowSize >>10),
132526 +                        (U32)(zds->maxWindowSize >> 10) );
132527 +            zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
132528 +            RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
132529 +                            frameParameter_windowTooLarge, "");
132531 +            /* Adapt buffer sizes to frame header instructions */
132532 +            {   size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
132533 +                size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
132534 +                        ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
132535 +                        : 0;
132537 +                ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
132539 +                {   int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize);
132540 +                    int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds);
132542 +                    if (tooSmall || tooLarge) {
132543 +                        size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
132544 +                        DEBUGLOG(4, "inBuff  : from %u to %u",
132545 +                                    (U32)zds->inBuffSize, (U32)neededInBuffSize);
132546 +                        DEBUGLOG(4, "outBuff : from %u to %u",
132547 +                                    (U32)zds->outBuffSize, (U32)neededOutBuffSize);
132548 +                        if (zds->staticSize) {  /* static DCtx */
132549 +                            DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
132550 +                            assert(zds->staticSize >= sizeof(ZSTD_DCtx));  /* controlled at init */
132551 +                            RETURN_ERROR_IF(
132552 +                                bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
132553 +                                memory_allocation, "");
132554 +                        } else {
132555 +                            ZSTD_customFree(zds->inBuff, zds->customMem);
132556 +                            zds->inBuffSize = 0;
132557 +                            zds->outBuffSize = 0;
132558 +                            zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem);
132559 +                            RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, "");
132560 +                        }
132561 +                        zds->inBuffSize = neededInBuffSize;
132562 +                        zds->outBuff = zds->inBuff + zds->inBuffSize;
132563 +                        zds->outBuffSize = neededOutBuffSize;
132564 +            }   }   }
132565 +            zds->streamStage = zdss_read;
132566 +            /* fall-through */
132568 +        case zdss_read:
132569 +            DEBUGLOG(5, "stage zdss_read");
132570 +            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));
132571 +                DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
132572 +                if (neededInSize==0) {  /* end of frame */
132573 +                    zds->streamStage = zdss_init;
132574 +                    someMoreWork = 0;
132575 +                    break;
132576 +                }
132577 +                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
132578 +                    FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
132579 +                    ip += neededInSize;
132580 +                    /* Function modifies the stage so we must break */
132581 +                    break;
132582 +            }   }
132583 +            if (ip==iend) { someMoreWork = 0; break; }   /* no more input */
132584 +            zds->streamStage = zdss_load;
132585 +            /* fall-through */
132587 +        case zdss_load:
132588 +            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
132589 +                size_t const toLoad = neededInSize - zds->inPos;
132590 +                int const isSkipFrame = ZSTD_isSkipFrame(zds);
132591 +                size_t loadedSize;
132592 +                /* At this point we shouldn't be decompressing a block that we can stream. */
132593 +                assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
132594 +                if (isSkipFrame) {
132595 +                    loadedSize = MIN(toLoad, (size_t)(iend-ip));
132596 +                } else {
132597 +                    RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
132598 +                                    corruption_detected,
132599 +                                    "should never happen");
132600 +                    loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
132601 +                }
132602 +                ip += loadedSize;
132603 +                zds->inPos += loadedSize;
132604 +                if (loadedSize < toLoad) { someMoreWork = 0; break; }   /* not enough input, wait for more */
132606 +                /* decode loaded input */
132607 +                zds->inPos = 0;   /* input is consumed */
132608 +                FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), "");
132609 +                /* Function modifies the stage so we must break */
132610 +                break;
132611 +            }
132612 +        case zdss_flush:
132613 +            {   size_t const toFlushSize = zds->outEnd - zds->outStart;
132614 +                size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
132615 +                op += flushedSize;
132616 +                zds->outStart += flushedSize;
132617 +                if (flushedSize == toFlushSize) {  /* flush completed */
132618 +                    zds->streamStage = zdss_read;
132619 +                    if ( (zds->outBuffSize < zds->fParams.frameContentSize)
132620 +                      && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
132621 +                        DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
132622 +                                (int)(zds->outBuffSize - zds->outStart),
132623 +                                (U32)zds->fParams.blockSizeMax);
132624 +                        zds->outStart = zds->outEnd = 0;
132625 +                    }
132626 +                    break;
132627 +            }   }
132628 +            /* cannot complete flush */
132629 +            someMoreWork = 0;
132630 +            break;
132632 +        default:
132633 +            assert(0);    /* impossible */
132634 +            RETURN_ERROR(GENERIC, "impossible to reach");   /* some compiler require default to do something */
132635 +    }   }
132637 +    /* result */
132638 +    input->pos = (size_t)(ip - (const char*)(input->src));
132639 +    output->pos = (size_t)(op - (char*)(output->dst));
132641 +    /* Update the expected output buffer for ZSTD_obm_stable. */
132642 +    zds->expectedOutBuffer = *output;
132644 +    if ((ip==istart) && (op==ostart)) {  /* no forward progress */
132645 +        zds->noForwardProgress ++;
132646 +        if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
132647 +            RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
132648 +            RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
132649 +            assert(0);
132650 +        }
132651 +    } else {
132652 +        zds->noForwardProgress = 0;
132653 +    }
132654 +    {   size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
132655 +        if (!nextSrcSizeHint) {   /* frame fully decoded */
132656 +            if (zds->outEnd == zds->outStart) {  /* output fully flushed */
132657 +                if (zds->hostageByte) {
132658 +                    if (input->pos >= input->size) {
132659 +                        /* can't release hostage (not present) */
132660 +                        zds->streamStage = zdss_read;
132661 +                        return 1;
132662 +                    }
132663 +                    input->pos++;  /* release hostage */
132664 +                }   /* zds->hostageByte */
132665 +                return 0;
132666 +            }  /* zds->outEnd == zds->outStart */
132667 +            if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
132668 +                input->pos--;   /* note : pos > 0, otherwise, impossible to finish reading last block */
132669 +                zds->hostageByte=1;
132670 +            }
132671 +            return 1;
132672 +        }  /* nextSrcSizeHint==0 */
132673 +        nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block);   /* preload header of next block */
132674 +        assert(zds->inPos <= nextSrcSizeHint);
132675 +        nextSrcSizeHint -= zds->inPos;   /* part already loaded*/
132676 +        return nextSrcSizeHint;
132677 +    }
132680 +size_t ZSTD_decompressStream_simpleArgs (
132681 +                            ZSTD_DCtx* dctx,
132682 +                            void* dst, size_t dstCapacity, size_t* dstPos,
132683 +                      const void* src, size_t srcSize, size_t* srcPos)
132685 +    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
132686 +    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
132687 +    /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
132688 +    size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
132689 +    *dstPos = output.pos;
132690 +    *srcPos = input.pos;
132691 +    return cErr;
132693 diff --git a/lib/zstd/decompress/zstd_decompress_block.c b/lib/zstd/decompress/zstd_decompress_block.c
132694 new file mode 100644
132695 index 000000000000..cd6eba55a21c
132696 --- /dev/null
132697 +++ b/lib/zstd/decompress/zstd_decompress_block.c
132698 @@ -0,0 +1,1540 @@
132700 + * Copyright (c) Yann Collet, Facebook, Inc.
132701 + * All rights reserved.
132703 + * This source code is licensed under both the BSD-style license (found in the
132704 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
132705 + * in the COPYING file in the root directory of this source tree).
132706 + * You may select, at your option, one of the above-listed licenses.
132707 + */
132709 +/* zstd_decompress_block :
132710 + * this module takes care of decompressing _compressed_ block */
132712 +/*-*******************************************************
132713 +*  Dependencies
132714 +*********************************************************/
132715 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
132716 +#include "../common/compiler.h"    /* prefetch */
132717 +#include "../common/cpu.h"         /* bmi2 */
132718 +#include "../common/mem.h"         /* low level memory routines */
132719 +#define FSE_STATIC_LINKING_ONLY
132720 +#include "../common/fse.h"
132721 +#define HUF_STATIC_LINKING_ONLY
132722 +#include "../common/huf.h"
132723 +#include "../common/zstd_internal.h"
132724 +#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
132725 +#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
132726 +#include "zstd_decompress_block.h"
132728 +/*_*******************************************************
132729 +*  Macros
132730 +**********************************************************/
132732 +/* These two optional macros force the use one way or another of the two
132733 + * ZSTD_decompressSequences implementations. You can't force in both directions
132734 + * at the same time.
132735 + */
132736 +#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
132737 +    defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
132738 +#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
132739 +#endif
132742 +/*_*******************************************************
132743 +*  Memory operations
132744 +**********************************************************/
132745 +static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); }
132748 +/*-*************************************************************
132749 + *   Block decoding
132750 + ***************************************************************/
132752 +/*! ZSTD_getcBlockSize() :
132753 + *  Provides the size of compressed block from block header `src` */
132754 +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
132755 +                          blockProperties_t* bpPtr)
132757 +    RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, "");
132759 +    {   U32 const cBlockHeader = MEM_readLE24(src);
132760 +        U32 const cSize = cBlockHeader >> 3;
132761 +        bpPtr->lastBlock = cBlockHeader & 1;
132762 +        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
132763 +        bpPtr->origSize = cSize;   /* only useful for RLE */
132764 +        if (bpPtr->blockType == bt_rle) return 1;
132765 +        RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, "");
132766 +        return cSize;
132767 +    }
132771 +/* Hidden declaration for fullbench */
132772 +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
132773 +                          const void* src, size_t srcSize);
132774 +/*! ZSTD_decodeLiteralsBlock() :
132775 + * @return : nb of bytes read from src (< srcSize )
132776 + *  note : symbol not declared but exposed for fullbench */
132777 +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
132778 +                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
132780 +    DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
132781 +    RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
132783 +    {   const BYTE* const istart = (const BYTE*) src;
132784 +        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
132786 +        switch(litEncType)
132787 +        {
132788 +        case set_repeat:
132789 +            DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
132790 +            RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
132791 +            /* fall-through */
132793 +        case set_compressed:
132794 +            RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
132795 +            {   size_t lhSize, litSize, litCSize;
132796 +                U32 singleStream=0;
132797 +                U32 const lhlCode = (istart[0] >> 2) & 3;
132798 +                U32 const lhc = MEM_readLE32(istart);
132799 +                size_t hufSuccess;
132800 +                switch(lhlCode)
132801 +                {
132802 +                case 0: case 1: default:   /* note : default is impossible, since lhlCode into [0..3] */
132803 +                    /* 2 - 2 - 10 - 10 */
132804 +                    singleStream = !lhlCode;
132805 +                    lhSize = 3;
132806 +                    litSize  = (lhc >> 4) & 0x3FF;
132807 +                    litCSize = (lhc >> 14) & 0x3FF;
132808 +                    break;
132809 +                case 2:
132810 +                    /* 2 - 2 - 14 - 14 */
132811 +                    lhSize = 4;
132812 +                    litSize  = (lhc >> 4) & 0x3FFF;
132813 +                    litCSize = lhc >> 18;
132814 +                    break;
132815 +                case 3:
132816 +                    /* 2 - 2 - 18 - 18 */
132817 +                    lhSize = 5;
132818 +                    litSize  = (lhc >> 4) & 0x3FFFF;
132819 +                    litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
132820 +                    break;
132821 +                }
132822 +                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
132823 +                RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
132825 +                /* prefetch huffman table if cold */
132826 +                if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
132827 +                    PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
132828 +                }
132830 +                if (litEncType==set_repeat) {
132831 +                    if (singleStream) {
132832 +                        hufSuccess = HUF_decompress1X_usingDTable_bmi2(
132833 +                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
132834 +                            dctx->HUFptr, dctx->bmi2);
132835 +                    } else {
132836 +                        hufSuccess = HUF_decompress4X_usingDTable_bmi2(
132837 +                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
132838 +                            dctx->HUFptr, dctx->bmi2);
132839 +                    }
132840 +                } else {
132841 +                    if (singleStream) {
132842 +#if defined(HUF_FORCE_DECOMPRESS_X2)
132843 +                        hufSuccess = HUF_decompress1X_DCtx_wksp(
132844 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
132845 +                            istart+lhSize, litCSize, dctx->workspace,
132846 +                            sizeof(dctx->workspace));
132847 +#else
132848 +                        hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
132849 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
132850 +                            istart+lhSize, litCSize, dctx->workspace,
132851 +                            sizeof(dctx->workspace), dctx->bmi2);
132852 +#endif
132853 +                    } else {
132854 +                        hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
132855 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
132856 +                            istart+lhSize, litCSize, dctx->workspace,
132857 +                            sizeof(dctx->workspace), dctx->bmi2);
132858 +                    }
132859 +                }
132861 +                RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
132863 +                dctx->litPtr = dctx->litBuffer;
132864 +                dctx->litSize = litSize;
132865 +                dctx->litEntropy = 1;
132866 +                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
132867 +                ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
132868 +                return litCSize + lhSize;
132869 +            }
132871 +        case set_basic:
132872 +            {   size_t litSize, lhSize;
132873 +                U32 const lhlCode = ((istart[0]) >> 2) & 3;
132874 +                switch(lhlCode)
132875 +                {
132876 +                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */
132877 +                    lhSize = 1;
132878 +                    litSize = istart[0] >> 3;
132879 +                    break;
132880 +                case 1:
132881 +                    lhSize = 2;
132882 +                    litSize = MEM_readLE16(istart) >> 4;
132883 +                    break;
132884 +                case 3:
132885 +                    lhSize = 3;
132886 +                    litSize = MEM_readLE24(istart) >> 4;
132887 +                    break;
132888 +                }
132890 +                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
132891 +                    RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
132892 +                    ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize);
132893 +                    dctx->litPtr = dctx->litBuffer;
132894 +                    dctx->litSize = litSize;
132895 +                    ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
132896 +                    return lhSize+litSize;
132897 +                }
132898 +                /* direct reference into compressed stream */
132899 +                dctx->litPtr = istart+lhSize;
132900 +                dctx->litSize = litSize;
132901 +                return lhSize+litSize;
132902 +            }
132904 +        case set_rle:
132905 +            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
132906 +                size_t litSize, lhSize;
132907 +                switch(lhlCode)
132908 +                {
132909 +                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */
132910 +                    lhSize = 1;
132911 +                    litSize = istart[0] >> 3;
132912 +                    break;
132913 +                case 1:
132914 +                    lhSize = 2;
132915 +                    litSize = MEM_readLE16(istart) >> 4;
132916 +                    break;
132917 +                case 3:
132918 +                    lhSize = 3;
132919 +                    litSize = MEM_readLE24(istart) >> 4;
132920 +                    RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
132921 +                    break;
132922 +                }
132923 +                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
132924 +                ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
132925 +                dctx->litPtr = dctx->litBuffer;
132926 +                dctx->litSize = litSize;
132927 +                return lhSize+1;
132928 +            }
132929 +        default:
132930 +            RETURN_ERROR(corruption_detected, "impossible");
132931 +        }
132932 +    }
132935 +/* Default FSE distribution tables.
132936 + * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
132937 + * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
132938 + * They were generated programmatically with following method :
132939 + * - start from default distributions, present in /lib/common/zstd_internal.h
132940 + * - generate tables normally, using ZSTD_buildFSETable()
132941 + * - printout the content of tables
132942 + * - prettify output, report below, test with fuzzer to ensure it's correct */
132944 +/* Default FSE distribution table for Literal Lengths */
132945 +static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
132946 +     {  1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
132947 +     /* nextState, nbAddBits, nbBits, baseVal */
132948 +     {  0,  0,  4,    0},  { 16,  0,  4,    0},
132949 +     { 32,  0,  5,    1},  {  0,  0,  5,    3},
132950 +     {  0,  0,  5,    4},  {  0,  0,  5,    6},
132951 +     {  0,  0,  5,    7},  {  0,  0,  5,    9},
132952 +     {  0,  0,  5,   10},  {  0,  0,  5,   12},
132953 +     {  0,  0,  6,   14},  {  0,  1,  5,   16},
132954 +     {  0,  1,  5,   20},  {  0,  1,  5,   22},
132955 +     {  0,  2,  5,   28},  {  0,  3,  5,   32},
132956 +     {  0,  4,  5,   48},  { 32,  6,  5,   64},
132957 +     {  0,  7,  5,  128},  {  0,  8,  6,  256},
132958 +     {  0, 10,  6, 1024},  {  0, 12,  6, 4096},
132959 +     { 32,  0,  4,    0},  {  0,  0,  4,    1},
132960 +     {  0,  0,  5,    2},  { 32,  0,  5,    4},
132961 +     {  0,  0,  5,    5},  { 32,  0,  5,    7},
132962 +     {  0,  0,  5,    8},  { 32,  0,  5,   10},
132963 +     {  0,  0,  5,   11},  {  0,  0,  6,   13},
132964 +     { 32,  1,  5,   16},  {  0,  1,  5,   18},
132965 +     { 32,  1,  5,   22},  {  0,  2,  5,   24},
132966 +     { 32,  3,  5,   32},  {  0,  3,  5,   40},
132967 +     {  0,  6,  4,   64},  { 16,  6,  4,   64},
132968 +     { 32,  7,  5,  128},  {  0,  9,  6,  512},
132969 +     {  0, 11,  6, 2048},  { 48,  0,  4,    0},
132970 +     { 16,  0,  4,    1},  { 32,  0,  5,    2},
132971 +     { 32,  0,  5,    3},  { 32,  0,  5,    5},
132972 +     { 32,  0,  5,    6},  { 32,  0,  5,    8},
132973 +     { 32,  0,  5,    9},  { 32,  0,  5,   11},
132974 +     { 32,  0,  5,   12},  {  0,  0,  6,   15},
132975 +     { 32,  1,  5,   18},  { 32,  1,  5,   20},
132976 +     { 32,  2,  5,   24},  { 32,  2,  5,   28},
132977 +     { 32,  3,  5,   40},  { 32,  4,  5,   48},
132978 +     {  0, 16,  6,65536},  {  0, 15,  6,32768},
132979 +     {  0, 14,  6,16384},  {  0, 13,  6, 8192},
132980 +};   /* LL_defaultDTable */
132982 +/* Default FSE distribution table for Offset Codes */
132983 +static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
132984 +    {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
132985 +    /* nextState, nbAddBits, nbBits, baseVal */
132986 +    {  0,  0,  5,    0},     {  0,  6,  4,   61},
132987 +    {  0,  9,  5,  509},     {  0, 15,  5,32765},
132988 +    {  0, 21,  5,2097149},   {  0,  3,  5,    5},
132989 +    {  0,  7,  4,  125},     {  0, 12,  5, 4093},
132990 +    {  0, 18,  5,262141},    {  0, 23,  5,8388605},
132991 +    {  0,  5,  5,   29},     {  0,  8,  4,  253},
132992 +    {  0, 14,  5,16381},     {  0, 20,  5,1048573},
132993 +    {  0,  2,  5,    1},     { 16,  7,  4,  125},
132994 +    {  0, 11,  5, 2045},     {  0, 17,  5,131069},
132995 +    {  0, 22,  5,4194301},   {  0,  4,  5,   13},
132996 +    { 16,  8,  4,  253},     {  0, 13,  5, 8189},
132997 +    {  0, 19,  5,524285},    {  0,  1,  5,    1},
132998 +    { 16,  6,  4,   61},     {  0, 10,  5, 1021},
132999 +    {  0, 16,  5,65533},     {  0, 28,  5,268435453},
133000 +    {  0, 27,  5,134217725}, {  0, 26,  5,67108861},
133001 +    {  0, 25,  5,33554429},  {  0, 24,  5,16777213},
133002 +};   /* OF_defaultDTable */
133005 +/* Default FSE distribution table for Match Lengths */
133006 +static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
133007 +    {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
133008 +    /* nextState, nbAddBits, nbBits, baseVal */
133009 +    {  0,  0,  6,    3},  {  0,  0,  4,    4},
133010 +    { 32,  0,  5,    5},  {  0,  0,  5,    6},
133011 +    {  0,  0,  5,    8},  {  0,  0,  5,    9},
133012 +    {  0,  0,  5,   11},  {  0,  0,  6,   13},
133013 +    {  0,  0,  6,   16},  {  0,  0,  6,   19},
133014 +    {  0,  0,  6,   22},  {  0,  0,  6,   25},
133015 +    {  0,  0,  6,   28},  {  0,  0,  6,   31},
133016 +    {  0,  0,  6,   34},  {  0,  1,  6,   37},
133017 +    {  0,  1,  6,   41},  {  0,  2,  6,   47},
133018 +    {  0,  3,  6,   59},  {  0,  4,  6,   83},
133019 +    {  0,  7,  6,  131},  {  0,  9,  6,  515},
133020 +    { 16,  0,  4,    4},  {  0,  0,  4,    5},
133021 +    { 32,  0,  5,    6},  {  0,  0,  5,    7},
133022 +    { 32,  0,  5,    9},  {  0,  0,  5,   10},
133023 +    {  0,  0,  6,   12},  {  0,  0,  6,   15},
133024 +    {  0,  0,  6,   18},  {  0,  0,  6,   21},
133025 +    {  0,  0,  6,   24},  {  0,  0,  6,   27},
133026 +    {  0,  0,  6,   30},  {  0,  0,  6,   33},
133027 +    {  0,  1,  6,   35},  {  0,  1,  6,   39},
133028 +    {  0,  2,  6,   43},  {  0,  3,  6,   51},
133029 +    {  0,  4,  6,   67},  {  0,  5,  6,   99},
133030 +    {  0,  8,  6,  259},  { 32,  0,  4,    4},
133031 +    { 48,  0,  4,    4},  { 16,  0,  4,    5},
133032 +    { 32,  0,  5,    7},  { 32,  0,  5,    8},
133033 +    { 32,  0,  5,   10},  { 32,  0,  5,   11},
133034 +    {  0,  0,  6,   14},  {  0,  0,  6,   17},
133035 +    {  0,  0,  6,   20},  {  0,  0,  6,   23},
133036 +    {  0,  0,  6,   26},  {  0,  0,  6,   29},
133037 +    {  0,  0,  6,   32},  {  0, 16,  6,65539},
133038 +    {  0, 15,  6,32771},  {  0, 14,  6,16387},
133039 +    {  0, 13,  6, 8195},  {  0, 12,  6, 4099},
133040 +    {  0, 11,  6, 2051},  {  0, 10,  6, 1027},
133041 +};   /* ML_defaultDTable */
133044 +static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
133046 +    void* ptr = dt;
133047 +    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
133048 +    ZSTD_seqSymbol* const cell = dt + 1;
133050 +    DTableH->tableLog = 0;
133051 +    DTableH->fastMode = 0;
133053 +    cell->nbBits = 0;
133054 +    cell->nextState = 0;
133055 +    assert(nbAddBits < 255);
133056 +    cell->nbAdditionalBits = (BYTE)nbAddBits;
133057 +    cell->baseValue = baseValue;
133061 +/* ZSTD_buildFSETable() :
133062 + * generate FSE decoding table for one symbol (ll, ml or off)
133063 + * cannot fail if input is valid =>
133064 + * all inputs are presumed validated at this stage */
133065 +FORCE_INLINE_TEMPLATE
133066 +void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
133067 +            const short* normalizedCounter, unsigned maxSymbolValue,
133068 +            const U32* baseValue, const U32* nbAdditionalBits,
133069 +            unsigned tableLog, void* wksp, size_t wkspSize)
133071 +    ZSTD_seqSymbol* const tableDecode = dt+1;
133072 +    U32 const maxSV1 = maxSymbolValue + 1;
133073 +    U32 const tableSize = 1 << tableLog;
133075 +    U16* symbolNext = (U16*)wksp;
133076 +    BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1);
133077 +    U32 highThreshold = tableSize - 1;
133080 +    /* Sanity Checks */
133081 +    assert(maxSymbolValue <= MaxSeq);
133082 +    assert(tableLog <= MaxFSELog);
133083 +    assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE);
133084 +    (void)wkspSize;
133085 +    /* Init, lay down lowprob symbols */
133086 +    {   ZSTD_seqSymbol_header DTableH;
133087 +        DTableH.tableLog = tableLog;
133088 +        DTableH.fastMode = 1;
133089 +        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
133090 +            U32 s;
133091 +            for (s=0; s<maxSV1; s++) {
133092 +                if (normalizedCounter[s]==-1) {
133093 +                    tableDecode[highThreshold--].baseValue = s;
133094 +                    symbolNext[s] = 1;
133095 +                } else {
133096 +                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
133097 +                    assert(normalizedCounter[s]>=0);
133098 +                    symbolNext[s] = (U16)normalizedCounter[s];
133099 +        }   }   }
133100 +        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
133101 +    }
133103 +    /* Spread symbols */
133104 +    assert(tableSize <= 512);
133105 +    /* Specialized symbol spreading for the case when there are
133106 +     * no low probability (-1 count) symbols. When compressing
133107 +     * small blocks we avoid low probability symbols to hit this
133108 +     * case, since header decoding speed matters more.
133109 +     */
133110 +    if (highThreshold == tableSize - 1) {
133111 +        size_t const tableMask = tableSize-1;
133112 +        size_t const step = FSE_TABLESTEP(tableSize);
133113 +        /* First lay down the symbols in order.
133114 +         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
133115 +         * misses since small blocks generally have small table logs, so nearly
133116 +         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
133117 +         * our buffer to handle the over-write.
133118 +         */
133119 +        {
133120 +            U64 const add = 0x0101010101010101ull;
133121 +            size_t pos = 0;
133122 +            U64 sv = 0;
133123 +            U32 s;
133124 +            for (s=0; s<maxSV1; ++s, sv += add) {
133125 +                int i;
133126 +                int const n = normalizedCounter[s];
133127 +                MEM_write64(spread + pos, sv);
133128 +                for (i = 8; i < n; i += 8) {
133129 +                    MEM_write64(spread + pos + i, sv);
133130 +                }
133131 +                pos += n;
133132 +            }
133133 +        }
133134 +        /* Now we spread those positions across the table.
133135 +         * The benefit of doing it in two stages is that we avoid the
133136 +         * variable size inner loop, which caused lots of branch misses.
133137 +         * Now we can run through all the positions without any branch misses.
133138 +         * We unroll the loop twice, since that is what empirically worked best.
133139 +         */
133140 +        {
133141 +            size_t position = 0;
133142 +            size_t s;
133143 +            size_t const unroll = 2;
133144 +            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
133145 +            for (s = 0; s < (size_t)tableSize; s += unroll) {
133146 +                size_t u;
133147 +                for (u = 0; u < unroll; ++u) {
133148 +                    size_t const uPosition = (position + (u * step)) & tableMask;
133149 +                    tableDecode[uPosition].baseValue = spread[s + u];
133150 +                }
133151 +                position = (position + (unroll * step)) & tableMask;
133152 +            }
133153 +            assert(position == 0);
133154 +        }
133155 +    } else {
133156 +        U32 const tableMask = tableSize-1;
133157 +        U32 const step = FSE_TABLESTEP(tableSize);
133158 +        U32 s, position = 0;
133159 +        for (s=0; s<maxSV1; s++) {
133160 +            int i;
133161 +            int const n = normalizedCounter[s];
133162 +            for (i=0; i<n; i++) {
133163 +                tableDecode[position].baseValue = s;
133164 +                position = (position + step) & tableMask;
133165 +                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
133166 +        }   }
133167 +        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
133168 +    }
133170 +    /* Build Decoding table */
133171 +    {
133172 +        U32 u;
133173 +        for (u=0; u<tableSize; u++) {
133174 +            U32 const symbol = tableDecode[u].baseValue;
133175 +            U32 const nextState = symbolNext[symbol]++;
133176 +            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
133177 +            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
133178 +            assert(nbAdditionalBits[symbol] < 255);
133179 +            tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
133180 +            tableDecode[u].baseValue = baseValue[symbol];
133181 +        }
133182 +    }
133185 +/* Avoids the FORCE_INLINE of the _body() function. */
133186 +static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt,
133187 +            const short* normalizedCounter, unsigned maxSymbolValue,
133188 +            const U32* baseValue, const U32* nbAdditionalBits,
133189 +            unsigned tableLog, void* wksp, size_t wkspSize)
133191 +    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
133192 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
133195 +#if DYNAMIC_BMI2
133196 +TARGET_ATTRIBUTE("bmi2") static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt,
133197 +            const short* normalizedCounter, unsigned maxSymbolValue,
133198 +            const U32* baseValue, const U32* nbAdditionalBits,
133199 +            unsigned tableLog, void* wksp, size_t wkspSize)
133201 +    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
133202 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
133204 +#endif
133206 +void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
133207 +            const short* normalizedCounter, unsigned maxSymbolValue,
133208 +            const U32* baseValue, const U32* nbAdditionalBits,
133209 +            unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
133211 +#if DYNAMIC_BMI2
133212 +    if (bmi2) {
133213 +        ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue,
133214 +                baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
133215 +        return;
133216 +    }
133217 +#endif
133218 +    (void)bmi2;
133219 +    ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue,
133220 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
133224 +/*! ZSTD_buildSeqTable() :
133225 + * @return : nb bytes read from src,
133226 + *           or an error code if it fails */
133227 +static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
133228 +                                 symbolEncodingType_e type, unsigned max, U32 maxLog,
133229 +                                 const void* src, size_t srcSize,
133230 +                                 const U32* baseValue, const U32* nbAdditionalBits,
133231 +                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
133232 +                                 int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize,
133233 +                                 int bmi2)
133235 +    switch(type)
133236 +    {
133237 +    case set_rle :
133238 +        RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
133239 +        RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
133240 +        {   U32 const symbol = *(const BYTE*)src;
133241 +            U32 const baseline = baseValue[symbol];
133242 +            U32 const nbBits = nbAdditionalBits[symbol];
133243 +            ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
133244 +        }
133245 +        *DTablePtr = DTableSpace;
133246 +        return 1;
133247 +    case set_basic :
133248 +        *DTablePtr = defaultTable;
133249 +        return 0;
133250 +    case set_repeat:
133251 +        RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
133252 +        /* prefetch FSE table if used */
133253 +        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
133254 +            const void* const pStart = *DTablePtr;
133255 +            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
133256 +            PREFETCH_AREA(pStart, pSize);
133257 +        }
133258 +        return 0;
133259 +    case set_compressed :
133260 +        {   unsigned tableLog;
133261 +            S16 norm[MaxSeq+1];
133262 +            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
133263 +            RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
133264 +            RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
133265 +            ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
133266 +            *DTablePtr = DTableSpace;
133267 +            return headerSize;
133268 +        }
133269 +    default :
133270 +        assert(0);
133271 +        RETURN_ERROR(GENERIC, "impossible");
133272 +    }
133275 +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
133276 +                             const void* src, size_t srcSize)
133278 +    const BYTE* const istart = (const BYTE*)src;
133279 +    const BYTE* const iend = istart + srcSize;
133280 +    const BYTE* ip = istart;
133281 +    int nbSeq;
133282 +    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
133284 +    /* check */
133285 +    RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");
133287 +    /* SeqHead */
133288 +    nbSeq = *ip++;
133289 +    if (!nbSeq) {
133290 +        *nbSeqPtr=0;
133291 +        RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
133292 +        return 1;
133293 +    }
133294 +    if (nbSeq > 0x7F) {
133295 +        if (nbSeq == 0xFF) {
133296 +            RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
133297 +            nbSeq = MEM_readLE16(ip) + LONGNBSEQ;
133298 +            ip+=2;
133299 +        } else {
133300 +            RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
133301 +            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
133302 +        }
133303 +    }
133304 +    *nbSeqPtr = nbSeq;
133306 +    /* FSE table descriptors */
133307 +    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
133308 +    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
133309 +        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
133310 +        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
133311 +        ip++;
133313 +        /* Build DTables */
133314 +        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
133315 +                                                      LLtype, MaxLL, LLFSELog,
133316 +                                                      ip, iend-ip,
133317 +                                                      LL_base, LL_bits,
133318 +                                                      LL_defaultDTable, dctx->fseEntropy,
133319 +                                                      dctx->ddictIsCold, nbSeq,
133320 +                                                      dctx->workspace, sizeof(dctx->workspace),
133321 +                                                      dctx->bmi2);
133322 +            RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
133323 +            ip += llhSize;
133324 +        }
133326 +        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
133327 +                                                      OFtype, MaxOff, OffFSELog,
133328 +                                                      ip, iend-ip,
133329 +                                                      OF_base, OF_bits,
133330 +                                                      OF_defaultDTable, dctx->fseEntropy,
133331 +                                                      dctx->ddictIsCold, nbSeq,
133332 +                                                      dctx->workspace, sizeof(dctx->workspace),
133333 +                                                      dctx->bmi2);
133334 +            RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
133335 +            ip += ofhSize;
133336 +        }
133338 +        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
133339 +                                                      MLtype, MaxML, MLFSELog,
133340 +                                                      ip, iend-ip,
133341 +                                                      ML_base, ML_bits,
133342 +                                                      ML_defaultDTable, dctx->fseEntropy,
133343 +                                                      dctx->ddictIsCold, nbSeq,
133344 +                                                      dctx->workspace, sizeof(dctx->workspace),
133345 +                                                      dctx->bmi2);
133346 +            RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
133347 +            ip += mlhSize;
133348 +        }
133349 +    }
133351 +    return ip-istart;
133355 +typedef struct {
133356 +    size_t litLength;
133357 +    size_t matchLength;
133358 +    size_t offset;
133359 +    const BYTE* match;
133360 +} seq_t;
133362 +typedef struct {
133363 +    size_t state;
133364 +    const ZSTD_seqSymbol* table;
133365 +} ZSTD_fseState;
133367 +typedef struct {
133368 +    BIT_DStream_t DStream;
133369 +    ZSTD_fseState stateLL;
133370 +    ZSTD_fseState stateOffb;
133371 +    ZSTD_fseState stateML;
133372 +    size_t prevOffset[ZSTD_REP_NUM];
133373 +    const BYTE* prefixStart;
133374 +    const BYTE* dictEnd;
133375 +    size_t pos;
133376 +} seqState_t;
133378 +/*! ZSTD_overlapCopy8() :
133379 + *  Copies 8 bytes from ip to op and updates op and ip where ip <= op.
133380 + *  If the offset is < 8 then the offset is spread to at least 8 bytes.
133382 + *  Precondition: *ip <= *op
133383 + *  Postcondition: *op - *ip >= 8
133384 + */
133385 +HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
133386 +    assert(*ip <= *op);
133387 +    if (offset < 8) {
133388 +        /* close range match, overlap */
133389 +        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
133390 +        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
133391 +        int const sub2 = dec64table[offset];
133392 +        (*op)[0] = (*ip)[0];
133393 +        (*op)[1] = (*ip)[1];
133394 +        (*op)[2] = (*ip)[2];
133395 +        (*op)[3] = (*ip)[3];
133396 +        *ip += dec32table[offset];
133397 +        ZSTD_copy4(*op+4, *ip);
133398 +        *ip -= sub2;
133399 +    } else {
133400 +        ZSTD_copy8(*op, *ip);
133401 +    }
133402 +    *ip += 8;
133403 +    *op += 8;
133404 +    assert(*op - *ip >= 8);
133407 +/*! ZSTD_safecopy() :
133408 + *  Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
133409 + *  and write up to 16 bytes past oend_w (op >= oend_w is allowed).
133410 + *  This function is only called in the uncommon case where the sequence is near the end of the block. It
133411 + *  should be fast for a single long sequence, but can be slow for several short sequences.
133413 + *  @param ovtype controls the overlap detection
133414 + *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
133415 + *         - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
133416 + *           The src buffer must be before the dst buffer.
133417 + */
133418 +static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
133419 +    ptrdiff_t const diff = op - ip;
133420 +    BYTE* const oend = op + length;
133422 +    assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
133423 +           (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
133425 +    if (length < 8) {
133426 +        /* Handle short lengths. */
133427 +        while (op < oend) *op++ = *ip++;
133428 +        return;
133429 +    }
133430 +    if (ovtype == ZSTD_overlap_src_before_dst) {
133431 +        /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
133432 +        assert(length >= 8);
133433 +        ZSTD_overlapCopy8(&op, &ip, diff);
133434 +        assert(op - ip >= 8);
133435 +        assert(op <= oend);
133436 +    }
133438 +    if (oend <= oend_w) {
133439 +        /* No risk of overwrite. */
133440 +        ZSTD_wildcopy(op, ip, length, ovtype);
133441 +        return;
133442 +    }
133443 +    if (op <= oend_w) {
133444 +        /* Wildcopy until we get close to the end. */
133445 +        assert(oend > oend_w);
133446 +        ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
133447 +        ip += oend_w - op;
133448 +        op = oend_w;
133449 +    }
133450 +    /* Handle the leftovers. */
133451 +    while (op < oend) *op++ = *ip++;
133454 +/* ZSTD_execSequenceEnd():
133455 + * This version handles cases that are near the end of the output buffer. It requires
133456 + * more careful checks to make sure there is no overflow. By separating out these hard
133457 + * and unlikely cases, we can speed up the common cases.
133459 + * NOTE: This function needs to be fast for a single long sequence, but doesn't need
133460 + * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
133461 + */
133462 +FORCE_NOINLINE
133463 +size_t ZSTD_execSequenceEnd(BYTE* op,
133464 +                            BYTE* const oend, seq_t sequence,
133465 +                            const BYTE** litPtr, const BYTE* const litLimit,
133466 +                            const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
133468 +    BYTE* const oLitEnd = op + sequence.litLength;
133469 +    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
133470 +    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
133471 +    const BYTE* match = oLitEnd - sequence.offset;
133472 +    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
133474 +    /* bounds checks : careful of address space overflow in 32-bit mode */
133475 +    RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
133476 +    RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
133477 +    assert(op < op + sequenceLength);
133478 +    assert(oLitEnd < op + sequenceLength);
133480 +    /* copy literals */
133481 +    ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
133482 +    op = oLitEnd;
133483 +    *litPtr = iLitEnd;
133485 +    /* copy Match */
133486 +    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
133487 +        /* offset beyond prefix */
133488 +        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
133489 +        match = dictEnd - (prefixStart-match);
133490 +        if (match + sequence.matchLength <= dictEnd) {
133491 +            ZSTD_memmove(oLitEnd, match, sequence.matchLength);
133492 +            return sequenceLength;
133493 +        }
133494 +        /* span extDict & currentPrefixSegment */
133495 +        {   size_t const length1 = dictEnd - match;
133496 +            ZSTD_memmove(oLitEnd, match, length1);
133497 +            op = oLitEnd + length1;
133498 +            sequence.matchLength -= length1;
133499 +            match = prefixStart;
133500 +    }   }
133501 +    ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
133502 +    return sequenceLength;
133505 +HINT_INLINE
133506 +size_t ZSTD_execSequence(BYTE* op,
133507 +                         BYTE* const oend, seq_t sequence,
133508 +                         const BYTE** litPtr, const BYTE* const litLimit,
133509 +                         const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
133511 +    BYTE* const oLitEnd = op + sequence.litLength;
133512 +    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
133513 +    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
133514 +    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;   /* risk : address space underflow on oend=NULL */
133515 +    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
133516 +    const BYTE* match = oLitEnd - sequence.offset;
133518 +    assert(op != NULL /* Precondition */);
133519 +    assert(oend_w < oend /* No underflow */);
133520 +    /* Handle edge cases in a slow path:
133521 +     *   - Read beyond end of literals
133522 +     *   - Match end is within WILDCOPY_OVERLIMIT of oend
133523 +     *   - 32-bit mode and the match length overflows
133524 +     */
133525 +    if (UNLIKELY(
133526 +            iLitEnd > litLimit ||
133527 +            oMatchEnd > oend_w ||
133528 +            (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
133529 +        return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
133531 +    /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
133532 +    assert(op <= oLitEnd /* No overflow */);
133533 +    assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
133534 +    assert(oMatchEnd <= oend /* No underflow */);
133535 +    assert(iLitEnd <= litLimit /* Literal length is in bounds */);
133536 +    assert(oLitEnd <= oend_w /* Can wildcopy literals */);
133537 +    assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
133539 +    /* Copy Literals:
133540 +     * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
133541 +     * We likely don't need the full 32-byte wildcopy.
133542 +     */
133543 +    assert(WILDCOPY_OVERLENGTH >= 16);
133544 +    ZSTD_copy16(op, (*litPtr));
133545 +    if (UNLIKELY(sequence.litLength > 16)) {
133546 +        ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
133547 +    }
133548 +    op = oLitEnd;
133549 +    *litPtr = iLitEnd;   /* update for next sequence */
133551 +    /* Copy Match */
133552 +    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
133553 +        /* offset beyond prefix -> go into extDict */
133554 +        RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
133555 +        match = dictEnd + (match - prefixStart);
133556 +        if (match + sequence.matchLength <= dictEnd) {
133557 +            ZSTD_memmove(oLitEnd, match, sequence.matchLength);
133558 +            return sequenceLength;
133559 +        }
133560 +        /* span extDict & currentPrefixSegment */
133561 +        {   size_t const length1 = dictEnd - match;
133562 +            ZSTD_memmove(oLitEnd, match, length1);
133563 +            op = oLitEnd + length1;
133564 +            sequence.matchLength -= length1;
133565 +            match = prefixStart;
133566 +    }   }
133567 +    /* Match within prefix of 1 or more bytes */
133568 +    assert(op <= oMatchEnd);
133569 +    assert(oMatchEnd <= oend_w);
133570 +    assert(match >= prefixStart);
133571 +    assert(sequence.matchLength >= 1);
133573 +    /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
133574 +     * without overlap checking.
133575 +     */
133576 +    if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
133577 +        /* We bet on a full wildcopy for matches, since we expect matches to be
133578 +         * longer than literals (in general). In silesia, ~10% of matches are longer
133579 +         * than 16 bytes.
133580 +         */
133581 +        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
133582 +        return sequenceLength;
133583 +    }
133584 +    assert(sequence.offset < WILDCOPY_VECLEN);
133586 +    /* Copy 8 bytes and spread the offset to be >= 8. */
133587 +    ZSTD_overlapCopy8(&op, &match, sequence.offset);
133589 +    /* If the match length is > 8 bytes, then continue with the wildcopy. */
133590 +    if (sequence.matchLength > 8) {
133591 +        assert(op < oMatchEnd);
133592 +        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
133593 +    }
133594 +    return sequenceLength;
133597 +static void
133598 +ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
133600 +    const void* ptr = dt;
133601 +    const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
133602 +    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
133603 +    DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
133604 +                (U32)DStatePtr->state, DTableH->tableLog);
133605 +    BIT_reloadDStream(bitD);
133606 +    DStatePtr->table = dt + 1;
133609 +FORCE_INLINE_TEMPLATE void
133610 +ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
133612 +    ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
133613 +    U32 const nbBits = DInfo.nbBits;
133614 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
133615 +    DStatePtr->state = DInfo.nextState + lowBits;
133618 +FORCE_INLINE_TEMPLATE void
133619 +ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo)
133621 +    U32 const nbBits = DInfo.nbBits;
133622 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
133623 +    DStatePtr->state = DInfo.nextState + lowBits;
133626 +/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
133627 + * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
133628 + * bits before reloading. This value is the maximum number of bytes we read
133629 + * after reloading when we are decoding long offsets.
133630 + */
133631 +#define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
133632 +    (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
133633 +        ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32  \
133634 +        : 0)
133636 +typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
133637 +typedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e;
133639 +FORCE_INLINE_TEMPLATE seq_t
133640 +ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch)
133642 +    seq_t seq;
133643 +    ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];
133644 +    ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state];
133645 +    ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];
133646 +    U32 const llBase = llDInfo.baseValue;
133647 +    U32 const mlBase = mlDInfo.baseValue;
133648 +    U32 const ofBase = ofDInfo.baseValue;
133649 +    BYTE const llBits = llDInfo.nbAdditionalBits;
133650 +    BYTE const mlBits = mlDInfo.nbAdditionalBits;
133651 +    BYTE const ofBits = ofDInfo.nbAdditionalBits;
133652 +    BYTE const totalBits = llBits+mlBits+ofBits;
133654 +    /* sequence */
133655 +    {   size_t offset;
133656 +        if (ofBits > 1) {
133657 +            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
133658 +            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
133659 +            assert(ofBits <= MaxOff);
133660 +            if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
133661 +                U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
133662 +                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
133663 +                BIT_reloadDStream(&seqState->DStream);
133664 +                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
133665 +                assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32);   /* to avoid another reload */
133666 +            } else {
133667 +                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
133668 +                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
133669 +            }
133670 +            seqState->prevOffset[2] = seqState->prevOffset[1];
133671 +            seqState->prevOffset[1] = seqState->prevOffset[0];
133672 +            seqState->prevOffset[0] = offset;
133673 +        } else {
133674 +            U32 const ll0 = (llBase == 0);
133675 +            if (LIKELY((ofBits == 0))) {
133676 +                if (LIKELY(!ll0))
133677 +                    offset = seqState->prevOffset[0];
133678 +                else {
133679 +                    offset = seqState->prevOffset[1];
133680 +                    seqState->prevOffset[1] = seqState->prevOffset[0];
133681 +                    seqState->prevOffset[0] = offset;
133682 +                }
133683 +            } else {
133684 +                offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
133685 +                {   size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
133686 +                    temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
133687 +                    if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
133688 +                    seqState->prevOffset[1] = seqState->prevOffset[0];
133689 +                    seqState->prevOffset[0] = offset = temp;
133690 +        }   }   }
133691 +        seq.offset = offset;
133692 +    }
133694 +    seq.matchLength = mlBase;
133695 +    if (mlBits > 0)
133696 +        seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
133698 +    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
133699 +        BIT_reloadDStream(&seqState->DStream);
133700 +    if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
133701 +        BIT_reloadDStream(&seqState->DStream);
133702 +    /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
133703 +    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
133705 +    seq.litLength = llBase;
133706 +    if (llBits > 0)
133707 +        seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
133709 +    if (MEM_32bits())
133710 +        BIT_reloadDStream(&seqState->DStream);
133712 +    DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
133713 +                (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
133715 +    if (prefetch == ZSTD_p_prefetch) {
133716 +        size_t const pos = seqState->pos + seq.litLength;
133717 +        const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
133718 +        seq.match = matchBase + pos - seq.offset;  /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
133719 +                                                    * No consequence though : no memory access will occur, offset is only used for prefetching */
133720 +        seqState->pos = pos + seq.matchLength;
133721 +    }
133723 +    /* ANS state update
133724 +     * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().
133725 +     * clang-9.2.0 does 7% worse with ZSTD_updateFseState().
133726 +     * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the
133727 +     * better option, so it is the default for other compilers. But, if you
133728 +     * measure that it is worse, please put up a pull request.
133729 +     */
133730 +    {
133731 +#if !defined(__clang__)
133732 +        const int kUseUpdateFseState = 1;
133733 +#else
133734 +        const int kUseUpdateFseState = 0;
133735 +#endif
133736 +        if (kUseUpdateFseState) {
133737 +            ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
133738 +            ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
133739 +            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
133740 +            ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
133741 +        } else {
133742 +            ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo);    /* <=  9 bits */
133743 +            ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo);    /* <=  9 bits */
133744 +            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
133745 +            ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo);  /* <=  8 bits */
133746 +        }
133747 +    }
133749 +    return seq;
133752 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
133753 +MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
133755 +    size_t const windowSize = dctx->fParams.windowSize;
133756 +    /* No dictionary used. */
133757 +    if (dctx->dictContentEndForFuzzing == NULL) return 0;
133758 +    /* Dictionary is our prefix. */
133759 +    if (prefixStart == dctx->dictContentBeginForFuzzing) return 1;
133760 +    /* Dictionary is not our ext-dict. */
133761 +    if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0;
133762 +    /* Dictionary is not within our window size. */
133763 +    if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0;
133764 +    /* Dictionary is active. */
133765 +    return 1;
133768 +MEM_STATIC void ZSTD_assertValidSequence(
133769 +        ZSTD_DCtx const* dctx,
133770 +        BYTE const* op, BYTE const* oend,
133771 +        seq_t const seq,
133772 +        BYTE const* prefixStart, BYTE const* virtualStart)
133774 +#if DEBUGLEVEL >= 1
133775 +    size_t const windowSize = dctx->fParams.windowSize;
133776 +    size_t const sequenceSize = seq.litLength + seq.matchLength;
133777 +    BYTE const* const oLitEnd = op + seq.litLength;
133778 +    DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
133779 +            (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
133780 +    assert(op <= oend);
133781 +    assert((size_t)(oend - op) >= sequenceSize);
133782 +    assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
133783 +    if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
133784 +        size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
133785 +        /* Offset must be within the dictionary. */
133786 +        assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
133787 +        assert(seq.offset <= windowSize + dictSize);
133788 +    } else {
133789 +        /* Offset must be within our window. */
133790 +        assert(seq.offset <= windowSize);
133791 +    }
133792 +#else
133793 +    (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
133794 +#endif
133796 +#endif
133798 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
133799 +FORCE_INLINE_TEMPLATE size_t
133800 +DONT_VECTORIZE
133801 +ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
133802 +                               void* dst, size_t maxDstSize,
133803 +                         const void* seqStart, size_t seqSize, int nbSeq,
133804 +                         const ZSTD_longOffset_e isLongOffset,
133805 +                         const int frame)
133807 +    const BYTE* ip = (const BYTE*)seqStart;
133808 +    const BYTE* const iend = ip + seqSize;
133809 +    BYTE* const ostart = (BYTE*)dst;
133810 +    BYTE* const oend = ostart + maxDstSize;
133811 +    BYTE* op = ostart;
133812 +    const BYTE* litPtr = dctx->litPtr;
133813 +    const BYTE* const litEnd = litPtr + dctx->litSize;
133814 +    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
133815 +    const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
133816 +    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
133817 +    DEBUGLOG(5, "ZSTD_decompressSequences_body");
133818 +    (void)frame;
133820 +    /* Regen sequences */
133821 +    if (nbSeq) {
133822 +        seqState_t seqState;
133823 +        size_t error = 0;
133824 +        dctx->fseEntropy = 1;
133825 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
133826 +        RETURN_ERROR_IF(
133827 +            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
133828 +            corruption_detected, "");
133829 +        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
133830 +        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
133831 +        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
133832 +        assert(dst != NULL);
133834 +        ZSTD_STATIC_ASSERT(
133835 +                BIT_DStream_unfinished < BIT_DStream_completed &&
133836 +                BIT_DStream_endOfBuffer < BIT_DStream_completed &&
133837 +                BIT_DStream_completed < BIT_DStream_overflow);
133839 +#if defined(__x86_64__)
133840 +        /* Align the decompression loop to 32 + 16 bytes.
133841 +         *
133842 +         * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
133843 +         * speed swings based on the alignment of the decompression loop. This
133844 +         * performance swing is caused by parts of the decompression loop falling
133845 +         * out of the DSB. The entire decompression loop should fit in the DSB,
133846 +         * when it can't we get much worse performance. You can measure if you've
133847 +         * hit the good case or the bad case with this perf command for some
133848 +         * compressed file test.zst:
133849 +         *
133850 +         *   perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
133851 +         *             -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
133852 +         *
133853 +         * If you see most cycles served out of the MITE you've hit the bad case.
133854 +         * If you see most cycles served out of the DSB you've hit the good case.
133855 +         * If it is pretty even then you may be in an okay case.
133856 +         *
133857 +         * I've been able to reproduce this issue on the following CPUs:
133858 +         *   - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
133859 +         *               Use Instruments->Counters to get DSB/MITE cycles.
133860 +         *               I never got performance swings, but I was able to
133861 +         *               go from the good case of mostly DSB to half of the
133862 +         *               cycles served from MITE.
133863 +         *   - Coffeelake: Intel i9-9900k
133864 +         *
133865 +         * I haven't been able to reproduce the instability or DSB misses on any
133866 +         * of the following CPUS:
133867 +         *   - Haswell
133868 +         *   - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GH
133869 +         *   - Skylake
133870 +         *
133871 +         * If you are seeing performance stability this script can help test.
133872 +         * It tests on 4 commits in zstd where I saw performance change.
133873 +         *
133874 +         *   https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
133875 +         */
133876 +        __asm__(".p2align 5");
133877 +        __asm__("nop");
133878 +        __asm__(".p2align 4");
133879 +#endif
133880 +        for ( ; ; ) {
133881 +            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch);
133882 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
133883 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
133884 +            assert(!ZSTD_isError(oneSeqSize));
133885 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
133886 +#endif
133887 +            DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
133888 +            BIT_reloadDStream(&(seqState.DStream));
133889 +            op += oneSeqSize;
133890 +            /* gcc and clang both don't like early returns in this loop.
133891 +             * Instead break and check for an error at the end of the loop.
133892 +             */
133893 +            if (UNLIKELY(ZSTD_isError(oneSeqSize))) {
133894 +                error = oneSeqSize;
133895 +                break;
133896 +            }
133897 +            if (UNLIKELY(!--nbSeq)) break;
133898 +        }
133900 +        /* check if reached exact end */
133901 +        DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
133902 +        if (ZSTD_isError(error)) return error;
133903 +        RETURN_ERROR_IF(nbSeq, corruption_detected, "");
133904 +        RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
133905 +        /* save reps for next block */
133906 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
133907 +    }
133909 +    /* last literal segment */
133910 +    {   size_t const lastLLSize = litEnd - litPtr;
133911 +        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
133912 +        if (op != NULL) {
133913 +            ZSTD_memcpy(op, litPtr, lastLLSize);
133914 +            op += lastLLSize;
133915 +        }
133916 +    }
133918 +    return op-ostart;
133921 +static size_t
133922 +ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
133923 +                                 void* dst, size_t maxDstSize,
133924 +                           const void* seqStart, size_t seqSize, int nbSeq,
133925 +                           const ZSTD_longOffset_e isLongOffset,
133926 +                           const int frame)
133928 +    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
133930 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
133932 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
133933 +FORCE_INLINE_TEMPLATE size_t
133934 +ZSTD_decompressSequencesLong_body(
133935 +                               ZSTD_DCtx* dctx,
133936 +                               void* dst, size_t maxDstSize,
133937 +                         const void* seqStart, size_t seqSize, int nbSeq,
133938 +                         const ZSTD_longOffset_e isLongOffset,
133939 +                         const int frame)
133941 +    const BYTE* ip = (const BYTE*)seqStart;
133942 +    const BYTE* const iend = ip + seqSize;
133943 +    BYTE* const ostart = (BYTE*)dst;
133944 +    BYTE* const oend = ostart + maxDstSize;
133945 +    BYTE* op = ostart;
133946 +    const BYTE* litPtr = dctx->litPtr;
133947 +    const BYTE* const litEnd = litPtr + dctx->litSize;
133948 +    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
133949 +    const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
133950 +    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
133951 +    (void)frame;
133953 +    /* Regen sequences */
133954 +    if (nbSeq) {
133955 +#define STORED_SEQS 4
133956 +#define STORED_SEQS_MASK (STORED_SEQS-1)
133957 +#define ADVANCED_SEQS 4
133958 +        seq_t sequences[STORED_SEQS];
133959 +        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
133960 +        seqState_t seqState;
133961 +        int seqNb;
133962 +        dctx->fseEntropy = 1;
133963 +        { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
133964 +        seqState.prefixStart = prefixStart;
133965 +        seqState.pos = (size_t)(op-prefixStart);
133966 +        seqState.dictEnd = dictEnd;
133967 +        assert(dst != NULL);
133968 +        assert(iend >= ip);
133969 +        RETURN_ERROR_IF(
133970 +            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
133971 +            corruption_detected, "");
133972 +        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
133973 +        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
133974 +        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
133976 +        /* prepare in advance */
133977 +        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
133978 +            sequences[seqNb] = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
133979 +            PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
133980 +        }
133981 +        RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
133983 +        /* decode and decompress */
133984 +        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
133985 +            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
133986 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
133987 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
133988 +            assert(!ZSTD_isError(oneSeqSize));
133989 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
133990 +#endif
133991 +            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
133992 +            PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
133993 +            sequences[seqNb & STORED_SEQS_MASK] = sequence;
133994 +            op += oneSeqSize;
133995 +        }
133996 +        RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
133998 +        /* finish queue */
133999 +        seqNb -= seqAdvance;
134000 +        for ( ; seqNb<nbSeq ; seqNb++) {
134001 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
134002 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
134003 +            assert(!ZSTD_isError(oneSeqSize));
134004 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
134005 +#endif
134006 +            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
134007 +            op += oneSeqSize;
134008 +        }
134010 +        /* save reps for next block */
134011 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
134012 +    }
134014 +    /* last literal segment */
134015 +    {   size_t const lastLLSize = litEnd - litPtr;
134016 +        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
134017 +        if (op != NULL) {
134018 +            ZSTD_memcpy(op, litPtr, lastLLSize);
134019 +            op += lastLLSize;
134020 +        }
134021 +    }
134023 +    return op-ostart;
134026 +static size_t
134027 +ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
134028 +                                 void* dst, size_t maxDstSize,
134029 +                           const void* seqStart, size_t seqSize, int nbSeq,
134030 +                           const ZSTD_longOffset_e isLongOffset,
134031 +                           const int frame)
134033 +    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
134035 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
134039 +#if DYNAMIC_BMI2
134041 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
134042 +static TARGET_ATTRIBUTE("bmi2") size_t
134043 +DONT_VECTORIZE
134044 +ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
134045 +                                 void* dst, size_t maxDstSize,
134046 +                           const void* seqStart, size_t seqSize, int nbSeq,
134047 +                           const ZSTD_longOffset_e isLongOffset,
134048 +                           const int frame)
134050 +    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
134052 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
134054 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
134055 +static TARGET_ATTRIBUTE("bmi2") size_t
134056 +ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
134057 +                                 void* dst, size_t maxDstSize,
134058 +                           const void* seqStart, size_t seqSize, int nbSeq,
134059 +                           const ZSTD_longOffset_e isLongOffset,
134060 +                           const int frame)
134062 +    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
134064 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
134066 +#endif /* DYNAMIC_BMI2 */
134068 +typedef size_t (*ZSTD_decompressSequences_t)(
134069 +                            ZSTD_DCtx* dctx,
134070 +                            void* dst, size_t maxDstSize,
134071 +                            const void* seqStart, size_t seqSize, int nbSeq,
134072 +                            const ZSTD_longOffset_e isLongOffset,
134073 +                            const int frame);
134075 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
134076 +static size_t
134077 +ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
134078 +                   const void* seqStart, size_t seqSize, int nbSeq,
134079 +                   const ZSTD_longOffset_e isLongOffset,
134080 +                   const int frame)
134082 +    DEBUGLOG(5, "ZSTD_decompressSequences");
134083 +#if DYNAMIC_BMI2
134084 +    if (dctx->bmi2) {
134085 +        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
134086 +    }
134087 +#endif
134088 +  return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
134090 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
134093 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
134094 +/* ZSTD_decompressSequencesLong() :
134095 + * decompression function triggered when a minimum share of offsets is considered "long",
134096 + * aka out of cache.
134097 + * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
134098 + * This function will try to mitigate main memory latency through the use of prefetching */
134099 +static size_t
134100 +ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
134101 +                             void* dst, size_t maxDstSize,
134102 +                             const void* seqStart, size_t seqSize, int nbSeq,
134103 +                             const ZSTD_longOffset_e isLongOffset,
134104 +                             const int frame)
134106 +    DEBUGLOG(5, "ZSTD_decompressSequencesLong");
134107 +#if DYNAMIC_BMI2
134108 +    if (dctx->bmi2) {
134109 +        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
134110 +    }
134111 +#endif
134112 +  return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
134114 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
134118 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
134119 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
134120 +/* ZSTD_getLongOffsetsShare() :
134121 + * condition : offTable must be valid
134122 + * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
134123 + *           compared to maximum possible of (1<<OffFSELog) */
134124 +static unsigned
134125 +ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
134127 +    const void* ptr = offTable;
134128 +    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
134129 +    const ZSTD_seqSymbol* table = offTable + 1;
134130 +    U32 const max = 1 << tableLog;
134131 +    U32 u, total = 0;
134132 +    DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
134134 +    assert(max <= (1 << OffFSELog));  /* max not too large */
134135 +    for (u=0; u<max; u++) {
134136 +        if (table[u].nbAdditionalBits > 22) total += 1;
134137 +    }
134139 +    assert(tableLog <= OffFSELog);
134140 +    total <<= (OffFSELog - tableLog);  /* scale to OffFSELog */
134142 +    return total;
134144 +#endif
134146 +size_t
134147 +ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
134148 +                              void* dst, size_t dstCapacity,
134149 +                        const void* src, size_t srcSize, const int frame)
134150 +{   /* blockType == blockCompressed */
134151 +    const BYTE* ip = (const BYTE*)src;
134152 +    /* isLongOffset must be true if there are long offsets.
134153 +     * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
134154 +     * We don't expect that to be the case in 64-bit mode.
134155 +     * In block mode, window size is not known, so we have to be conservative.
134156 +     * (note: but it could be evaluated from current-lowLimit)
134157 +     */
134158 +    ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
134159 +    DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
134161 +    RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
134163 +    /* Decode literals section */
134164 +    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
134165 +        DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
134166 +        if (ZSTD_isError(litCSize)) return litCSize;
134167 +        ip += litCSize;
134168 +        srcSize -= litCSize;
134169 +    }
134171 +    /* Build Decoding Tables */
134172 +    {
134173 +        /* These macros control at build-time which decompressor implementation
134174 +         * we use. If neither is defined, we do some inspection and dispatch at
134175 +         * runtime.
134176 +         */
134177 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
134178 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
134179 +        int usePrefetchDecoder = dctx->ddictIsCold;
134180 +#endif
134181 +        int nbSeq;
134182 +        size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
134183 +        if (ZSTD_isError(seqHSize)) return seqHSize;
134184 +        ip += seqHSize;
134185 +        srcSize -= seqHSize;
134187 +        RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
134189 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
134190 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
134191 +        if ( !usePrefetchDecoder
134192 +          && (!frame || (dctx->fParams.windowSize > (1<<24)))
134193 +          && (nbSeq>ADVANCED_SEQS) ) {  /* could probably use a larger nbSeq limit */
134194 +            U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
134195 +            U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
134196 +            usePrefetchDecoder = (shareLongOffsets >= minShare);
134197 +        }
134198 +#endif
134200 +        dctx->ddictIsCold = 0;
134202 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
134203 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
134204 +        if (usePrefetchDecoder)
134205 +#endif
134206 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
134207 +            return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
134208 +#endif
134210 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
134211 +        /* else */
134212 +        return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
134213 +#endif
134214 +    }
134218 +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
134220 +    if (dst != dctx->previousDstEnd && dstSize > 0) {   /* not contiguous */
134221 +        dctx->dictEnd = dctx->previousDstEnd;
134222 +        dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
134223 +        dctx->prefixStart = dst;
134224 +        dctx->previousDstEnd = dst;
134225 +    }
134229 +size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
134230 +                            void* dst, size_t dstCapacity,
134231 +                      const void* src, size_t srcSize)
134233 +    size_t dSize;
134234 +    ZSTD_checkContinuity(dctx, dst, dstCapacity);
134235 +    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
134236 +    dctx->previousDstEnd = (char*)dst + dSize;
134237 +    return dSize;
134239 diff --git a/lib/zstd/decompress/zstd_decompress_block.h b/lib/zstd/decompress/zstd_decompress_block.h
134240 new file mode 100644
134241 index 000000000000..e7f5f6689459
134242 --- /dev/null
134243 +++ b/lib/zstd/decompress/zstd_decompress_block.h
134244 @@ -0,0 +1,62 @@
134246 + * Copyright (c) Yann Collet, Facebook, Inc.
134247 + * All rights reserved.
134249 + * This source code is licensed under both the BSD-style license (found in the
134250 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
134251 + * in the COPYING file in the root directory of this source tree).
134252 + * You may select, at your option, one of the above-listed licenses.
134253 + */
134256 +#ifndef ZSTD_DEC_BLOCK_H
134257 +#define ZSTD_DEC_BLOCK_H
134259 +/*-*******************************************************
134260 + *  Dependencies
134261 + *********************************************************/
134262 +#include "../common/zstd_deps.h"   /* size_t */
134263 +#include <linux/zstd.h>    /* DCtx, and some public functions */
134264 +#include "../common/zstd_internal.h"  /* blockProperties_t, and some public functions */
134265 +#include "zstd_decompress_internal.h"  /* ZSTD_seqSymbol */
134268 +/* ===   Prototypes   === */
134270 +/* note: prototypes already published within `zstd.h` :
134271 + * ZSTD_decompressBlock()
134272 + */
134274 +/* note: prototypes already published within `zstd_internal.h` :
134275 + * ZSTD_getcBlockSize()
134276 + * ZSTD_decodeSeqHeaders()
134277 + */
134280 +/* ZSTD_decompressBlock_internal() :
134281 + * decompress block, starting at `src`,
134282 + * into destination buffer `dst`.
134283 + * @return : decompressed block size,
134284 + *           or an error code (which can be tested using ZSTD_isError())
134285 + */
134286 +size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
134287 +                               void* dst, size_t dstCapacity,
134288 +                         const void* src, size_t srcSize, const int frame);
134290 +/* ZSTD_buildFSETable() :
134291 + * generate FSE decoding table for one symbol (ll, ml or off)
134292 + * this function must be called with valid parameters only
134293 + * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
134294 + * in which case it cannot fail.
134295 + * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is
134296 + * defined in zstd_decompress_internal.h.
134297 + * Internal use only.
134298 + */
134299 +void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
134300 +             const short* normalizedCounter, unsigned maxSymbolValue,
134301 +             const U32* baseValue, const U32* nbAdditionalBits,
134302 +                   unsigned tableLog, void* wksp, size_t wkspSize,
134303 +                   int bmi2);
134306 +#endif /* ZSTD_DEC_BLOCK_H */
134307 diff --git a/lib/zstd/decompress/zstd_decompress_internal.h b/lib/zstd/decompress/zstd_decompress_internal.h
134308 new file mode 100644
134309 index 000000000000..4b9052f68755
134310 --- /dev/null
134311 +++ b/lib/zstd/decompress/zstd_decompress_internal.h
134312 @@ -0,0 +1,202 @@
134314 + * Copyright (c) Yann Collet, Facebook, Inc.
134315 + * All rights reserved.
134317 + * This source code is licensed under both the BSD-style license (found in the
134318 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
134319 + * in the COPYING file in the root directory of this source tree).
134320 + * You may select, at your option, one of the above-listed licenses.
134321 + */
134324 +/* zstd_decompress_internal:
134325 + * objects and definitions shared within lib/decompress modules */
134327 + #ifndef ZSTD_DECOMPRESS_INTERNAL_H
134328 + #define ZSTD_DECOMPRESS_INTERNAL_H
134331 +/*-*******************************************************
134332 + *  Dependencies
134333 + *********************************************************/
134334 +#include "../common/mem.h"             /* BYTE, U16, U32 */
134335 +#include "../common/zstd_internal.h"   /* ZSTD_seqSymbol */
134339 +/*-*******************************************************
134340 + *  Constants
134341 + *********************************************************/
134342 +static UNUSED_ATTR const U32 LL_base[MaxLL+1] = {
134343 +                 0,    1,    2,     3,     4,     5,     6,      7,
134344 +                 8,    9,   10,    11,    12,    13,    14,     15,
134345 +                16,   18,   20,    22,    24,    28,    32,     40,
134346 +                48,   64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
134347 +                0x2000, 0x4000, 0x8000, 0x10000 };
134349 +static UNUSED_ATTR const U32 OF_base[MaxOff+1] = {
134350 +                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
134351 +                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
134352 +                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
134353 +                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
134355 +static UNUSED_ATTR const U32 OF_bits[MaxOff+1] = {
134356 +                     0,  1,  2,  3,  4,  5,  6,  7,
134357 +                     8,  9, 10, 11, 12, 13, 14, 15,
134358 +                    16, 17, 18, 19, 20, 21, 22, 23,
134359 +                    24, 25, 26, 27, 28, 29, 30, 31 };
134361 +static UNUSED_ATTR const U32 ML_base[MaxML+1] = {
134362 +                     3,  4,  5,    6,     7,     8,     9,    10,
134363 +                    11, 12, 13,   14,    15,    16,    17,    18,
134364 +                    19, 20, 21,   22,    23,    24,    25,    26,
134365 +                    27, 28, 29,   30,    31,    32,    33,    34,
134366 +                    35, 37, 39,   41,    43,    47,    51,    59,
134367 +                    67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
134368 +                    0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
134371 +/*-*******************************************************
134372 + *  Decompression types
134373 + *********************************************************/
134374 + typedef struct {
134375 +     U32 fastMode;
134376 +     U32 tableLog;
134377 + } ZSTD_seqSymbol_header;
134379 + typedef struct {
134380 +     U16  nextState;
134381 +     BYTE nbAdditionalBits;
134382 +     BYTE nbBits;
134383 +     U32  baseValue;
134384 + } ZSTD_seqSymbol;
134386 + #define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
134388 +#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
134389 +#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
134391 +typedef struct {
134392 +    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];    /* Note : Space reserved for FSE Tables */
134393 +    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];   /* is also used as temporary workspace while building hufTable during DDict creation */
134394 +    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];    /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
134395 +    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
134396 +    U32 rep[ZSTD_REP_NUM];
134397 +    U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
134398 +} ZSTD_entropyDTables_t;
134400 +typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
134401 +               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
134402 +               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
134403 +               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
134405 +typedef enum { zdss_init=0, zdss_loadHeader,
134406 +               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
134408 +typedef enum {
134409 +    ZSTD_use_indefinitely = -1,  /* Use the dictionary indefinitely */
134410 +    ZSTD_dont_use = 0,           /* Do not use the dictionary (if one exists free it) */
134411 +    ZSTD_use_once = 1            /* Use the dictionary once and set to ZSTD_dont_use */
134412 +} ZSTD_dictUses_e;
134414 +/* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */
134415 +typedef struct {
134416 +    const ZSTD_DDict** ddictPtrTable;
134417 +    size_t ddictPtrTableSize;
134418 +    size_t ddictPtrCount;
134419 +} ZSTD_DDictHashSet;
134421 +struct ZSTD_DCtx_s
134423 +    const ZSTD_seqSymbol* LLTptr;
134424 +    const ZSTD_seqSymbol* MLTptr;
134425 +    const ZSTD_seqSymbol* OFTptr;
134426 +    const HUF_DTable* HUFptr;
134427 +    ZSTD_entropyDTables_t entropy;
134428 +    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];   /* space needed when building huffman tables */
134429 +    const void* previousDstEnd;   /* detect continuity */
134430 +    const void* prefixStart;      /* start of current segment */
134431 +    const void* virtualStart;     /* virtual start of previous segment if it was just before current one */
134432 +    const void* dictEnd;          /* end of previous segment */
134433 +    size_t expected;
134434 +    ZSTD_frameHeader fParams;
134435 +    U64 processedCSize;
134436 +    U64 decodedSize;
134437 +    blockType_e bType;            /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
134438 +    ZSTD_dStage stage;
134439 +    U32 litEntropy;
134440 +    U32 fseEntropy;
134441 +    struct xxh64_state xxhState;
134442 +    size_t headerSize;
134443 +    ZSTD_format_e format;
134444 +    ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum;   /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */
134445 +    U32 validateChecksum;         /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */
134446 +    const BYTE* litPtr;
134447 +    ZSTD_customMem customMem;
134448 +    size_t litSize;
134449 +    size_t rleSize;
134450 +    size_t staticSize;
134451 +    int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
134453 +    /* dictionary */
134454 +    ZSTD_DDict* ddictLocal;
134455 +    const ZSTD_DDict* ddict;     /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
134456 +    U32 dictID;
134457 +    int ddictIsCold;             /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
134458 +    ZSTD_dictUses_e dictUses;
134459 +    ZSTD_DDictHashSet* ddictSet;                    /* Hash set for multiple ddicts */
134460 +    ZSTD_refMultipleDDicts_e refMultipleDDicts;     /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
134462 +    /* streaming */
134463 +    ZSTD_dStreamStage streamStage;
134464 +    char*  inBuff;
134465 +    size_t inBuffSize;
134466 +    size_t inPos;
134467 +    size_t maxWindowSize;
134468 +    char*  outBuff;
134469 +    size_t outBuffSize;
134470 +    size_t outStart;
134471 +    size_t outEnd;
134472 +    size_t lhSize;
134473 +    void* legacyContext;
134474 +    U32 previousLegacyVersion;
134475 +    U32 legacyVersion;
134476 +    U32 hostageByte;
134477 +    int noForwardProgress;
134478 +    ZSTD_bufferMode_e outBufferMode;
134479 +    ZSTD_outBuffer expectedOutBuffer;
134481 +    /* workspace */
134482 +    BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
134483 +    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
134485 +    size_t oversizedDuration;
134487 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
134488 +    void const* dictContentBeginForFuzzing;
134489 +    void const* dictContentEndForFuzzing;
134490 +#endif
134492 +    /* Tracing */
134493 +};  /* typedef'd to ZSTD_DCtx within "zstd.h" */
134496 +/*-*******************************************************
134497 + *  Shared internal functions
134498 + *********************************************************/
134500 +/*! ZSTD_loadDEntropy() :
134501 + *  dict : must point at beginning of a valid zstd dictionary.
134502 + * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */
134503 +size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
134504 +                   const void* const dict, size_t const dictSize);
134506 +/*! ZSTD_checkContinuity() :
134507 + *  check if next `dst` follows previous position, where decompression ended.
134508 + *  If yes, do nothing (continue on current segment).
134509 + *  If not, classify previous segment as "external dictionary", and start a new segment.
134510 + *  This function cannot fail. */
134511 +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize);
134514 +#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
134515 diff --git a/lib/zstd/decompress_sources.h b/lib/zstd/decompress_sources.h
134516 new file mode 100644
134517 index 000000000000..f35bef03eb22
134518 --- /dev/null
134519 +++ b/lib/zstd/decompress_sources.h
134520 @@ -0,0 +1,28 @@
134521 +/* SPDX-License-Identifier: GPL-2.0-only */
134523 + * Copyright (c) Facebook, Inc.
134524 + * All rights reserved.
134526 + * This source code is licensed under both the BSD-style license (found in the
134527 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
134528 + * in the COPYING file in the root directory of this source tree).
134529 + * You may select, at your option, one of the above-listed licenses.
134530 + */
134533 + * This file includes every .c file needed for decompression.
134534 + * It is used by lib/decompress_unzstd.c to include the decompression
134535 + * source into the translation-unit, so it can be used for kernel
134536 + * decompression.
134537 + */
134539 +#include "common/debug.c"
134540 +#include "common/entropy_common.c"
134541 +#include "common/error_private.c"
134542 +#include "common/fse_decompress.c"
134543 +#include "common/zstd_common.c"
134544 +#include "decompress/huf_decompress.c"
134545 +#include "decompress/zstd_ddict.c"
134546 +#include "decompress/zstd_decompress.c"
134547 +#include "decompress/zstd_decompress_block.c"
134548 +#include "zstd_decompress_module.c"
134549 diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c
134550 deleted file mode 100644
134551 index 2b0a643c32c4..000000000000
134552 --- a/lib/zstd/entropy_common.c
134553 +++ /dev/null
134554 @@ -1,243 +0,0 @@
134556 - * Common functions of New Generation Entropy library
134557 - * Copyright (C) 2016, Yann Collet.
134559 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
134561 - * Redistribution and use in source and binary forms, with or without
134562 - * modification, are permitted provided that the following conditions are
134563 - * met:
134565 - *   * Redistributions of source code must retain the above copyright
134566 - * notice, this list of conditions and the following disclaimer.
134567 - *   * Redistributions in binary form must reproduce the above
134568 - * copyright notice, this list of conditions and the following disclaimer
134569 - * in the documentation and/or other materials provided with the
134570 - * distribution.
134572 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
134573 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
134574 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
134575 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
134576 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
134577 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
134578 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
134579 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
134580 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
134581 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
134582 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
134584 - * This program is free software; you can redistribute it and/or modify it under
134585 - * the terms of the GNU General Public License version 2 as published by the
134586 - * Free Software Foundation. This program is dual-licensed; you may select
134587 - * either version 2 of the GNU General Public License ("GPL") or BSD license
134588 - * ("BSD").
134590 - * You can contact the author at :
134591 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
134592 - */
134594 -/* *************************************
134595 -*  Dependencies
134596 -***************************************/
134597 -#include "error_private.h" /* ERR_*, ERROR */
134598 -#include "fse.h"
134599 -#include "huf.h"
134600 -#include "mem.h"
134602 -/*===   Version   ===*/
134603 -unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
134605 -/*===   Error Management   ===*/
134606 -unsigned FSE_isError(size_t code) { return ERR_isError(code); }
134608 -unsigned HUF_isError(size_t code) { return ERR_isError(code); }
134610 -/*-**************************************************************
134611 -*  FSE NCount encoding-decoding
134612 -****************************************************************/
134613 -size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize)
134615 -       const BYTE *const istart = (const BYTE *)headerBuffer;
134616 -       const BYTE *const iend = istart + hbSize;
134617 -       const BYTE *ip = istart;
134618 -       int nbBits;
134619 -       int remaining;
134620 -       int threshold;
134621 -       U32 bitStream;
134622 -       int bitCount;
134623 -       unsigned charnum = 0;
134624 -       int previous0 = 0;
134626 -       if (hbSize < 4)
134627 -               return ERROR(srcSize_wrong);
134628 -       bitStream = ZSTD_readLE32(ip);
134629 -       nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
134630 -       if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX)
134631 -               return ERROR(tableLog_tooLarge);
134632 -       bitStream >>= 4;
134633 -       bitCount = 4;
134634 -       *tableLogPtr = nbBits;
134635 -       remaining = (1 << nbBits) + 1;
134636 -       threshold = 1 << nbBits;
134637 -       nbBits++;
134639 -       while ((remaining > 1) & (charnum <= *maxSVPtr)) {
134640 -               if (previous0) {
134641 -                       unsigned n0 = charnum;
134642 -                       while ((bitStream & 0xFFFF) == 0xFFFF) {
134643 -                               n0 += 24;
134644 -                               if (ip < iend - 5) {
134645 -                                       ip += 2;
134646 -                                       bitStream = ZSTD_readLE32(ip) >> bitCount;
134647 -                               } else {
134648 -                                       bitStream >>= 16;
134649 -                                       bitCount += 16;
134650 -                               }
134651 -                       }
134652 -                       while ((bitStream & 3) == 3) {
134653 -                               n0 += 3;
134654 -                               bitStream >>= 2;
134655 -                               bitCount += 2;
134656 -                       }
134657 -                       n0 += bitStream & 3;
134658 -                       bitCount += 2;
134659 -                       if (n0 > *maxSVPtr)
134660 -                               return ERROR(maxSymbolValue_tooSmall);
134661 -                       while (charnum < n0)
134662 -                               normalizedCounter[charnum++] = 0;
134663 -                       if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
134664 -                               ip += bitCount >> 3;
134665 -                               bitCount &= 7;
134666 -                               bitStream = ZSTD_readLE32(ip) >> bitCount;
134667 -                       } else {
134668 -                               bitStream >>= 2;
134669 -                       }
134670 -               }
134671 -               {
134672 -                       int const max = (2 * threshold - 1) - remaining;
134673 -                       int count;
134675 -                       if ((bitStream & (threshold - 1)) < (U32)max) {
134676 -                               count = bitStream & (threshold - 1);
134677 -                               bitCount += nbBits - 1;
134678 -                       } else {
134679 -                               count = bitStream & (2 * threshold - 1);
134680 -                               if (count >= threshold)
134681 -                                       count -= max;
134682 -                               bitCount += nbBits;
134683 -                       }
134685 -                       count--;                                 /* extra accuracy */
134686 -                       remaining -= count < 0 ? -count : count; /* -1 means +1 */
134687 -                       normalizedCounter[charnum++] = (short)count;
134688 -                       previous0 = !count;
134689 -                       while (remaining < threshold) {
134690 -                               nbBits--;
134691 -                               threshold >>= 1;
134692 -                       }
134694 -                       if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
134695 -                               ip += bitCount >> 3;
134696 -                               bitCount &= 7;
134697 -                       } else {
134698 -                               bitCount -= (int)(8 * (iend - 4 - ip));
134699 -                               ip = iend - 4;
134700 -                       }
134701 -                       bitStream = ZSTD_readLE32(ip) >> (bitCount & 31);
134702 -               }
134703 -       } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
134704 -       if (remaining != 1)
134705 -               return ERROR(corruption_detected);
134706 -       if (bitCount > 32)
134707 -               return ERROR(corruption_detected);
134708 -       *maxSVPtr = charnum - 1;
134710 -       ip += (bitCount + 7) >> 3;
134711 -       return ip - istart;
134714 -/*! HUF_readStats() :
134715 -       Read compact Huffman tree, saved by HUF_writeCTable().
134716 -       `huffWeight` is destination buffer.
134717 -       `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
134718 -       @return : size read from `src` , or an error Code .
134719 -       Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
134721 -size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
134723 -       U32 weightTotal;
134724 -       const BYTE *ip = (const BYTE *)src;
134725 -       size_t iSize;
134726 -       size_t oSize;
134728 -       if (!srcSize)
134729 -               return ERROR(srcSize_wrong);
134730 -       iSize = ip[0];
134731 -       /* memset(huffWeight, 0, hwSize);   */ /* is not necessary, even though some analyzer complain ... */
134733 -       if (iSize >= 128) { /* special header */
134734 -               oSize = iSize - 127;
134735 -               iSize = ((oSize + 1) / 2);
134736 -               if (iSize + 1 > srcSize)
134737 -                       return ERROR(srcSize_wrong);
134738 -               if (oSize >= hwSize)
134739 -                       return ERROR(corruption_detected);
134740 -               ip += 1;
134741 -               {
134742 -                       U32 n;
134743 -                       for (n = 0; n < oSize; n += 2) {
134744 -                               huffWeight[n] = ip[n / 2] >> 4;
134745 -                               huffWeight[n + 1] = ip[n / 2] & 15;
134746 -                       }
134747 -               }
134748 -       } else {                                                 /* header compressed with FSE (normal case) */
134749 -               if (iSize + 1 > srcSize)
134750 -                       return ERROR(srcSize_wrong);
134751 -               oSize = FSE_decompress_wksp(huffWeight, hwSize - 1, ip + 1, iSize, 6, workspace, workspaceSize); /* max (hwSize-1) values decoded, as last one is implied */
134752 -               if (FSE_isError(oSize))
134753 -                       return oSize;
134754 -       }
134756 -       /* collect weight stats */
134757 -       memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
134758 -       weightTotal = 0;
134759 -       {
134760 -               U32 n;
134761 -               for (n = 0; n < oSize; n++) {
134762 -                       if (huffWeight[n] >= HUF_TABLELOG_MAX)
134763 -                               return ERROR(corruption_detected);
134764 -                       rankStats[huffWeight[n]]++;
134765 -                       weightTotal += (1 << huffWeight[n]) >> 1;
134766 -               }
134767 -       }
134768 -       if (weightTotal == 0)
134769 -               return ERROR(corruption_detected);
134771 -       /* get last non-null symbol weight (implied, total must be 2^n) */
134772 -       {
134773 -               U32 const tableLog = BIT_highbit32(weightTotal) + 1;
134774 -               if (tableLog > HUF_TABLELOG_MAX)
134775 -                       return ERROR(corruption_detected);
134776 -               *tableLogPtr = tableLog;
134777 -               /* determine last weight */
134778 -               {
134779 -                       U32 const total = 1 << tableLog;
134780 -                       U32 const rest = total - weightTotal;
134781 -                       U32 const verif = 1 << BIT_highbit32(rest);
134782 -                       U32 const lastWeight = BIT_highbit32(rest) + 1;
134783 -                       if (verif != rest)
134784 -                               return ERROR(corruption_detected); /* last value must be a clean power of 2 */
134785 -                       huffWeight[oSize] = (BYTE)lastWeight;
134786 -                       rankStats[lastWeight]++;
134787 -               }
134788 -       }
134790 -       /* check tree construction validity */
134791 -       if ((rankStats[1] < 2) || (rankStats[1] & 1))
134792 -               return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
134794 -       /* results */
134795 -       *nbSymbolsPtr = (U32)(oSize + 1);
134796 -       return iSize + 1;
134798 diff --git a/lib/zstd/error_private.h b/lib/zstd/error_private.h
134799 deleted file mode 100644
134800 index 1a60b31f706c..000000000000
134801 --- a/lib/zstd/error_private.h
134802 +++ /dev/null
134803 @@ -1,53 +0,0 @@
134805 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
134806 - * All rights reserved.
134808 - * This source code is licensed under the BSD-style license found in the
134809 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
134810 - * An additional grant of patent rights can be found in the PATENTS file in the
134811 - * same directory.
134813 - * This program is free software; you can redistribute it and/or modify it under
134814 - * the terms of the GNU General Public License version 2 as published by the
134815 - * Free Software Foundation. This program is dual-licensed; you may select
134816 - * either version 2 of the GNU General Public License ("GPL") or BSD license
134817 - * ("BSD").
134818 - */
134820 -/* Note : this module is expected to remain private, do not expose it */
134822 -#ifndef ERROR_H_MODULE
134823 -#define ERROR_H_MODULE
134825 -/* ****************************************
134826 -*  Dependencies
134827 -******************************************/
134828 -#include <linux/types.h> /* size_t */
134829 -#include <linux/zstd.h>  /* enum list */
134831 -/* ****************************************
134832 -*  Compiler-specific
134833 -******************************************/
134834 -#define ERR_STATIC static __attribute__((unused))
134836 -/*-****************************************
134837 -*  Customization (error_public.h)
134838 -******************************************/
134839 -typedef ZSTD_ErrorCode ERR_enum;
134840 -#define PREFIX(name) ZSTD_error_##name
134842 -/*-****************************************
134843 -*  Error codes handling
134844 -******************************************/
134845 -#define ERROR(name) ((size_t)-PREFIX(name))
134847 -ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
134849 -ERR_STATIC ERR_enum ERR_getErrorCode(size_t code)
134851 -       if (!ERR_isError(code))
134852 -               return (ERR_enum)0;
134853 -       return (ERR_enum)(0 - code);
134856 -#endif /* ERROR_H_MODULE */
134857 diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h
134858 deleted file mode 100644
134859 index 7460ab04b191..000000000000
134860 --- a/lib/zstd/fse.h
134861 +++ /dev/null
134862 @@ -1,575 +0,0 @@
134864 - * FSE : Finite State Entropy codec
134865 - * Public Prototypes declaration
134866 - * Copyright (C) 2013-2016, Yann Collet.
134868 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
134870 - * Redistribution and use in source and binary forms, with or without
134871 - * modification, are permitted provided that the following conditions are
134872 - * met:
134874 - *   * Redistributions of source code must retain the above copyright
134875 - * notice, this list of conditions and the following disclaimer.
134876 - *   * Redistributions in binary form must reproduce the above
134877 - * copyright notice, this list of conditions and the following disclaimer
134878 - * in the documentation and/or other materials provided with the
134879 - * distribution.
134881 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
134882 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
134883 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
134884 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
134885 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
134886 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
134887 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
134888 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
134889 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
134890 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
134891 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
134893 - * This program is free software; you can redistribute it and/or modify it under
134894 - * the terms of the GNU General Public License version 2 as published by the
134895 - * Free Software Foundation. This program is dual-licensed; you may select
134896 - * either version 2 of the GNU General Public License ("GPL") or BSD license
134897 - * ("BSD").
134899 - * You can contact the author at :
134900 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
134901 - */
134902 -#ifndef FSE_H
134903 -#define FSE_H
134905 -/*-*****************************************
134906 -*  Dependencies
134907 -******************************************/
134908 -#include <linux/types.h> /* size_t, ptrdiff_t */
134910 -/*-*****************************************
134911 -*  FSE_PUBLIC_API : control library symbols visibility
134912 -******************************************/
134913 -#define FSE_PUBLIC_API
134915 -/*------   Version   ------*/
134916 -#define FSE_VERSION_MAJOR 0
134917 -#define FSE_VERSION_MINOR 9
134918 -#define FSE_VERSION_RELEASE 0
134920 -#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
134921 -#define FSE_QUOTE(str) #str
134922 -#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
134923 -#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
134925 -#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR * 100 * 100 + FSE_VERSION_MINOR * 100 + FSE_VERSION_RELEASE)
134926 -FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
134928 -/*-*****************************************
134929 -*  Tool functions
134930 -******************************************/
134931 -FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
134933 -/* Error Management */
134934 -FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
134936 -/*-*****************************************
134937 -*  FSE detailed API
134938 -******************************************/
134940 -FSE_compress() does the following:
134941 -1. count symbol occurrence from source[] into table count[]
134942 -2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
134943 -3. save normalized counters to memory buffer using writeNCount()
134944 -4. build encoding table 'CTable' from normalized counters
134945 -5. encode the data stream using encoding table 'CTable'
134947 -FSE_decompress() does the following:
134948 -1. read normalized counters with readNCount()
134949 -2. build decoding table 'DTable' from normalized counters
134950 -3. decode the data stream using decoding table 'DTable'
134952 -The following API allows targeting specific sub-functions for advanced tasks.
134953 -For example, it's possible to compress several blocks using the same 'CTable',
134954 -or to save and provide normalized distribution using external method.
134957 -/* *** COMPRESSION *** */
134958 -/*! FSE_optimalTableLog():
134959 -       dynamically downsize 'tableLog' when conditions are met.
134960 -       It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
134961 -       @return : recommended tableLog (necessarily <= 'maxTableLog') */
134962 -FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
134964 -/*! FSE_normalizeCount():
134965 -       normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
134966 -       'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
134967 -       @return : tableLog,
134968 -                         or an errorCode, which can be tested using FSE_isError() */
134969 -FSE_PUBLIC_API size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue);
134971 -/*! FSE_NCountWriteBound():
134972 -       Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
134973 -       Typically useful for allocation purpose. */
134974 -FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
134976 -/*! FSE_writeNCount():
134977 -       Compactly save 'normalizedCounter' into 'buffer'.
134978 -       @return : size of the compressed table,
134979 -                         or an errorCode, which can be tested using FSE_isError(). */
134980 -FSE_PUBLIC_API size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
134982 -/*! Constructor and Destructor of FSE_CTable.
134983 -       Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
134984 -typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
134986 -/*! FSE_compress_usingCTable():
134987 -       Compress `src` using `ct` into `dst` which must be already allocated.
134988 -       @return : size of compressed data (<= `dstCapacity`),
134989 -                         or 0 if compressed data could not fit into `dst`,
134990 -                         or an errorCode, which can be tested using FSE_isError() */
134991 -FSE_PUBLIC_API size_t FSE_compress_usingCTable(void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct);
134994 -Tutorial :
134995 -----------
134996 -The first step is to count all symbols. FSE_count() does this job very fast.
134997 -Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
134998 -'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
134999 -maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
135000 -FSE_count() will return the number of occurrence of the most frequent symbol.
135001 -This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
135002 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
135004 -The next step is to normalize the frequencies.
135005 -FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
135006 -It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
135007 -You can use 'tableLog'==0 to mean "use default tableLog value".
135008 -If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
135009 -which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
135011 -The result of FSE_normalizeCount() will be saved into a table,
135012 -called 'normalizedCounter', which is a table of signed short.
135013 -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
135014 -The return value is tableLog if everything proceeded as expected.
135015 -It is 0 if there is a single symbol within distribution.
135016 -If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
135018 -'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
135019 -'buffer' must be already allocated.
135020 -For guaranteed success, buffer size must be at least FSE_headerBound().
135021 -The result of the function is the number of bytes written into 'buffer'.
135022 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
135024 -'normalizedCounter' can then be used to create the compression table 'CTable'.
135025 -The space required by 'CTable' must be already allocated, using FSE_createCTable().
135026 -You can then use FSE_buildCTable() to fill 'CTable'.
135027 -If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
135029 -'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
135030 -Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
135031 -The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
135032 -If it returns '0', compressed data could not fit into 'dst'.
135033 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
135036 -/* *** DECOMPRESSION *** */
135038 -/*! FSE_readNCount():
135039 -       Read compactly saved 'normalizedCounter' from 'rBuffer'.
135040 -       @return : size read from 'rBuffer',
135041 -                         or an errorCode, which can be tested using FSE_isError().
135042 -                         maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
135043 -FSE_PUBLIC_API size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize);
135045 -/*! Constructor and Destructor of FSE_DTable.
135046 -       Note that its size depends on 'tableLog' */
135047 -typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
135049 -/*! FSE_buildDTable():
135050 -       Builds 'dt', which must be already allocated, using FSE_createDTable().
135051 -       return : 0, or an errorCode, which can be tested using FSE_isError() */
135052 -FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize);
135054 -/*! FSE_decompress_usingDTable():
135055 -       Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
135056 -       into `dst` which must be already allocated.
135057 -       @return : size of regenerated data (necessarily <= `dstCapacity`),
135058 -                         or an errorCode, which can be tested using FSE_isError() */
135059 -FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt);
135062 -Tutorial :
135063 -----------
135064 -(Note : these functions only decompress FSE-compressed blocks.
135065 - If block is uncompressed, use memcpy() instead
135066 - If block is a single repeated byte, use memset() instead )
135068 -The first step is to obtain the normalized frequencies of symbols.
135069 -This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
135070 -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
135071 -In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
135072 -or size the table to handle worst case situations (typically 256).
135073 -FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
135074 -The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
135075 -Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
135076 -If there is an error, the function will return an error code, which can be tested using FSE_isError().
135078 -The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
135079 -This is performed by the function FSE_buildDTable().
135080 -The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
135081 -If there is an error, the function will return an error code, which can be tested using FSE_isError().
135083 -`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
135084 -`cSrcSize` must be strictly correct, otherwise decompression will fail.
135085 -FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
135086 -If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
135089 -/* *** Dependency *** */
135090 -#include "bitstream.h"
135092 -/* *****************************************
135093 -*  Static allocation
135094 -*******************************************/
135095 -/* FSE buffer bounds */
135096 -#define FSE_NCOUNTBOUND 512
135097 -#define FSE_BLOCKBOUND(size) (size + (size >> 7))
135098 -#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
135100 -/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
135101 -#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1 << (maxTableLog - 1)) + ((maxSymbolValue + 1) * 2))
135102 -#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1 << maxTableLog))
135104 -/* *****************************************
135105 -*  FSE advanced API
135106 -*******************************************/
135107 -/* FSE_count_wksp() :
135108 - * Same as FSE_count(), but using an externally provided scratch buffer.
135109 - * `workSpace` size must be table of >= `1024` unsigned
135110 - */
135111 -size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace);
135113 -/* FSE_countFast_wksp() :
135114 - * Same as FSE_countFast(), but using an externally provided scratch buffer.
135115 - * `workSpace` must be a table of minimum `1024` unsigned
135116 - */
135117 -size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *workSpace);
135119 -/*! FSE_count_simple
135120 - * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
135121 - * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
135123 -size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize);
135125 -unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
135126 -/**< same as FSE_optimalTableLog(), which used `minus==2` */
135128 -size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits);
135129 -/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
135131 -size_t FSE_buildCTable_rle(FSE_CTable *ct, unsigned char symbolValue);
135132 -/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
135134 -/* FSE_buildCTable_wksp() :
135135 - * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
135136 - * `wkspSize` must be >= `(1<<tableLog)`.
135137 - */
135138 -size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize);
135140 -size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits);
135141 -/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
135143 -size_t FSE_buildDTable_rle(FSE_DTable *dt, unsigned char symbolValue);
135144 -/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
135146 -size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize);
135147 -/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
135149 -/* *****************************************
135150 -*  FSE symbol compression API
135151 -*******************************************/
135153 -   This API consists of small unitary functions, which highly benefit from being inlined.
135154 -   Hence their body are included in next section.
135156 -typedef struct {
135157 -       ptrdiff_t value;
135158 -       const void *stateTable;
135159 -       const void *symbolTT;
135160 -       unsigned stateLog;
135161 -} FSE_CState_t;
135163 -static void FSE_initCState(FSE_CState_t *CStatePtr, const FSE_CTable *ct);
135165 -static void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *CStatePtr, unsigned symbol);
135167 -static void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *CStatePtr);
135169 -/**<
135170 -These functions are inner components of FSE_compress_usingCTable().
135171 -They allow the creation of custom streams, mixing multiple tables and bit sources.
135173 -A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
135174 -So the first symbol you will encode is the last you will decode, like a LIFO stack.
135176 -You will need a few variables to track your CStream. They are :
135178 -FSE_CTable    ct;         // Provided by FSE_buildCTable()
135179 -BIT_CStream_t bitStream;  // bitStream tracking structure
135180 -FSE_CState_t  state;      // State tracking structure (can have several)
135183 -The first thing to do is to init bitStream and state.
135184 -       size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
135185 -       FSE_initCState(&state, ct);
135187 -Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
135188 -You can then encode your input data, byte after byte.
135189 -FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
135190 -Remember decoding will be done in reverse direction.
135191 -       FSE_encodeByte(&bitStream, &state, symbol);
135193 -At any time, you can also add any bit sequence.
135194 -Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
135195 -       BIT_addBits(&bitStream, bitField, nbBits);
135197 -The above methods don't commit data to memory, they just store it into local register, for speed.
135198 -Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
135199 -Writing data to memory is a manual operation, performed by the flushBits function.
135200 -       BIT_flushBits(&bitStream);
135202 -Your last FSE encoding operation shall be to flush your last state value(s).
135203 -       FSE_flushState(&bitStream, &state);
135205 -Finally, you must close the bitStream.
135206 -The function returns the size of CStream in bytes.
135207 -If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
135208 -If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
135209 -       size_t size = BIT_closeCStream(&bitStream);
135212 -/* *****************************************
135213 -*  FSE symbol decompression API
135214 -*******************************************/
135215 -typedef struct {
135216 -       size_t state;
135217 -       const void *table; /* precise table may vary, depending on U16 */
135218 -} FSE_DState_t;
135220 -static void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt);
135222 -static unsigned char FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
135224 -static unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr);
135226 -/**<
135227 -Let's now decompose FSE_decompress_usingDTable() into its unitary components.
135228 -You will decode FSE-encoded symbols from the bitStream,
135229 -and also any other bitFields you put in, **in reverse order**.
135231 -You will need a few variables to track your bitStream. They are :
135233 -BIT_DStream_t DStream;    // Stream context
135234 -FSE_DState_t  DState;     // State context. Multiple ones are possible
135235 -FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
135237 -The first thing to do is to init the bitStream.
135238 -       errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
135240 -You should then retrieve your initial state(s)
135241 -(in reverse flushing order if you have several ones) :
135242 -       errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
135244 -You can then decode your data, symbol after symbol.
135245 -For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
135246 -Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
135247 -       unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
135249 -You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
135250 -Note : maximum allowed nbBits is 25, for 32-bits compatibility
135251 -       size_t bitField = BIT_readBits(&DStream, nbBits);
135253 -All above operations only read from local register (which size depends on size_t).
135254 -Refueling the register from memory is manually performed by the reload method.
135255 -       endSignal = FSE_reloadDStream(&DStream);
135257 -BIT_reloadDStream() result tells if there is still some more data to read from DStream.
135258 -BIT_DStream_unfinished : there is still some data left into the DStream.
135259 -BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
135260 -BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
135261 -BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.
135263 -When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
135264 -to properly detect the exact end of stream.
135265 -After each decoded symbol, check if DStream is fully consumed using this simple test :
135266 -       BIT_reloadDStream(&DStream) >= BIT_DStream_completed
135268 -When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
135269 -Checking if DStream has reached its end is performed by :
135270 -       BIT_endOfDStream(&DStream);
135271 -Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
135272 -       FSE_endOfDState(&DState);
135275 -/* *****************************************
135276 -*  FSE unsafe API
135277 -*******************************************/
135278 -static unsigned char FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
135279 -/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
135281 -/* *****************************************
135282 -*  Implementation of inlined functions
135283 -*******************************************/
135284 -typedef struct {
135285 -       int deltaFindState;
135286 -       U32 deltaNbBits;
135287 -} FSE_symbolCompressionTransform; /* total 8 bytes */
135289 -ZSTD_STATIC void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct)
135291 -       const void *ptr = ct;
135292 -       const U16 *u16ptr = (const U16 *)ptr;
135293 -       const U32 tableLog = ZSTD_read16(ptr);
135294 -       statePtr->value = (ptrdiff_t)1 << tableLog;
135295 -       statePtr->stateTable = u16ptr + 2;
135296 -       statePtr->symbolTT = ((const U32 *)ct + 1 + (tableLog ? (1 << (tableLog - 1)) : 1));
135297 -       statePtr->stateLog = tableLog;
135300 -/*! FSE_initCState2() :
135301 -*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
135302 -*   uses the smallest state value possible, saving the cost of this symbol */
135303 -ZSTD_STATIC void FSE_initCState2(FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol)
135305 -       FSE_initCState(statePtr, ct);
135306 -       {
135307 -               const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
135308 -               const U16 *stateTable = (const U16 *)(statePtr->stateTable);
135309 -               U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1 << 15)) >> 16);
135310 -               statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
135311 -               statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
135312 -       }
135315 -ZSTD_STATIC void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *statePtr, U32 symbol)
135317 -       const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
135318 -       const U16 *const stateTable = (const U16 *)(statePtr->stateTable);
135319 -       U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
135320 -       BIT_addBits(bitC, statePtr->value, nbBitsOut);
135321 -       statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
135324 -ZSTD_STATIC void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *statePtr)
135326 -       BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
135327 -       BIT_flushBits(bitC);
135330 -/* ======    Decompression    ====== */
135332 -typedef struct {
135333 -       U16 tableLog;
135334 -       U16 fastMode;
135335 -} FSE_DTableHeader; /* sizeof U32 */
135337 -typedef struct {
135338 -       unsigned short newState;
135339 -       unsigned char symbol;
135340 -       unsigned char nbBits;
135341 -} FSE_decode_t; /* size == U32 */
135343 -ZSTD_STATIC void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt)
135345 -       const void *ptr = dt;
135346 -       const FSE_DTableHeader *const DTableH = (const FSE_DTableHeader *)ptr;
135347 -       DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
135348 -       BIT_reloadDStream(bitD);
135349 -       DStatePtr->table = dt + 1;
135352 -ZSTD_STATIC BYTE FSE_peekSymbol(const FSE_DState_t *DStatePtr)
135354 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
135355 -       return DInfo.symbol;
135358 -ZSTD_STATIC void FSE_updateState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
135360 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
135361 -       U32 const nbBits = DInfo.nbBits;
135362 -       size_t const lowBits = BIT_readBits(bitD, nbBits);
135363 -       DStatePtr->state = DInfo.newState + lowBits;
135366 -ZSTD_STATIC BYTE FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
135368 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
135369 -       U32 const nbBits = DInfo.nbBits;
135370 -       BYTE const symbol = DInfo.symbol;
135371 -       size_t const lowBits = BIT_readBits(bitD, nbBits);
135373 -       DStatePtr->state = DInfo.newState + lowBits;
135374 -       return symbol;
135377 -/*! FSE_decodeSymbolFast() :
135378 -       unsafe, only works if no symbol has a probability > 50% */
135379 -ZSTD_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
135381 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
135382 -       U32 const nbBits = DInfo.nbBits;
135383 -       BYTE const symbol = DInfo.symbol;
135384 -       size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
135386 -       DStatePtr->state = DInfo.newState + lowBits;
135387 -       return symbol;
135390 -ZSTD_STATIC unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr) { return DStatePtr->state == 0; }
135392 -/* **************************************************************
135393 -*  Tuning parameters
135394 -****************************************************************/
135395 -/*!MEMORY_USAGE :
135396 -*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
135397 -*  Increasing memory usage improves compression ratio
135398 -*  Reduced memory usage can improve speed, due to cache effect
135399 -*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
135400 -#ifndef FSE_MAX_MEMORY_USAGE
135401 -#define FSE_MAX_MEMORY_USAGE 14
135402 -#endif
135403 -#ifndef FSE_DEFAULT_MEMORY_USAGE
135404 -#define FSE_DEFAULT_MEMORY_USAGE 13
135405 -#endif
135407 -/*!FSE_MAX_SYMBOL_VALUE :
135408 -*  Maximum symbol value authorized.
135409 -*  Required for proper stack allocation */
135410 -#ifndef FSE_MAX_SYMBOL_VALUE
135411 -#define FSE_MAX_SYMBOL_VALUE 255
135412 -#endif
135414 -/* **************************************************************
135415 -*  template functions type & suffix
135416 -****************************************************************/
135417 -#define FSE_FUNCTION_TYPE BYTE
135418 -#define FSE_FUNCTION_EXTENSION
135419 -#define FSE_DECODE_TYPE FSE_decode_t
135421 -/* ***************************************************************
135422 -*  Constants
135423 -*****************************************************************/
135424 -#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE - 2)
135425 -#define FSE_MAX_TABLESIZE (1U << FSE_MAX_TABLELOG)
135426 -#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE - 1)
135427 -#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE - 2)
135428 -#define FSE_MIN_TABLELOG 5
135430 -#define FSE_TABLELOG_ABSOLUTE_MAX 15
135431 -#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
135432 -#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
135433 -#endif
135435 -#define FSE_TABLESTEP(tableSize) ((tableSize >> 1) + (tableSize >> 3) + 3)
135437 -#endif /* FSE_H */
135438 diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c
135439 deleted file mode 100644
135440 index ef3d1741d532..000000000000
135441 --- a/lib/zstd/fse_compress.c
135442 +++ /dev/null
135443 @@ -1,795 +0,0 @@
135445 - * FSE : Finite State Entropy encoder
135446 - * Copyright (C) 2013-2015, Yann Collet.
135448 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
135450 - * Redistribution and use in source and binary forms, with or without
135451 - * modification, are permitted provided that the following conditions are
135452 - * met:
135454 - *   * Redistributions of source code must retain the above copyright
135455 - * notice, this list of conditions and the following disclaimer.
135456 - *   * Redistributions in binary form must reproduce the above
135457 - * copyright notice, this list of conditions and the following disclaimer
135458 - * in the documentation and/or other materials provided with the
135459 - * distribution.
135461 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
135462 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
135463 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
135464 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
135465 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
135466 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
135467 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
135468 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
135469 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
135470 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
135471 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
135473 - * This program is free software; you can redistribute it and/or modify it under
135474 - * the terms of the GNU General Public License version 2 as published by the
135475 - * Free Software Foundation. This program is dual-licensed; you may select
135476 - * either version 2 of the GNU General Public License ("GPL") or BSD license
135477 - * ("BSD").
135479 - * You can contact the author at :
135480 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
135481 - */
135483 -/* **************************************************************
135484 -*  Compiler specifics
135485 -****************************************************************/
135486 -#define FORCE_INLINE static __always_inline
135488 -/* **************************************************************
135489 -*  Includes
135490 -****************************************************************/
135491 -#include "bitstream.h"
135492 -#include "fse.h"
135493 -#include <linux/compiler.h>
135494 -#include <linux/kernel.h>
135495 -#include <linux/math64.h>
135496 -#include <linux/string.h> /* memcpy, memset */
135498 -/* **************************************************************
135499 -*  Error Management
135500 -****************************************************************/
135501 -#define FSE_STATIC_ASSERT(c)                                   \
135502 -       {                                                      \
135503 -               enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
135504 -       } /* use only *after* variable declarations */
135506 -/* **************************************************************
135507 -*  Templates
135508 -****************************************************************/
135510 -  designed to be included
135511 -  for type-specific functions (template emulation in C)
135512 -  Objective is to write these functions only once, for improved maintenance
135515 -/* safety checks */
135516 -#ifndef FSE_FUNCTION_EXTENSION
135517 -#error "FSE_FUNCTION_EXTENSION must be defined"
135518 -#endif
135519 -#ifndef FSE_FUNCTION_TYPE
135520 -#error "FSE_FUNCTION_TYPE must be defined"
135521 -#endif
135523 -/* Function names */
135524 -#define FSE_CAT(X, Y) X##Y
135525 -#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
135526 -#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
135528 -/* Function templates */
135530 -/* FSE_buildCTable_wksp() :
135531 - * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
135532 - * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
135533 - * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
135534 - */
135535 -size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
135537 -       U32 const tableSize = 1 << tableLog;
135538 -       U32 const tableMask = tableSize - 1;
135539 -       void *const ptr = ct;
135540 -       U16 *const tableU16 = ((U16 *)ptr) + 2;
135541 -       void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableLog ? tableSize >> 1 : 1);
135542 -       FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
135543 -       U32 const step = FSE_TABLESTEP(tableSize);
135544 -       U32 highThreshold = tableSize - 1;
135546 -       U32 *cumul;
135547 -       FSE_FUNCTION_TYPE *tableSymbol;
135548 -       size_t spaceUsed32 = 0;
135550 -       cumul = (U32 *)workspace + spaceUsed32;
135551 -       spaceUsed32 += FSE_MAX_SYMBOL_VALUE + 2;
135552 -       tableSymbol = (FSE_FUNCTION_TYPE *)((U32 *)workspace + spaceUsed32);
135553 -       spaceUsed32 += ALIGN(sizeof(FSE_FUNCTION_TYPE) * ((size_t)1 << tableLog), sizeof(U32)) >> 2;
135555 -       if ((spaceUsed32 << 2) > workspaceSize)
135556 -               return ERROR(tableLog_tooLarge);
135557 -       workspace = (U32 *)workspace + spaceUsed32;
135558 -       workspaceSize -= (spaceUsed32 << 2);
135560 -       /* CTable header */
135561 -       tableU16[-2] = (U16)tableLog;
135562 -       tableU16[-1] = (U16)maxSymbolValue;
135564 -       /* For explanations on how to distribute symbol values over the table :
135565 -       *  http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
135567 -       /* symbol start positions */
135568 -       {
135569 -               U32 u;
135570 -               cumul[0] = 0;
135571 -               for (u = 1; u <= maxSymbolValue + 1; u++) {
135572 -                       if (normalizedCounter[u - 1] == -1) { /* Low proba symbol */
135573 -                               cumul[u] = cumul[u - 1] + 1;
135574 -                               tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u - 1);
135575 -                       } else {
135576 -                               cumul[u] = cumul[u - 1] + normalizedCounter[u - 1];
135577 -                       }
135578 -               }
135579 -               cumul[maxSymbolValue + 1] = tableSize + 1;
135580 -       }
135582 -       /* Spread symbols */
135583 -       {
135584 -               U32 position = 0;
135585 -               U32 symbol;
135586 -               for (symbol = 0; symbol <= maxSymbolValue; symbol++) {
135587 -                       int nbOccurences;
135588 -                       for (nbOccurences = 0; nbOccurences < normalizedCounter[symbol]; nbOccurences++) {
135589 -                               tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
135590 -                               position = (position + step) & tableMask;
135591 -                               while (position > highThreshold)
135592 -                                       position = (position + step) & tableMask; /* Low proba area */
135593 -                       }
135594 -               }
135596 -               if (position != 0)
135597 -                       return ERROR(GENERIC); /* Must have gone through all positions */
135598 -       }
135600 -       /* Build table */
135601 -       {
135602 -               U32 u;
135603 -               for (u = 0; u < tableSize; u++) {
135604 -                       FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
135605 -                       tableU16[cumul[s]++] = (U16)(tableSize + u); /* TableU16 : sorted by symbol order; gives next state value */
135606 -               }
135607 -       }
135609 -       /* Build Symbol Transformation Table */
135610 -       {
135611 -               unsigned total = 0;
135612 -               unsigned s;
135613 -               for (s = 0; s <= maxSymbolValue; s++) {
135614 -                       switch (normalizedCounter[s]) {
135615 -                       case 0: break;
135617 -                       case -1:
135618 -                       case 1:
135619 -                               symbolTT[s].deltaNbBits = (tableLog << 16) - (1 << tableLog);
135620 -                               symbolTT[s].deltaFindState = total - 1;
135621 -                               total++;
135622 -                               break;
135623 -                       default: {
135624 -                               U32 const maxBitsOut = tableLog - BIT_highbit32(normalizedCounter[s] - 1);
135625 -                               U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
135626 -                               symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
135627 -                               symbolTT[s].deltaFindState = total - normalizedCounter[s];
135628 -                               total += normalizedCounter[s];
135629 -                       }
135630 -                       }
135631 -               }
135632 -       }
135634 -       return 0;
135637 -/*-**************************************************************
135638 -*  FSE NCount encoding-decoding
135639 -****************************************************************/
135640 -size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
135642 -       size_t const maxHeaderSize = (((maxSymbolValue + 1) * tableLog) >> 3) + 3;
135643 -       return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
135646 -static size_t FSE_writeNCount_generic(void *header, size_t headerBufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
135647 -                                     unsigned writeIsSafe)
135649 -       BYTE *const ostart = (BYTE *)header;
135650 -       BYTE *out = ostart;
135651 -       BYTE *const oend = ostart + headerBufferSize;
135652 -       int nbBits;
135653 -       const int tableSize = 1 << tableLog;
135654 -       int remaining;
135655 -       int threshold;
135656 -       U32 bitStream;
135657 -       int bitCount;
135658 -       unsigned charnum = 0;
135659 -       int previous0 = 0;
135661 -       bitStream = 0;
135662 -       bitCount = 0;
135663 -       /* Table Size */
135664 -       bitStream += (tableLog - FSE_MIN_TABLELOG) << bitCount;
135665 -       bitCount += 4;
135667 -       /* Init */
135668 -       remaining = tableSize + 1; /* +1 for extra accuracy */
135669 -       threshold = tableSize;
135670 -       nbBits = tableLog + 1;
135672 -       while (remaining > 1) { /* stops at 1 */
135673 -               if (previous0) {
135674 -                       unsigned start = charnum;
135675 -                       while (!normalizedCounter[charnum])
135676 -                               charnum++;
135677 -                       while (charnum >= start + 24) {
135678 -                               start += 24;
135679 -                               bitStream += 0xFFFFU << bitCount;
135680 -                               if ((!writeIsSafe) && (out > oend - 2))
135681 -                                       return ERROR(dstSize_tooSmall); /* Buffer overflow */
135682 -                               out[0] = (BYTE)bitStream;
135683 -                               out[1] = (BYTE)(bitStream >> 8);
135684 -                               out += 2;
135685 -                               bitStream >>= 16;
135686 -                       }
135687 -                       while (charnum >= start + 3) {
135688 -                               start += 3;
135689 -                               bitStream += 3 << bitCount;
135690 -                               bitCount += 2;
135691 -                       }
135692 -                       bitStream += (charnum - start) << bitCount;
135693 -                       bitCount += 2;
135694 -                       if (bitCount > 16) {
135695 -                               if ((!writeIsSafe) && (out > oend - 2))
135696 -                                       return ERROR(dstSize_tooSmall); /* Buffer overflow */
135697 -                               out[0] = (BYTE)bitStream;
135698 -                               out[1] = (BYTE)(bitStream >> 8);
135699 -                               out += 2;
135700 -                               bitStream >>= 16;
135701 -                               bitCount -= 16;
135702 -                       }
135703 -               }
135704 -               {
135705 -                       int count = normalizedCounter[charnum++];
135706 -                       int const max = (2 * threshold - 1) - remaining;
135707 -                       remaining -= count < 0 ? -count : count;
135708 -                       count++; /* +1 for extra accuracy */
135709 -                       if (count >= threshold)
135710 -                               count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
135711 -                       bitStream += count << bitCount;
135712 -                       bitCount += nbBits;
135713 -                       bitCount -= (count < max);
135714 -                       previous0 = (count == 1);
135715 -                       if (remaining < 1)
135716 -                               return ERROR(GENERIC);
135717 -                       while (remaining < threshold)
135718 -                               nbBits--, threshold >>= 1;
135719 -               }
135720 -               if (bitCount > 16) {
135721 -                       if ((!writeIsSafe) && (out > oend - 2))
135722 -                               return ERROR(dstSize_tooSmall); /* Buffer overflow */
135723 -                       out[0] = (BYTE)bitStream;
135724 -                       out[1] = (BYTE)(bitStream >> 8);
135725 -                       out += 2;
135726 -                       bitStream >>= 16;
135727 -                       bitCount -= 16;
135728 -               }
135729 -       }
135731 -       /* flush remaining bitStream */
135732 -       if ((!writeIsSafe) && (out > oend - 2))
135733 -               return ERROR(dstSize_tooSmall); /* Buffer overflow */
135734 -       out[0] = (BYTE)bitStream;
135735 -       out[1] = (BYTE)(bitStream >> 8);
135736 -       out += (bitCount + 7) / 8;
135738 -       if (charnum > maxSymbolValue + 1)
135739 -               return ERROR(GENERIC);
135741 -       return (out - ostart);
135744 -size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
135746 -       if (tableLog > FSE_MAX_TABLELOG)
135747 -               return ERROR(tableLog_tooLarge); /* Unsupported */
135748 -       if (tableLog < FSE_MIN_TABLELOG)
135749 -               return ERROR(GENERIC); /* Unsupported */
135751 -       if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
135752 -               return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
135754 -       return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
135757 -/*-**************************************************************
135758 -*  Counting histogram
135759 -****************************************************************/
135760 -/*! FSE_count_simple
135761 -       This function counts byte values within `src`, and store the histogram into table `count`.
135762 -       It doesn't use any additional memory.
135763 -       But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
135764 -       For this reason, prefer using a table `count` with 256 elements.
135765 -       @return : count of most numerous element
135767 -size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize)
135769 -       const BYTE *ip = (const BYTE *)src;
135770 -       const BYTE *const end = ip + srcSize;
135771 -       unsigned maxSymbolValue = *maxSymbolValuePtr;
135772 -       unsigned max = 0;
135774 -       memset(count, 0, (maxSymbolValue + 1) * sizeof(*count));
135775 -       if (srcSize == 0) {
135776 -               *maxSymbolValuePtr = 0;
135777 -               return 0;
135778 -       }
135780 -       while (ip < end)
135781 -               count[*ip++]++;
135783 -       while (!count[maxSymbolValue])
135784 -               maxSymbolValue--;
135785 -       *maxSymbolValuePtr = maxSymbolValue;
135787 -       {
135788 -               U32 s;
135789 -               for (s = 0; s <= maxSymbolValue; s++)
135790 -                       if (count[s] > max)
135791 -                               max = count[s];
135792 -       }
135794 -       return (size_t)max;
135797 -/* FSE_count_parallel_wksp() :
135798 - * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
135799 - * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`` */
135800 -static size_t FSE_count_parallel_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned checkMax,
135801 -                                     unsigned *const workSpace)
135803 -       const BYTE *ip = (const BYTE *)source;
135804 -       const BYTE *const iend = ip + sourceSize;
135805 -       unsigned maxSymbolValue = *maxSymbolValuePtr;
135806 -       unsigned max = 0;
135807 -       U32 *const Counting1 = workSpace;
135808 -       U32 *const Counting2 = Counting1 + 256;
135809 -       U32 *const Counting3 = Counting2 + 256;
135810 -       U32 *const Counting4 = Counting3 + 256;
135812 -       memset(Counting1, 0, 4 * 256 * sizeof(unsigned));
135814 -       /* safety checks */
135815 -       if (!sourceSize) {
135816 -               memset(count, 0, maxSymbolValue + 1);
135817 -               *maxSymbolValuePtr = 0;
135818 -               return 0;
135819 -       }
135820 -       if (!maxSymbolValue)
135821 -               maxSymbolValue = 255; /* 0 == default */
135823 -       /* by stripes of 16 bytes */
135824 -       {
135825 -               U32 cached = ZSTD_read32(ip);
135826 -               ip += 4;
135827 -               while (ip < iend - 15) {
135828 -                       U32 c = cached;
135829 -                       cached = ZSTD_read32(ip);
135830 -                       ip += 4;
135831 -                       Counting1[(BYTE)c]++;
135832 -                       Counting2[(BYTE)(c >> 8)]++;
135833 -                       Counting3[(BYTE)(c >> 16)]++;
135834 -                       Counting4[c >> 24]++;
135835 -                       c = cached;
135836 -                       cached = ZSTD_read32(ip);
135837 -                       ip += 4;
135838 -                       Counting1[(BYTE)c]++;
135839 -                       Counting2[(BYTE)(c >> 8)]++;
135840 -                       Counting3[(BYTE)(c >> 16)]++;
135841 -                       Counting4[c >> 24]++;
135842 -                       c = cached;
135843 -                       cached = ZSTD_read32(ip);
135844 -                       ip += 4;
135845 -                       Counting1[(BYTE)c]++;
135846 -                       Counting2[(BYTE)(c >> 8)]++;
135847 -                       Counting3[(BYTE)(c >> 16)]++;
135848 -                       Counting4[c >> 24]++;
135849 -                       c = cached;
135850 -                       cached = ZSTD_read32(ip);
135851 -                       ip += 4;
135852 -                       Counting1[(BYTE)c]++;
135853 -                       Counting2[(BYTE)(c >> 8)]++;
135854 -                       Counting3[(BYTE)(c >> 16)]++;
135855 -                       Counting4[c >> 24]++;
135856 -               }
135857 -               ip -= 4;
135858 -       }
135860 -       /* finish last symbols */
135861 -       while (ip < iend)
135862 -               Counting1[*ip++]++;
135864 -       if (checkMax) { /* verify stats will fit into destination table */
135865 -               U32 s;
135866 -               for (s = 255; s > maxSymbolValue; s--) {
135867 -                       Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
135868 -                       if (Counting1[s])
135869 -                               return ERROR(maxSymbolValue_tooSmall);
135870 -               }
135871 -       }
135873 -       {
135874 -               U32 s;
135875 -               for (s = 0; s <= maxSymbolValue; s++) {
135876 -                       count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
135877 -                       if (count[s] > max)
135878 -                               max = count[s];
135879 -               }
135880 -       }
135882 -       while (!count[maxSymbolValue])
135883 -               maxSymbolValue--;
135884 -       *maxSymbolValuePtr = maxSymbolValue;
135885 -       return (size_t)max;
135888 -/* FSE_countFast_wksp() :
135889 - * Same as FSE_countFast(), but using an externally provided scratch buffer.
135890 - * `workSpace` size must be table of >= `1024` unsigned */
135891 -size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
135893 -       if (sourceSize < 1500)
135894 -               return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
135895 -       return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
135898 -/* FSE_count_wksp() :
135899 - * Same as FSE_count(), but using an externally provided scratch buffer.
135900 - * `workSpace` size must be table of >= `1024` unsigned */
135901 -size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
135903 -       if (*maxSymbolValuePtr < 255)
135904 -               return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
135905 -       *maxSymbolValuePtr = 255;
135906 -       return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
135909 -/*-**************************************************************
135910 -*  FSE Compression Code
135911 -****************************************************************/
135912 -/*! FSE_sizeof_CTable() :
135913 -       FSE_CTable is a variable size structure which contains :
135914 -       `U16 tableLog;`
135915 -       `U16 maxSymbolValue;`
135916 -       `U16 nextStateNumber[1 << tableLog];`                         // This size is variable
135917 -       `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];`  // This size is variable
135918 -Allocation is manual (C standard does not support variable-size structures).
135920 -size_t FSE_sizeof_CTable(unsigned maxSymbolValue, unsigned tableLog)
135922 -       if (tableLog > FSE_MAX_TABLELOG)
135923 -               return ERROR(tableLog_tooLarge);
135924 -       return FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue) * sizeof(U32);
135927 -/* provides the minimum logSize to safely represent a distribution */
135928 -static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
135930 -       U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
135931 -       U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
135932 -       U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
135933 -       return minBits;
135936 -unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
135938 -       U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
135939 -       U32 tableLog = maxTableLog;
135940 -       U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
135941 -       if (tableLog == 0)
135942 -               tableLog = FSE_DEFAULT_TABLELOG;
135943 -       if (maxBitsSrc < tableLog)
135944 -               tableLog = maxBitsSrc; /* Accuracy can be reduced */
135945 -       if (minBits > tableLog)
135946 -               tableLog = minBits; /* Need a minimum to safely represent all symbol values */
135947 -       if (tableLog < FSE_MIN_TABLELOG)
135948 -               tableLog = FSE_MIN_TABLELOG;
135949 -       if (tableLog > FSE_MAX_TABLELOG)
135950 -               tableLog = FSE_MAX_TABLELOG;
135951 -       return tableLog;
135954 -unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
135956 -       return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
135959 -/* Secondary normalization method.
135960 -   To be used when primary method fails. */
135962 -static size_t FSE_normalizeM2(short *norm, U32 tableLog, const unsigned *count, size_t total, U32 maxSymbolValue)
135964 -       short const NOT_YET_ASSIGNED = -2;
135965 -       U32 s;
135966 -       U32 distributed = 0;
135967 -       U32 ToDistribute;
135969 -       /* Init */
135970 -       U32 const lowThreshold = (U32)(total >> tableLog);
135971 -       U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
135973 -       for (s = 0; s <= maxSymbolValue; s++) {
135974 -               if (count[s] == 0) {
135975 -                       norm[s] = 0;
135976 -                       continue;
135977 -               }
135978 -               if (count[s] <= lowThreshold) {
135979 -                       norm[s] = -1;
135980 -                       distributed++;
135981 -                       total -= count[s];
135982 -                       continue;
135983 -               }
135984 -               if (count[s] <= lowOne) {
135985 -                       norm[s] = 1;
135986 -                       distributed++;
135987 -                       total -= count[s];
135988 -                       continue;
135989 -               }
135991 -               norm[s] = NOT_YET_ASSIGNED;
135992 -       }
135993 -       ToDistribute = (1 << tableLog) - distributed;
135995 -       if ((total / ToDistribute) > lowOne) {
135996 -               /* risk of rounding to zero */
135997 -               lowOne = (U32)((total * 3) / (ToDistribute * 2));
135998 -               for (s = 0; s <= maxSymbolValue; s++) {
135999 -                       if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
136000 -                               norm[s] = 1;
136001 -                               distributed++;
136002 -                               total -= count[s];
136003 -                               continue;
136004 -                       }
136005 -               }
136006 -               ToDistribute = (1 << tableLog) - distributed;
136007 -       }
136009 -       if (distributed == maxSymbolValue + 1) {
136010 -               /* all values are pretty poor;
136011 -                  probably incompressible data (should have already been detected);
136012 -                  find max, then give all remaining points to max */
136013 -               U32 maxV = 0, maxC = 0;
136014 -               for (s = 0; s <= maxSymbolValue; s++)
136015 -                       if (count[s] > maxC)
136016 -                               maxV = s, maxC = count[s];
136017 -               norm[maxV] += (short)ToDistribute;
136018 -               return 0;
136019 -       }
136021 -       if (total == 0) {
136022 -               /* all of the symbols were low enough for the lowOne or lowThreshold */
136023 -               for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1))
136024 -                       if (norm[s] > 0)
136025 -                               ToDistribute--, norm[s]++;
136026 -               return 0;
136027 -       }
136029 -       {
136030 -               U64 const vStepLog = 62 - tableLog;
136031 -               U64 const mid = (1ULL << (vStepLog - 1)) - 1;
136032 -               U64 const rStep = div_u64((((U64)1 << vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
136033 -               U64 tmpTotal = mid;
136034 -               for (s = 0; s <= maxSymbolValue; s++) {
136035 -                       if (norm[s] == NOT_YET_ASSIGNED) {
136036 -                               U64 const end = tmpTotal + (count[s] * rStep);
136037 -                               U32 const sStart = (U32)(tmpTotal >> vStepLog);
136038 -                               U32 const sEnd = (U32)(end >> vStepLog);
136039 -                               U32 const weight = sEnd - sStart;
136040 -                               if (weight < 1)
136041 -                                       return ERROR(GENERIC);
136042 -                               norm[s] = (short)weight;
136043 -                               tmpTotal = end;
136044 -                       }
136045 -               }
136046 -       }
136048 -       return 0;
136051 -size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t total, unsigned maxSymbolValue)
136053 -       /* Sanity checks */
136054 -       if (tableLog == 0)
136055 -               tableLog = FSE_DEFAULT_TABLELOG;
136056 -       if (tableLog < FSE_MIN_TABLELOG)
136057 -               return ERROR(GENERIC); /* Unsupported size */
136058 -       if (tableLog > FSE_MAX_TABLELOG)
136059 -               return ERROR(tableLog_tooLarge); /* Unsupported size */
136060 -       if (tableLog < FSE_minTableLog(total, maxSymbolValue))
136061 -               return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
136063 -       {
136064 -               U32 const rtbTable[] = {0, 473195, 504333, 520860, 550000, 700000, 750000, 830000};
136065 -               U64 const scale = 62 - tableLog;
136066 -               U64 const step = div_u64((U64)1 << 62, (U32)total); /* <== here, one division ! */
136067 -               U64 const vStep = 1ULL << (scale - 20);
136068 -               int stillToDistribute = 1 << tableLog;
136069 -               unsigned s;
136070 -               unsigned largest = 0;
136071 -               short largestP = 0;
136072 -               U32 lowThreshold = (U32)(total >> tableLog);
136074 -               for (s = 0; s <= maxSymbolValue; s++) {
136075 -                       if (count[s] == total)
136076 -                               return 0; /* rle special case */
136077 -                       if (count[s] == 0) {
136078 -                               normalizedCounter[s] = 0;
136079 -                               continue;
136080 -                       }
136081 -                       if (count[s] <= lowThreshold) {
136082 -                               normalizedCounter[s] = -1;
136083 -                               stillToDistribute--;
136084 -                       } else {
136085 -                               short proba = (short)((count[s] * step) >> scale);
136086 -                               if (proba < 8) {
136087 -                                       U64 restToBeat = vStep * rtbTable[proba];
136088 -                                       proba += (count[s] * step) - ((U64)proba << scale) > restToBeat;
136089 -                               }
136090 -                               if (proba > largestP)
136091 -                                       largestP = proba, largest = s;
136092 -                               normalizedCounter[s] = proba;
136093 -                               stillToDistribute -= proba;
136094 -                       }
136095 -               }
136096 -               if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
136097 -                       /* corner case, need another normalization method */
136098 -                       size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
136099 -                       if (FSE_isError(errorCode))
136100 -                               return errorCode;
136101 -               } else
136102 -                       normalizedCounter[largest] += (short)stillToDistribute;
136103 -       }
136105 -       return tableLog;
136108 -/* fake FSE_CTable, for raw (uncompressed) input */
136109 -size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits)
136111 -       const unsigned tableSize = 1 << nbBits;
136112 -       const unsigned tableMask = tableSize - 1;
136113 -       const unsigned maxSymbolValue = tableMask;
136114 -       void *const ptr = ct;
136115 -       U16 *const tableU16 = ((U16 *)ptr) + 2;
136116 -       void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableSize >> 1); /* assumption : tableLog >= 1 */
136117 -       FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
136118 -       unsigned s;
136120 -       /* Sanity checks */
136121 -       if (nbBits < 1)
136122 -               return ERROR(GENERIC); /* min size */
136124 -       /* header */
136125 -       tableU16[-2] = (U16)nbBits;
136126 -       tableU16[-1] = (U16)maxSymbolValue;
136128 -       /* Build table */
136129 -       for (s = 0; s < tableSize; s++)
136130 -               tableU16[s] = (U16)(tableSize + s);
136132 -       /* Build Symbol Transformation Table */
136133 -       {
136134 -               const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
136135 -               for (s = 0; s <= maxSymbolValue; s++) {
136136 -                       symbolTT[s].deltaNbBits = deltaNbBits;
136137 -                       symbolTT[s].deltaFindState = s - 1;
136138 -               }
136139 -       }
136141 -       return 0;
136144 -/* fake FSE_CTable, for rle input (always same symbol) */
136145 -size_t FSE_buildCTable_rle(FSE_CTable *ct, BYTE symbolValue)
136147 -       void *ptr = ct;
136148 -       U16 *tableU16 = ((U16 *)ptr) + 2;
136149 -       void *FSCTptr = (U32 *)ptr + 2;
136150 -       FSE_symbolCompressionTransform *symbolTT = (FSE_symbolCompressionTransform *)FSCTptr;
136152 -       /* header */
136153 -       tableU16[-2] = (U16)0;
136154 -       tableU16[-1] = (U16)symbolValue;
136156 -       /* Build table */
136157 -       tableU16[0] = 0;
136158 -       tableU16[1] = 0; /* just in case */
136160 -       /* Build Symbol Transformation Table */
136161 -       symbolTT[symbolValue].deltaNbBits = 0;
136162 -       symbolTT[symbolValue].deltaFindState = 0;
136164 -       return 0;
136167 -static size_t FSE_compress_usingCTable_generic(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast)
136169 -       const BYTE *const istart = (const BYTE *)src;
136170 -       const BYTE *const iend = istart + srcSize;
136171 -       const BYTE *ip = iend;
136173 -       BIT_CStream_t bitC;
136174 -       FSE_CState_t CState1, CState2;
136176 -       /* init */
136177 -       if (srcSize <= 2)
136178 -               return 0;
136179 -       {
136180 -               size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
136181 -               if (FSE_isError(initError))
136182 -                       return 0; /* not enough space available to write a bitstream */
136183 -       }
136185 -#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
136187 -       if (srcSize & 1) {
136188 -               FSE_initCState2(&CState1, ct, *--ip);
136189 -               FSE_initCState2(&CState2, ct, *--ip);
136190 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
136191 -               FSE_FLUSHBITS(&bitC);
136192 -       } else {
136193 -               FSE_initCState2(&CState2, ct, *--ip);
136194 -               FSE_initCState2(&CState1, ct, *--ip);
136195 -       }
136197 -       /* join to mod 4 */
136198 -       srcSize -= 2;
136199 -       if ((sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) && (srcSize & 2)) { /* test bit 2 */
136200 -               FSE_encodeSymbol(&bitC, &CState2, *--ip);
136201 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
136202 -               FSE_FLUSHBITS(&bitC);
136203 -       }
136205 -       /* 2 or 4 encoding per loop */
136206 -       while (ip > istart) {
136208 -               FSE_encodeSymbol(&bitC, &CState2, *--ip);
136210 -               if (sizeof(bitC.bitContainer) * 8 < FSE_MAX_TABLELOG * 2 + 7) /* this test must be static */
136211 -                       FSE_FLUSHBITS(&bitC);
136213 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
136215 -               if (sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) { /* this test must be static */
136216 -                       FSE_encodeSymbol(&bitC, &CState2, *--ip);
136217 -                       FSE_encodeSymbol(&bitC, &CState1, *--ip);
136218 -               }
136220 -               FSE_FLUSHBITS(&bitC);
136221 -       }
136223 -       FSE_flushCState(&bitC, &CState2);
136224 -       FSE_flushCState(&bitC, &CState1);
136225 -       return BIT_closeCStream(&bitC);
136228 -size_t FSE_compress_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct)
136230 -       unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
136232 -       if (fast)
136233 -               return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
136234 -       else
136235 -               return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
136238 -size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
136239 diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c
136240 deleted file mode 100644
136241 index 0b353530fb3f..000000000000
136242 --- a/lib/zstd/fse_decompress.c
136243 +++ /dev/null
136244 @@ -1,325 +0,0 @@
136246 - * FSE : Finite State Entropy decoder
136247 - * Copyright (C) 2013-2015, Yann Collet.
136249 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
136251 - * Redistribution and use in source and binary forms, with or without
136252 - * modification, are permitted provided that the following conditions are
136253 - * met:
136255 - *   * Redistributions of source code must retain the above copyright
136256 - * notice, this list of conditions and the following disclaimer.
136257 - *   * Redistributions in binary form must reproduce the above
136258 - * copyright notice, this list of conditions and the following disclaimer
136259 - * in the documentation and/or other materials provided with the
136260 - * distribution.
136262 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
136263 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
136264 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
136265 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
136266 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
136267 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
136268 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
136269 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
136270 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
136271 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
136272 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
136274 - * This program is free software; you can redistribute it and/or modify it under
136275 - * the terms of the GNU General Public License version 2 as published by the
136276 - * Free Software Foundation. This program is dual-licensed; you may select
136277 - * either version 2 of the GNU General Public License ("GPL") or BSD license
136278 - * ("BSD").
136280 - * You can contact the author at :
136281 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
136282 - */
136284 -/* **************************************************************
136285 -*  Compiler specifics
136286 -****************************************************************/
136287 -#define FORCE_INLINE static __always_inline
136289 -/* **************************************************************
136290 -*  Includes
136291 -****************************************************************/
136292 -#include "bitstream.h"
136293 -#include "fse.h"
136294 -#include "zstd_internal.h"
136295 -#include <linux/compiler.h>
136296 -#include <linux/kernel.h>
136297 -#include <linux/string.h> /* memcpy, memset */
136299 -/* **************************************************************
136300 -*  Error Management
136301 -****************************************************************/
136302 -#define FSE_isError ERR_isError
136303 -#define FSE_STATIC_ASSERT(c)                                   \
136304 -       {                                                      \
136305 -               enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
136306 -       } /* use only *after* variable declarations */
136308 -/* **************************************************************
136309 -*  Templates
136310 -****************************************************************/
136312 -  designed to be included
136313 -  for type-specific functions (template emulation in C)
136314 -  Objective is to write these functions only once, for improved maintenance
136317 -/* safety checks */
136318 -#ifndef FSE_FUNCTION_EXTENSION
136319 -#error "FSE_FUNCTION_EXTENSION must be defined"
136320 -#endif
136321 -#ifndef FSE_FUNCTION_TYPE
136322 -#error "FSE_FUNCTION_TYPE must be defined"
136323 -#endif
136325 -/* Function names */
136326 -#define FSE_CAT(X, Y) X##Y
136327 -#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
136328 -#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
136330 -/* Function templates */
136332 -size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
136334 -       void *const tdPtr = dt + 1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
136335 -       FSE_DECODE_TYPE *const tableDecode = (FSE_DECODE_TYPE *)(tdPtr);
136336 -       U16 *symbolNext = (U16 *)workspace;
136338 -       U32 const maxSV1 = maxSymbolValue + 1;
136339 -       U32 const tableSize = 1 << tableLog;
136340 -       U32 highThreshold = tableSize - 1;
136342 -       /* Sanity Checks */
136343 -       if (workspaceSize < sizeof(U16) * (FSE_MAX_SYMBOL_VALUE + 1))
136344 -               return ERROR(tableLog_tooLarge);
136345 -       if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE)
136346 -               return ERROR(maxSymbolValue_tooLarge);
136347 -       if (tableLog > FSE_MAX_TABLELOG)
136348 -               return ERROR(tableLog_tooLarge);
136350 -       /* Init, lay down lowprob symbols */
136351 -       {
136352 -               FSE_DTableHeader DTableH;
136353 -               DTableH.tableLog = (U16)tableLog;
136354 -               DTableH.fastMode = 1;
136355 -               {
136356 -                       S16 const largeLimit = (S16)(1 << (tableLog - 1));
136357 -                       U32 s;
136358 -                       for (s = 0; s < maxSV1; s++) {
136359 -                               if (normalizedCounter[s] == -1) {
136360 -                                       tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
136361 -                                       symbolNext[s] = 1;
136362 -                               } else {
136363 -                                       if (normalizedCounter[s] >= largeLimit)
136364 -                                               DTableH.fastMode = 0;
136365 -                                       symbolNext[s] = normalizedCounter[s];
136366 -                               }
136367 -                       }
136368 -               }
136369 -               memcpy(dt, &DTableH, sizeof(DTableH));
136370 -       }
136372 -       /* Spread symbols */
136373 -       {
136374 -               U32 const tableMask = tableSize - 1;
136375 -               U32 const step = FSE_TABLESTEP(tableSize);
136376 -               U32 s, position = 0;
136377 -               for (s = 0; s < maxSV1; s++) {
136378 -                       int i;
136379 -                       for (i = 0; i < normalizedCounter[s]; i++) {
136380 -                               tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
136381 -                               position = (position + step) & tableMask;
136382 -                               while (position > highThreshold)
136383 -                                       position = (position + step) & tableMask; /* lowprob area */
136384 -                       }
136385 -               }
136386 -               if (position != 0)
136387 -                       return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
136388 -       }
136390 -       /* Build Decoding table */
136391 -       {
136392 -               U32 u;
136393 -               for (u = 0; u < tableSize; u++) {
136394 -                       FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
136395 -                       U16 nextState = symbolNext[symbol]++;
136396 -                       tableDecode[u].nbBits = (BYTE)(tableLog - BIT_highbit32((U32)nextState));
136397 -                       tableDecode[u].newState = (U16)((nextState << tableDecode[u].nbBits) - tableSize);
136398 -               }
136399 -       }
136401 -       return 0;
136404 -/*-*******************************************************
136405 -*  Decompression (Byte symbols)
136406 -*********************************************************/
136407 -size_t FSE_buildDTable_rle(FSE_DTable *dt, BYTE symbolValue)
136409 -       void *ptr = dt;
136410 -       FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
136411 -       void *dPtr = dt + 1;
136412 -       FSE_decode_t *const cell = (FSE_decode_t *)dPtr;
136414 -       DTableH->tableLog = 0;
136415 -       DTableH->fastMode = 0;
136417 -       cell->newState = 0;
136418 -       cell->symbol = symbolValue;
136419 -       cell->nbBits = 0;
136421 -       return 0;
136424 -size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits)
136426 -       void *ptr = dt;
136427 -       FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
136428 -       void *dPtr = dt + 1;
136429 -       FSE_decode_t *const dinfo = (FSE_decode_t *)dPtr;
136430 -       const unsigned tableSize = 1 << nbBits;
136431 -       const unsigned tableMask = tableSize - 1;
136432 -       const unsigned maxSV1 = tableMask + 1;
136433 -       unsigned s;
136435 -       /* Sanity checks */
136436 -       if (nbBits < 1)
136437 -               return ERROR(GENERIC); /* min size */
136439 -       /* Build Decoding Table */
136440 -       DTableH->tableLog = (U16)nbBits;
136441 -       DTableH->fastMode = 1;
136442 -       for (s = 0; s < maxSV1; s++) {
136443 -               dinfo[s].newState = 0;
136444 -               dinfo[s].symbol = (BYTE)s;
136445 -               dinfo[s].nbBits = (BYTE)nbBits;
136446 -       }
136448 -       return 0;
136451 -FORCE_INLINE size_t FSE_decompress_usingDTable_generic(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt,
136452 -                                                      const unsigned fast)
136454 -       BYTE *const ostart = (BYTE *)dst;
136455 -       BYTE *op = ostart;
136456 -       BYTE *const omax = op + maxDstSize;
136457 -       BYTE *const olimit = omax - 3;
136459 -       BIT_DStream_t bitD;
136460 -       FSE_DState_t state1;
136461 -       FSE_DState_t state2;
136463 -       /* Init */
136464 -       CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
136466 -       FSE_initDState(&state1, &bitD, dt);
136467 -       FSE_initDState(&state2, &bitD, dt);
136469 -#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
136471 -       /* 4 symbols per loop */
136472 -       for (; (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) & (op < olimit); op += 4) {
136473 -               op[0] = FSE_GETSYMBOL(&state1);
136475 -               if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
136476 -                       BIT_reloadDStream(&bitD);
136478 -               op[1] = FSE_GETSYMBOL(&state2);
136480 -               if (FSE_MAX_TABLELOG * 4 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
136481 -               {
136482 -                       if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) {
136483 -                               op += 2;
136484 -                               break;
136485 -                       }
136486 -               }
136488 -               op[2] = FSE_GETSYMBOL(&state1);
136490 -               if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
136491 -                       BIT_reloadDStream(&bitD);
136493 -               op[3] = FSE_GETSYMBOL(&state2);
136494 -       }
136496 -       /* tail */
136497 -       /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
136498 -       while (1) {
136499 -               if (op > (omax - 2))
136500 -                       return ERROR(dstSize_tooSmall);
136501 -               *op++ = FSE_GETSYMBOL(&state1);
136502 -               if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
136503 -                       *op++ = FSE_GETSYMBOL(&state2);
136504 -                       break;
136505 -               }
136507 -               if (op > (omax - 2))
136508 -                       return ERROR(dstSize_tooSmall);
136509 -               *op++ = FSE_GETSYMBOL(&state2);
136510 -               if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
136511 -                       *op++ = FSE_GETSYMBOL(&state1);
136512 -                       break;
136513 -               }
136514 -       }
136516 -       return op - ostart;
136519 -size_t FSE_decompress_usingDTable(void *dst, size_t originalSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt)
136521 -       const void *ptr = dt;
136522 -       const FSE_DTableHeader *DTableH = (const FSE_DTableHeader *)ptr;
136523 -       const U32 fastMode = DTableH->fastMode;
136525 -       /* select fast mode (static) */
136526 -       if (fastMode)
136527 -               return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
136528 -       return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
136531 -size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize)
136533 -       const BYTE *const istart = (const BYTE *)cSrc;
136534 -       const BYTE *ip = istart;
136535 -       unsigned tableLog;
136536 -       unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
136537 -       size_t NCountLength;
136539 -       FSE_DTable *dt;
136540 -       short *counting;
136541 -       size_t spaceUsed32 = 0;
136543 -       FSE_STATIC_ASSERT(sizeof(FSE_DTable) == sizeof(U32));
136545 -       dt = (FSE_DTable *)((U32 *)workspace + spaceUsed32);
136546 -       spaceUsed32 += FSE_DTABLE_SIZE_U32(maxLog);
136547 -       counting = (short *)((U32 *)workspace + spaceUsed32);
136548 -       spaceUsed32 += ALIGN(sizeof(short) * (FSE_MAX_SYMBOL_VALUE + 1), sizeof(U32)) >> 2;
136550 -       if ((spaceUsed32 << 2) > workspaceSize)
136551 -               return ERROR(tableLog_tooLarge);
136552 -       workspace = (U32 *)workspace + spaceUsed32;
136553 -       workspaceSize -= (spaceUsed32 << 2);
136555 -       /* normal FSE decoding mode */
136556 -       NCountLength = FSE_readNCount(counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
136557 -       if (FSE_isError(NCountLength))
136558 -               return NCountLength;
136559 -       // if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size; supposed to be already checked in NCountLength, only remaining
136560 -       // case : NCountLength==cSrcSize */
136561 -       if (tableLog > maxLog)
136562 -               return ERROR(tableLog_tooLarge);
136563 -       ip += NCountLength;
136564 -       cSrcSize -= NCountLength;
136566 -       CHECK_F(FSE_buildDTable_wksp(dt, counting, maxSymbolValue, tableLog, workspace, workspaceSize));
136568 -       return FSE_decompress_usingDTable(dst, dstCapacity, ip, cSrcSize, dt); /* always return, even if it is an error code */
136570 diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h
136571 deleted file mode 100644
136572 index 2143da28d952..000000000000
136573 --- a/lib/zstd/huf.h
136574 +++ /dev/null
136575 @@ -1,212 +0,0 @@
136577 - * Huffman coder, part of New Generation Entropy library
136578 - * header file
136579 - * Copyright (C) 2013-2016, Yann Collet.
136581 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
136583 - * Redistribution and use in source and binary forms, with or without
136584 - * modification, are permitted provided that the following conditions are
136585 - * met:
136587 - *   * Redistributions of source code must retain the above copyright
136588 - * notice, this list of conditions and the following disclaimer.
136589 - *   * Redistributions in binary form must reproduce the above
136590 - * copyright notice, this list of conditions and the following disclaimer
136591 - * in the documentation and/or other materials provided with the
136592 - * distribution.
136594 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
136595 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
136596 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
136597 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
136598 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
136599 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
136600 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
136601 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
136602 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
136603 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
136604 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
136606 - * This program is free software; you can redistribute it and/or modify it under
136607 - * the terms of the GNU General Public License version 2 as published by the
136608 - * Free Software Foundation. This program is dual-licensed; you may select
136609 - * either version 2 of the GNU General Public License ("GPL") or BSD license
136610 - * ("BSD").
136612 - * You can contact the author at :
136613 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
136614 - */
136615 -#ifndef HUF_H_298734234
136616 -#define HUF_H_298734234
136618 -/* *** Dependencies *** */
136619 -#include <linux/types.h> /* size_t */
136621 -/* ***   Tool functions *** */
136622 -#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
136623 -size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
136625 -/* Error Management */
136626 -unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
136628 -/* ***   Advanced function   *** */
136630 -/** HUF_compress4X_wksp() :
136631 -*   Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
136632 -size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
136633 -                          size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
136635 -/* *** Dependencies *** */
136636 -#include "mem.h" /* U32 */
136638 -/* *** Constants *** */
136639 -#define HUF_TABLELOG_MAX 12     /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
136640 -#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
136641 -#define HUF_SYMBOLVALUE_MAX 255
136643 -#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
136644 -#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
136645 -#error "HUF_TABLELOG_MAX is too large !"
136646 -#endif
136648 -/* ****************************************
136649 -*  Static allocation
136650 -******************************************/
136651 -/* HUF buffer bounds */
136652 -#define HUF_CTABLEBOUND 129
136653 -#define HUF_BLOCKBOUND(size) (size + (size >> 8) + 8)                   /* only true if incompressible pre-filtered with fast heuristic */
136654 -#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
136656 -/* static allocation of HUF's Compression Table */
136657 -#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
136658 -       U32 name##hb[maxSymbolValue + 1];              \
136659 -       void *name##hv = &(name##hb);                  \
136660 -       HUF_CElt *name = (HUF_CElt *)(name##hv) /* no final ; */
136662 -/* static allocation of HUF's DTable */
136663 -typedef U32 HUF_DTable;
136664 -#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1 << (maxTableLog)))
136665 -#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = {((U32)((maxTableLog)-1) * 0x01000001)}
136666 -#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = {((U32)(maxTableLog)*0x01000001)}
136668 -/* The workspace must have alignment at least 4 and be at least this large */
136669 -#define HUF_COMPRESS_WORKSPACE_SIZE (6 << 10)
136670 -#define HUF_COMPRESS_WORKSPACE_SIZE_U32 (HUF_COMPRESS_WORKSPACE_SIZE / sizeof(U32))
136672 -/* The workspace must have alignment at least 4 and be at least this large */
136673 -#define HUF_DECOMPRESS_WORKSPACE_SIZE (3 << 10)
136674 -#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
136676 -/* ****************************************
136677 -*  Advanced decompression functions
136678 -******************************************/
136679 -size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); /**< decodes RLE and uncompressed */
136680 -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
136681 -                               size_t workspaceSize);                                                         /**< considers RLE and uncompressed as errors */
136682 -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
136683 -                                  size_t workspaceSize); /**< single-symbol decoder */
136684 -size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
136685 -                                  size_t workspaceSize); /**< double-symbols decoder */
136687 -/* ****************************************
136688 -*  HUF detailed API
136689 -******************************************/
136691 -HUF_compress() does the following:
136692 -1. count symbol occurrence from source[] into table count[] using FSE_count()
136693 -2. (optional) refine tableLog using HUF_optimalTableLog()
136694 -3. build Huffman table from count using HUF_buildCTable()
136695 -4. save Huffman table to memory buffer using HUF_writeCTable_wksp()
136696 -5. encode the data stream using HUF_compress4X_usingCTable()
136698 -The following API allows targeting specific sub-functions for advanced tasks.
136699 -For example, it's possible to compress several blocks using the same 'CTable',
136700 -or to save and regenerate 'CTable' using external methods.
136702 -/* FSE_count() : find it within "fse.h" */
136703 -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
136704 -typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
136705 -size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize);
136706 -size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
136708 -typedef enum {
136709 -       HUF_repeat_none,  /**< Cannot use the previous table */
136710 -       HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1,
136711 -                            4}X_repeat */
136712 -       HUF_repeat_valid  /**< Can use the previous table and it is asumed to be valid */
136713 -} HUF_repeat;
136714 -/** HUF_compress4X_repeat() :
136715 -*   Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
136716 -*   If it uses hufTable it does not modify hufTable or repeat.
136717 -*   If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
136718 -*   If preferRepeat then the old table will always be used if valid. */
136719 -size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
136720 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
136721 -                            int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
136723 -/** HUF_buildCTable_wksp() :
136724 - *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
136725 - *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
136726 - */
136727 -size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize);
136729 -/*! HUF_readStats() :
136730 -       Read compact Huffman tree, saved by HUF_writeCTable().
136731 -       `huffWeight` is destination buffer.
136732 -       @return : size read from `src` , or an error Code .
136733 -       Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
136734 -size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize,
136735 -                         void *workspace, size_t workspaceSize);
136737 -/** HUF_readCTable() :
136738 -*   Loading a CTable saved with HUF_writeCTable() */
136739 -size_t HUF_readCTable_wksp(HUF_CElt *CTable, unsigned maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
136742 -HUF_decompress() does the following:
136743 -1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
136744 -2. build Huffman table from save, using HUF_readDTableXn()
136745 -3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
136748 -/** HUF_selectDecoder() :
136749 -*   Tells which decoder is likely to decode faster,
136750 -*   based on a set of pre-determined metrics.
136751 -*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
136752 -*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
136753 -U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize);
136755 -size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
136756 -size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
136758 -size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
136759 -size_t HUF_decompress4X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
136760 -size_t HUF_decompress4X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
136762 -/* single stream variants */
136764 -size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
136765 -                          size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
136766 -size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
136767 -/** HUF_compress1X_repeat() :
136768 -*   Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
136769 -*   If it uses hufTable it does not modify hufTable or repeat.
136770 -*   If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
136771 -*   If preferRepeat then the old table will always be used if valid. */
136772 -size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
136773 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
136774 -                            int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
136776 -size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize);
136777 -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
136778 -                                  size_t workspaceSize); /**< single-symbol decoder */
136779 -size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
136780 -                                  size_t workspaceSize); /**< double-symbols decoder */
136782 -size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize,
136783 -                                   const HUF_DTable *DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */
136784 -size_t HUF_decompress1X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
136785 -size_t HUF_decompress1X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
136787 -#endif /* HUF_H_298734234 */
136788 diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c
136789 deleted file mode 100644
136790 index fd32838c185f..000000000000
136791 --- a/lib/zstd/huf_compress.c
136792 +++ /dev/null
136793 @@ -1,773 +0,0 @@
136795 - * Huffman encoder, part of New Generation Entropy library
136796 - * Copyright (C) 2013-2016, Yann Collet.
136798 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
136800 - * Redistribution and use in source and binary forms, with or without
136801 - * modification, are permitted provided that the following conditions are
136802 - * met:
136804 - *   * Redistributions of source code must retain the above copyright
136805 - * notice, this list of conditions and the following disclaimer.
136806 - *   * Redistributions in binary form must reproduce the above
136807 - * copyright notice, this list of conditions and the following disclaimer
136808 - * in the documentation and/or other materials provided with the
136809 - * distribution.
136811 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
136812 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
136813 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
136814 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
136815 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
136816 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
136817 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
136818 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
136819 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
136820 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
136821 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
136823 - * This program is free software; you can redistribute it and/or modify it under
136824 - * the terms of the GNU General Public License version 2 as published by the
136825 - * Free Software Foundation. This program is dual-licensed; you may select
136826 - * either version 2 of the GNU General Public License ("GPL") or BSD license
136827 - * ("BSD").
136829 - * You can contact the author at :
136830 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
136831 - */
136833 -/* **************************************************************
136834 -*  Includes
136835 -****************************************************************/
136836 -#include "bitstream.h"
136837 -#include "fse.h" /* header compression */
136838 -#include "huf.h"
136839 -#include <linux/kernel.h>
136840 -#include <linux/string.h> /* memcpy, memset */
136842 -/* **************************************************************
136843 -*  Error Management
136844 -****************************************************************/
136845 -#define HUF_STATIC_ASSERT(c)                                   \
136846 -       {                                                      \
136847 -               enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
136848 -       } /* use only *after* variable declarations */
136849 -#define CHECK_V_F(e, f)     \
136850 -       size_t const e = f; \
136851 -       if (ERR_isError(e)) \
136852 -       return f
136853 -#define CHECK_F(f)                        \
136854 -       {                                 \
136855 -               CHECK_V_F(_var_err__, f); \
136856 -       }
136858 -/* **************************************************************
136859 -*  Utils
136860 -****************************************************************/
136861 -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
136863 -       return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
136866 -/* *******************************************************
136867 -*  HUF : Huffman block compression
136868 -*********************************************************/
136869 -/* HUF_compressWeights() :
136870 - * Same as FSE_compress(), but dedicated to huff0's weights compression.
136871 - * The use case needs much less stack memory.
136872 - * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
136873 - */
136874 -#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
136875 -size_t HUF_compressWeights_wksp(void *dst, size_t dstSize, const void *weightTable, size_t wtSize, void *workspace, size_t workspaceSize)
136877 -       BYTE *const ostart = (BYTE *)dst;
136878 -       BYTE *op = ostart;
136879 -       BYTE *const oend = ostart + dstSize;
136881 -       U32 maxSymbolValue = HUF_TABLELOG_MAX;
136882 -       U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
136884 -       FSE_CTable *CTable;
136885 -       U32 *count;
136886 -       S16 *norm;
136887 -       size_t spaceUsed32 = 0;
136889 -       HUF_STATIC_ASSERT(sizeof(FSE_CTable) == sizeof(U32));
136891 -       CTable = (FSE_CTable *)((U32 *)workspace + spaceUsed32);
136892 -       spaceUsed32 += FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX);
136893 -       count = (U32 *)workspace + spaceUsed32;
136894 -       spaceUsed32 += HUF_TABLELOG_MAX + 1;
136895 -       norm = (S16 *)((U32 *)workspace + spaceUsed32);
136896 -       spaceUsed32 += ALIGN(sizeof(S16) * (HUF_TABLELOG_MAX + 1), sizeof(U32)) >> 2;
136898 -       if ((spaceUsed32 << 2) > workspaceSize)
136899 -               return ERROR(tableLog_tooLarge);
136900 -       workspace = (U32 *)workspace + spaceUsed32;
136901 -       workspaceSize -= (spaceUsed32 << 2);
136903 -       /* init conditions */
136904 -       if (wtSize <= 1)
136905 -               return 0; /* Not compressible */
136907 -       /* Scan input and build symbol stats */
136908 -       {
136909 -               CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize));
136910 -               if (maxCount == wtSize)
136911 -                       return 1; /* only a single symbol in src : rle */
136912 -               if (maxCount == 1)
136913 -                       return 0; /* each symbol present maximum once => not compressible */
136914 -       }
136916 -       tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
136917 -       CHECK_F(FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue));
136919 -       /* Write table description header */
136920 -       {
136921 -               CHECK_V_F(hSize, FSE_writeNCount(op, oend - op, norm, maxSymbolValue, tableLog));
136922 -               op += hSize;
136923 -       }
136925 -       /* Compress */
136926 -       CHECK_F(FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, workspace, workspaceSize));
136927 -       {
136928 -               CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable));
136929 -               if (cSize == 0)
136930 -                       return 0; /* not enough space for compressed data */
136931 -               op += cSize;
136932 -       }
136934 -       return op - ostart;
136937 -struct HUF_CElt_s {
136938 -       U16 val;
136939 -       BYTE nbBits;
136940 -}; /* typedef'd to HUF_CElt within "huf.h" */
136942 -/*! HUF_writeCTable_wksp() :
136943 -       `CTable` : Huffman tree to save, using huf representation.
136944 -       @return : size of saved CTable */
136945 -size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, U32 maxSymbolValue, U32 huffLog, void *workspace, size_t workspaceSize)
136947 -       BYTE *op = (BYTE *)dst;
136948 -       U32 n;
136950 -       BYTE *bitsToWeight;
136951 -       BYTE *huffWeight;
136952 -       size_t spaceUsed32 = 0;
136954 -       bitsToWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
136955 -       spaceUsed32 += ALIGN(HUF_TABLELOG_MAX + 1, sizeof(U32)) >> 2;
136956 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
136957 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX, sizeof(U32)) >> 2;
136959 -       if ((spaceUsed32 << 2) > workspaceSize)
136960 -               return ERROR(tableLog_tooLarge);
136961 -       workspace = (U32 *)workspace + spaceUsed32;
136962 -       workspaceSize -= (spaceUsed32 << 2);
136964 -       /* check conditions */
136965 -       if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
136966 -               return ERROR(maxSymbolValue_tooLarge);
136968 -       /* convert to weight */
136969 -       bitsToWeight[0] = 0;
136970 -       for (n = 1; n < huffLog + 1; n++)
136971 -               bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
136972 -       for (n = 0; n < maxSymbolValue; n++)
136973 -               huffWeight[n] = bitsToWeight[CTable[n].nbBits];
136975 -       /* attempt weights compression by FSE */
136976 -       {
136977 -               CHECK_V_F(hSize, HUF_compressWeights_wksp(op + 1, maxDstSize - 1, huffWeight, maxSymbolValue, workspace, workspaceSize));
136978 -               if ((hSize > 1) & (hSize < maxSymbolValue / 2)) { /* FSE compressed */
136979 -                       op[0] = (BYTE)hSize;
136980 -                       return hSize + 1;
136981 -               }
136982 -       }
136984 -       /* write raw values as 4-bits (max : 15) */
136985 -       if (maxSymbolValue > (256 - 128))
136986 -               return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
136987 -       if (((maxSymbolValue + 1) / 2) + 1 > maxDstSize)
136988 -               return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
136989 -       op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue - 1));
136990 -       huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
136991 -       for (n = 0; n < maxSymbolValue; n += 2)
136992 -               op[(n / 2) + 1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n + 1]);
136993 -       return ((maxSymbolValue + 1) / 2) + 1;
136996 -size_t HUF_readCTable_wksp(HUF_CElt *CTable, U32 maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
136998 -       U32 *rankVal;
136999 -       BYTE *huffWeight;
137000 -       U32 tableLog = 0;
137001 -       U32 nbSymbols = 0;
137002 -       size_t readSize;
137003 -       size_t spaceUsed32 = 0;
137005 -       rankVal = (U32 *)workspace + spaceUsed32;
137006 -       spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
137007 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
137008 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
137010 -       if ((spaceUsed32 << 2) > workspaceSize)
137011 -               return ERROR(tableLog_tooLarge);
137012 -       workspace = (U32 *)workspace + spaceUsed32;
137013 -       workspaceSize -= (spaceUsed32 << 2);
137015 -       /* get symbol weights */
137016 -       readSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
137017 -       if (ERR_isError(readSize))
137018 -               return readSize;
137020 -       /* check result */
137021 -       if (tableLog > HUF_TABLELOG_MAX)
137022 -               return ERROR(tableLog_tooLarge);
137023 -       if (nbSymbols > maxSymbolValue + 1)
137024 -               return ERROR(maxSymbolValue_tooSmall);
137026 -       /* Prepare base value per rank */
137027 -       {
137028 -               U32 n, nextRankStart = 0;
137029 -               for (n = 1; n <= tableLog; n++) {
137030 -                       U32 curr = nextRankStart;
137031 -                       nextRankStart += (rankVal[n] << (n - 1));
137032 -                       rankVal[n] = curr;
137033 -               }
137034 -       }
137036 -       /* fill nbBits */
137037 -       {
137038 -               U32 n;
137039 -               for (n = 0; n < nbSymbols; n++) {
137040 -                       const U32 w = huffWeight[n];
137041 -                       CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
137042 -               }
137043 -       }
137045 -       /* fill val */
137046 -       {
137047 -               U16 nbPerRank[HUF_TABLELOG_MAX + 2] = {0}; /* support w=0=>n=tableLog+1 */
137048 -               U16 valPerRank[HUF_TABLELOG_MAX + 2] = {0};
137049 -               {
137050 -                       U32 n;
137051 -                       for (n = 0; n < nbSymbols; n++)
137052 -                               nbPerRank[CTable[n].nbBits]++;
137053 -               }
137054 -               /* determine stating value per rank */
137055 -               valPerRank[tableLog + 1] = 0; /* for w==0 */
137056 -               {
137057 -                       U16 min = 0;
137058 -                       U32 n;
137059 -                       for (n = tableLog; n > 0; n--) { /* start at n=tablelog <-> w=1 */
137060 -                               valPerRank[n] = min;     /* get starting value within each rank */
137061 -                               min += nbPerRank[n];
137062 -                               min >>= 1;
137063 -                       }
137064 -               }
137065 -               /* assign value within rank, symbol order */
137066 -               {
137067 -                       U32 n;
137068 -                       for (n = 0; n <= maxSymbolValue; n++)
137069 -                               CTable[n].val = valPerRank[CTable[n].nbBits]++;
137070 -               }
137071 -       }
137073 -       return readSize;
137076 -typedef struct nodeElt_s {
137077 -       U32 count;
137078 -       U16 parent;
137079 -       BYTE byte;
137080 -       BYTE nbBits;
137081 -} nodeElt;
137083 -static U32 HUF_setMaxHeight(nodeElt *huffNode, U32 lastNonNull, U32 maxNbBits)
137085 -       const U32 largestBits = huffNode[lastNonNull].nbBits;
137086 -       if (largestBits <= maxNbBits)
137087 -               return largestBits; /* early exit : no elt > maxNbBits */
137089 -       /* there are several too large elements (at least >= 2) */
137090 -       {
137091 -               int totalCost = 0;
137092 -               const U32 baseCost = 1 << (largestBits - maxNbBits);
137093 -               U32 n = lastNonNull;
137095 -               while (huffNode[n].nbBits > maxNbBits) {
137096 -                       totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
137097 -                       huffNode[n].nbBits = (BYTE)maxNbBits;
137098 -                       n--;
137099 -               } /* n stops at huffNode[n].nbBits <= maxNbBits */
137100 -               while (huffNode[n].nbBits == maxNbBits)
137101 -                       n--; /* n end at index of smallest symbol using < maxNbBits */
137103 -               /* renorm totalCost */
137104 -               totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
137106 -               /* repay normalized cost */
137107 -               {
137108 -                       U32 const noSymbol = 0xF0F0F0F0;
137109 -                       U32 rankLast[HUF_TABLELOG_MAX + 2];
137110 -                       int pos;
137112 -                       /* Get pos of last (smallest) symbol per rank */
137113 -                       memset(rankLast, 0xF0, sizeof(rankLast));
137114 -                       {
137115 -                               U32 currNbBits = maxNbBits;
137116 -                               for (pos = n; pos >= 0; pos--) {
137117 -                                       if (huffNode[pos].nbBits >= currNbBits)
137118 -                                               continue;
137119 -                                       currNbBits = huffNode[pos].nbBits; /* < maxNbBits */
137120 -                                       rankLast[maxNbBits - currNbBits] = pos;
137121 -                               }
137122 -                       }
137124 -                       while (totalCost > 0) {
137125 -                               U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
137126 -                               for (; nBitsToDecrease > 1; nBitsToDecrease--) {
137127 -                                       U32 highPos = rankLast[nBitsToDecrease];
137128 -                                       U32 lowPos = rankLast[nBitsToDecrease - 1];
137129 -                                       if (highPos == noSymbol)
137130 -                                               continue;
137131 -                                       if (lowPos == noSymbol)
137132 -                                               break;
137133 -                                       {
137134 -                                               U32 const highTotal = huffNode[highPos].count;
137135 -                                               U32 const lowTotal = 2 * huffNode[lowPos].count;
137136 -                                               if (highTotal <= lowTotal)
137137 -                                                       break;
137138 -                                       }
137139 -                               }
137140 -                               /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
137141 -                               /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
137142 -                               while ((nBitsToDecrease <= HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
137143 -                                       nBitsToDecrease++;
137144 -                               totalCost -= 1 << (nBitsToDecrease - 1);
137145 -                               if (rankLast[nBitsToDecrease - 1] == noSymbol)
137146 -                                       rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
137147 -                               huffNode[rankLast[nBitsToDecrease]].nbBits++;
137148 -                               if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
137149 -                                       rankLast[nBitsToDecrease] = noSymbol;
137150 -                               else {
137151 -                                       rankLast[nBitsToDecrease]--;
137152 -                                       if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits - nBitsToDecrease)
137153 -                                               rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
137154 -                               }
137155 -                       } /* while (totalCost > 0) */
137157 -                       while (totalCost < 0) {                /* Sometimes, cost correction overshoot */
137158 -                               if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0
137159 -                                                                 (using maxNbBits) */
137160 -                                       while (huffNode[n].nbBits == maxNbBits)
137161 -                                               n--;
137162 -                                       huffNode[n + 1].nbBits--;
137163 -                                       rankLast[1] = n + 1;
137164 -                                       totalCost++;
137165 -                                       continue;
137166 -                               }
137167 -                               huffNode[rankLast[1] + 1].nbBits--;
137168 -                               rankLast[1]++;
137169 -                               totalCost++;
137170 -                       }
137171 -               }
137172 -       } /* there are several too large elements (at least >= 2) */
137174 -       return maxNbBits;
137177 -typedef struct {
137178 -       U32 base;
137179 -       U32 curr;
137180 -} rankPos;
137182 -static void HUF_sort(nodeElt *huffNode, const U32 *count, U32 maxSymbolValue)
137184 -       rankPos rank[32];
137185 -       U32 n;
137187 -       memset(rank, 0, sizeof(rank));
137188 -       for (n = 0; n <= maxSymbolValue; n++) {
137189 -               U32 r = BIT_highbit32(count[n] + 1);
137190 -               rank[r].base++;
137191 -       }
137192 -       for (n = 30; n > 0; n--)
137193 -               rank[n - 1].base += rank[n].base;
137194 -       for (n = 0; n < 32; n++)
137195 -               rank[n].curr = rank[n].base;
137196 -       for (n = 0; n <= maxSymbolValue; n++) {
137197 -               U32 const c = count[n];
137198 -               U32 const r = BIT_highbit32(c + 1) + 1;
137199 -               U32 pos = rank[r].curr++;
137200 -               while ((pos > rank[r].base) && (c > huffNode[pos - 1].count))
137201 -                       huffNode[pos] = huffNode[pos - 1], pos--;
137202 -               huffNode[pos].count = c;
137203 -               huffNode[pos].byte = (BYTE)n;
137204 -       }
137207 -/** HUF_buildCTable_wksp() :
137208 - *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
137209 - *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
137210 - */
137211 -#define STARTNODE (HUF_SYMBOLVALUE_MAX + 1)
137212 -typedef nodeElt huffNodeTable[2 * HUF_SYMBOLVALUE_MAX + 1 + 1];
137213 -size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize)
137215 -       nodeElt *const huffNode0 = (nodeElt *)workSpace;
137216 -       nodeElt *const huffNode = huffNode0 + 1;
137217 -       U32 n, nonNullRank;
137218 -       int lowS, lowN;
137219 -       U16 nodeNb = STARTNODE;
137220 -       U32 nodeRoot;
137222 -       /* safety checks */
137223 -       if (wkspSize < sizeof(huffNodeTable))
137224 -               return ERROR(GENERIC); /* workSpace is not large enough */
137225 -       if (maxNbBits == 0)
137226 -               maxNbBits = HUF_TABLELOG_DEFAULT;
137227 -       if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
137228 -               return ERROR(GENERIC);
137229 -       memset(huffNode0, 0, sizeof(huffNodeTable));
137231 -       /* sort, decreasing order */
137232 -       HUF_sort(huffNode, count, maxSymbolValue);
137234 -       /* init for parents */
137235 -       nonNullRank = maxSymbolValue;
137236 -       while (huffNode[nonNullRank].count == 0)
137237 -               nonNullRank--;
137238 -       lowS = nonNullRank;
137239 -       nodeRoot = nodeNb + lowS - 1;
137240 -       lowN = nodeNb;
137241 -       huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count;
137242 -       huffNode[lowS].parent = huffNode[lowS - 1].parent = nodeNb;
137243 -       nodeNb++;
137244 -       lowS -= 2;
137245 -       for (n = nodeNb; n <= nodeRoot; n++)
137246 -               huffNode[n].count = (U32)(1U << 30);
137247 -       huffNode0[0].count = (U32)(1U << 31); /* fake entry, strong barrier */
137249 -       /* create parents */
137250 -       while (nodeNb <= nodeRoot) {
137251 -               U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
137252 -               U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
137253 -               huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
137254 -               huffNode[n1].parent = huffNode[n2].parent = nodeNb;
137255 -               nodeNb++;
137256 -       }
137258 -       /* distribute weights (unlimited tree height) */
137259 -       huffNode[nodeRoot].nbBits = 0;
137260 -       for (n = nodeRoot - 1; n >= STARTNODE; n--)
137261 -               huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
137262 -       for (n = 0; n <= nonNullRank; n++)
137263 -               huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
137265 -       /* enforce maxTableLog */
137266 -       maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
137268 -       /* fill result into tree (val, nbBits) */
137269 -       {
137270 -               U16 nbPerRank[HUF_TABLELOG_MAX + 1] = {0};
137271 -               U16 valPerRank[HUF_TABLELOG_MAX + 1] = {0};
137272 -               if (maxNbBits > HUF_TABLELOG_MAX)
137273 -                       return ERROR(GENERIC); /* check fit into table */
137274 -               for (n = 0; n <= nonNullRank; n++)
137275 -                       nbPerRank[huffNode[n].nbBits]++;
137276 -               /* determine stating value per rank */
137277 -               {
137278 -                       U16 min = 0;
137279 -                       for (n = maxNbBits; n > 0; n--) {
137280 -                               valPerRank[n] = min; /* get starting value within each rank */
137281 -                               min += nbPerRank[n];
137282 -                               min >>= 1;
137283 -                       }
137284 -               }
137285 -               for (n = 0; n <= maxSymbolValue; n++)
137286 -                       tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
137287 -               for (n = 0; n <= maxSymbolValue; n++)
137288 -                       tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
137289 -       }
137291 -       return maxNbBits;
137294 -static size_t HUF_estimateCompressedSize(HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
137296 -       size_t nbBits = 0;
137297 -       int s;
137298 -       for (s = 0; s <= (int)maxSymbolValue; ++s) {
137299 -               nbBits += CTable[s].nbBits * count[s];
137300 -       }
137301 -       return nbBits >> 3;
137304 -static int HUF_validateCTable(const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
137306 -       int bad = 0;
137307 -       int s;
137308 -       for (s = 0; s <= (int)maxSymbolValue; ++s) {
137309 -               bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
137310 -       }
137311 -       return !bad;
137314 -static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, U32 symbol, const HUF_CElt *CTable)
137316 -       BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
137319 -size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
137321 -#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
137323 -#define HUF_FLUSHBITS_1(stream)                                            \
137324 -       if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \
137325 -       HUF_FLUSHBITS(stream)
137327 -#define HUF_FLUSHBITS_2(stream)                                            \
137328 -       if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 4 + 7) \
137329 -       HUF_FLUSHBITS(stream)
137331 -size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
137333 -       const BYTE *ip = (const BYTE *)src;
137334 -       BYTE *const ostart = (BYTE *)dst;
137335 -       BYTE *const oend = ostart + dstSize;
137336 -       BYTE *op = ostart;
137337 -       size_t n;
137338 -       BIT_CStream_t bitC;
137340 -       /* init */
137341 -       if (dstSize < 8)
137342 -               return 0; /* not enough space to compress */
137343 -       {
137344 -               size_t const initErr = BIT_initCStream(&bitC, op, oend - op);
137345 -               if (HUF_isError(initErr))
137346 -                       return 0;
137347 -       }
137349 -       n = srcSize & ~3; /* join to mod 4 */
137350 -       switch (srcSize & 3) {
137351 -       case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC);
137352 -               fallthrough;
137353 -       case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC);
137354 -               fallthrough;
137355 -       case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC);
137356 -               fallthrough;
137357 -       case 0:
137358 -       default:;
137359 -       }
137361 -       for (; n > 0; n -= 4) { /* note : n&3==0 at this stage */
137362 -               HUF_encodeSymbol(&bitC, ip[n - 1], CTable);
137363 -               HUF_FLUSHBITS_1(&bitC);
137364 -               HUF_encodeSymbol(&bitC, ip[n - 2], CTable);
137365 -               HUF_FLUSHBITS_2(&bitC);
137366 -               HUF_encodeSymbol(&bitC, ip[n - 3], CTable);
137367 -               HUF_FLUSHBITS_1(&bitC);
137368 -               HUF_encodeSymbol(&bitC, ip[n - 4], CTable);
137369 -               HUF_FLUSHBITS(&bitC);
137370 -       }
137372 -       return BIT_closeCStream(&bitC);
137375 -size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
137377 -       size_t const segmentSize = (srcSize + 3) / 4; /* first 3 segments */
137378 -       const BYTE *ip = (const BYTE *)src;
137379 -       const BYTE *const iend = ip + srcSize;
137380 -       BYTE *const ostart = (BYTE *)dst;
137381 -       BYTE *const oend = ostart + dstSize;
137382 -       BYTE *op = ostart;
137384 -       if (dstSize < 6 + 1 + 1 + 1 + 8)
137385 -               return 0; /* minimum space to compress successfully */
137386 -       if (srcSize < 12)
137387 -               return 0; /* no saving possible : too small input */
137388 -       op += 6;          /* jumpTable */
137390 -       {
137391 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
137392 -               if (cSize == 0)
137393 -                       return 0;
137394 -               ZSTD_writeLE16(ostart, (U16)cSize);
137395 -               op += cSize;
137396 -       }
137398 -       ip += segmentSize;
137399 -       {
137400 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
137401 -               if (cSize == 0)
137402 -                       return 0;
137403 -               ZSTD_writeLE16(ostart + 2, (U16)cSize);
137404 -               op += cSize;
137405 -       }
137407 -       ip += segmentSize;
137408 -       {
137409 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
137410 -               if (cSize == 0)
137411 -                       return 0;
137412 -               ZSTD_writeLE16(ostart + 4, (U16)cSize);
137413 -               op += cSize;
137414 -       }
137416 -       ip += segmentSize;
137417 -       {
137418 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, iend - ip, CTable));
137419 -               if (cSize == 0)
137420 -                       return 0;
137421 -               op += cSize;
137422 -       }
137424 -       return op - ostart;
137427 -static size_t HUF_compressCTable_internal(BYTE *const ostart, BYTE *op, BYTE *const oend, const void *src, size_t srcSize, unsigned singleStream,
137428 -                                         const HUF_CElt *CTable)
137430 -       size_t const cSize =
137431 -           singleStream ? HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
137432 -       if (HUF_isError(cSize)) {
137433 -               return cSize;
137434 -       }
137435 -       if (cSize == 0) {
137436 -               return 0;
137437 -       } /* uncompressible */
137438 -       op += cSize;
137439 -       /* check compressibility */
137440 -       if ((size_t)(op - ostart) >= srcSize - 1) {
137441 -               return 0;
137442 -       }
137443 -       return op - ostart;
137446 -/* `workSpace` must a table of at least 1024 unsigned */
137447 -static size_t HUF_compress_internal(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog,
137448 -                                   unsigned singleStream, void *workSpace, size_t wkspSize, HUF_CElt *oldHufTable, HUF_repeat *repeat, int preferRepeat)
137450 -       BYTE *const ostart = (BYTE *)dst;
137451 -       BYTE *const oend = ostart + dstSize;
137452 -       BYTE *op = ostart;
137454 -       U32 *count;
137455 -       size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
137456 -       HUF_CElt *CTable;
137457 -       size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);
137459 -       /* checks & inits */
137460 -       if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize)
137461 -               return ERROR(GENERIC);
137462 -       if (!srcSize)
137463 -               return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
137464 -       if (!dstSize)
137465 -               return 0; /* cannot fit within dst budget */
137466 -       if (srcSize > HUF_BLOCKSIZE_MAX)
137467 -               return ERROR(srcSize_wrong); /* curr block size limit */
137468 -       if (huffLog > HUF_TABLELOG_MAX)
137469 -               return ERROR(tableLog_tooLarge);
137470 -       if (!maxSymbolValue)
137471 -               maxSymbolValue = HUF_SYMBOLVALUE_MAX;
137472 -       if (!huffLog)
137473 -               huffLog = HUF_TABLELOG_DEFAULT;
137475 -       count = (U32 *)workSpace;
137476 -       workSpace = (BYTE *)workSpace + countSize;
137477 -       wkspSize -= countSize;
137478 -       CTable = (HUF_CElt *)workSpace;
137479 -       workSpace = (BYTE *)workSpace + CTableSize;
137480 -       wkspSize -= CTableSize;
137482 -       /* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */
137483 -       if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
137484 -               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
137485 -       }
137487 -       /* Scan input and build symbol stats */
137488 -       {
137489 -               CHECK_V_F(largest, FSE_count_wksp(count, &maxSymbolValue, (const BYTE *)src, srcSize, (U32 *)workSpace));
137490 -               if (largest == srcSize) {
137491 -                       *ostart = ((const BYTE *)src)[0];
137492 -                       return 1;
137493 -               } /* single symbol, rle */
137494 -               if (largest <= (srcSize >> 7) + 1)
137495 -                       return 0; /* Fast heuristic : not compressible enough */
137496 -       }
137498 -       /* Check validity of previous table */
137499 -       if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
137500 -               *repeat = HUF_repeat_none;
137501 -       }
137502 -       /* Heuristic : use existing table for small inputs */
137503 -       if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
137504 -               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
137505 -       }
137507 -       /* Build Huffman Tree */
137508 -       huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
137509 -       {
137510 -               CHECK_V_F(maxBits, HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize));
137511 -               huffLog = (U32)maxBits;
137512 -               /* Zero the unused symbols so we can check it for validity */
137513 -               memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
137514 -       }
137516 -       /* Write table description header */
137517 -       {
137518 -               CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, CTable, maxSymbolValue, huffLog, workSpace, wkspSize));
137519 -               /* Check if using the previous table will be beneficial */
137520 -               if (repeat && *repeat != HUF_repeat_none) {
137521 -                       size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
137522 -                       size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
137523 -                       if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
137524 -                               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
137525 -                       }
137526 -               }
137527 -               /* Use the new table */
137528 -               if (hSize + 12ul >= srcSize) {
137529 -                       return 0;
137530 -               }
137531 -               op += hSize;
137532 -               if (repeat) {
137533 -                       *repeat = HUF_repeat_none;
137534 -               }
137535 -               if (oldHufTable) {
137536 -                       memcpy(oldHufTable, CTable, CTableSize);
137537 -               } /* Save the new table */
137538 -       }
137539 -       return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
137542 -size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
137543 -                          size_t wkspSize)
137545 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
137548 -size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
137549 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
137551 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat,
137552 -                                    preferRepeat);
137555 -size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
137556 -                          size_t wkspSize)
137558 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
137561 -size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
137562 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
137564 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat,
137565 -                                    preferRepeat);
137567 diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c
137568 deleted file mode 100644
137569 index 6526482047dc..000000000000
137570 --- a/lib/zstd/huf_decompress.c
137571 +++ /dev/null
137572 @@ -1,960 +0,0 @@
137574 - * Huffman decoder, part of New Generation Entropy library
137575 - * Copyright (C) 2013-2016, Yann Collet.
137577 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
137579 - * Redistribution and use in source and binary forms, with or without
137580 - * modification, are permitted provided that the following conditions are
137581 - * met:
137583 - *   * Redistributions of source code must retain the above copyright
137584 - * notice, this list of conditions and the following disclaimer.
137585 - *   * Redistributions in binary form must reproduce the above
137586 - * copyright notice, this list of conditions and the following disclaimer
137587 - * in the documentation and/or other materials provided with the
137588 - * distribution.
137590 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
137591 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
137592 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
137593 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
137594 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
137595 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
137596 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
137597 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
137598 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
137599 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
137600 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
137602 - * This program is free software; you can redistribute it and/or modify it under
137603 - * the terms of the GNU General Public License version 2 as published by the
137604 - * Free Software Foundation. This program is dual-licensed; you may select
137605 - * either version 2 of the GNU General Public License ("GPL") or BSD license
137606 - * ("BSD").
137608 - * You can contact the author at :
137609 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
137610 - */
137612 -/* **************************************************************
137613 -*  Compiler specifics
137614 -****************************************************************/
137615 -#define FORCE_INLINE static __always_inline
137617 -/* **************************************************************
137618 -*  Dependencies
137619 -****************************************************************/
137620 -#include "bitstream.h" /* BIT_* */
137621 -#include "fse.h"       /* header compression */
137622 -#include "huf.h"
137623 -#include <linux/compiler.h>
137624 -#include <linux/kernel.h>
137625 -#include <linux/string.h> /* memcpy, memset */
137627 -/* **************************************************************
137628 -*  Error Management
137629 -****************************************************************/
137630 -#define HUF_STATIC_ASSERT(c)                                   \
137631 -       {                                                      \
137632 -               enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
137633 -       } /* use only *after* variable declarations */
137635 -/*-***************************/
137636 -/*  generic DTableDesc       */
137637 -/*-***************************/
137639 -typedef struct {
137640 -       BYTE maxTableLog;
137641 -       BYTE tableType;
137642 -       BYTE tableLog;
137643 -       BYTE reserved;
137644 -} DTableDesc;
137646 -static DTableDesc HUF_getDTableDesc(const HUF_DTable *table)
137648 -       DTableDesc dtd;
137649 -       memcpy(&dtd, table, sizeof(dtd));
137650 -       return dtd;
137653 -/*-***************************/
137654 -/*  single-symbol decoding   */
137655 -/*-***************************/
137657 -typedef struct {
137658 -       BYTE byte;
137659 -       BYTE nbBits;
137660 -} HUF_DEltX2; /* single-symbol decoding */
137662 -size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
137664 -       U32 tableLog = 0;
137665 -       U32 nbSymbols = 0;
137666 -       size_t iSize;
137667 -       void *const dtPtr = DTable + 1;
137668 -       HUF_DEltX2 *const dt = (HUF_DEltX2 *)dtPtr;
137670 -       U32 *rankVal;
137671 -       BYTE *huffWeight;
137672 -       size_t spaceUsed32 = 0;
137674 -       rankVal = (U32 *)workspace + spaceUsed32;
137675 -       spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
137676 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
137677 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
137679 -       if ((spaceUsed32 << 2) > workspaceSize)
137680 -               return ERROR(tableLog_tooLarge);
137681 -       workspace = (U32 *)workspace + spaceUsed32;
137682 -       workspaceSize -= (spaceUsed32 << 2);
137684 -       HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
137685 -       /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
137687 -       iSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
137688 -       if (HUF_isError(iSize))
137689 -               return iSize;
137691 -       /* Table header */
137692 -       {
137693 -               DTableDesc dtd = HUF_getDTableDesc(DTable);
137694 -               if (tableLog > (U32)(dtd.maxTableLog + 1))
137695 -                       return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
137696 -               dtd.tableType = 0;
137697 -               dtd.tableLog = (BYTE)tableLog;
137698 -               memcpy(DTable, &dtd, sizeof(dtd));
137699 -       }
137701 -       /* Calculate starting value for each rank */
137702 -       {
137703 -               U32 n, nextRankStart = 0;
137704 -               for (n = 1; n < tableLog + 1; n++) {
137705 -                       U32 const curr = nextRankStart;
137706 -                       nextRankStart += (rankVal[n] << (n - 1));
137707 -                       rankVal[n] = curr;
137708 -               }
137709 -       }
137711 -       /* fill DTable */
137712 -       {
137713 -               U32 n;
137714 -               for (n = 0; n < nbSymbols; n++) {
137715 -                       U32 const w = huffWeight[n];
137716 -                       U32 const length = (1 << w) >> 1;
137717 -                       U32 u;
137718 -                       HUF_DEltX2 D;
137719 -                       D.byte = (BYTE)n;
137720 -                       D.nbBits = (BYTE)(tableLog + 1 - w);
137721 -                       for (u = rankVal[w]; u < rankVal[w] + length; u++)
137722 -                               dt[u] = D;
137723 -                       rankVal[w] += length;
137724 -               }
137725 -       }
137727 -       return iSize;
137730 -static BYTE HUF_decodeSymbolX2(BIT_DStream_t *Dstream, const HUF_DEltX2 *dt, const U32 dtLog)
137732 -       size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
137733 -       BYTE const c = dt[val].byte;
137734 -       BIT_skipBits(Dstream, dt[val].nbBits);
137735 -       return c;
137738 -#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
137740 -#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr)         \
137741 -       if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
137742 -       HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
137744 -#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
137745 -       if (ZSTD_64bits())                     \
137746 -       HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
137748 -FORCE_INLINE size_t HUF_decodeStreamX2(BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog)
137750 -       BYTE *const pStart = p;
137752 -       /* up to 4 symbols at a time */
137753 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd - 4)) {
137754 -               HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
137755 -               HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
137756 -               HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
137757 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
137758 -       }
137760 -       /* closer to the end */
137761 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
137762 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
137764 -       /* no more data to retrieve from bitstream, hence no need to reload */
137765 -       while (p < pEnd)
137766 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
137768 -       return pEnd - pStart;
137771 -static size_t HUF_decompress1X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
137773 -       BYTE *op = (BYTE *)dst;
137774 -       BYTE *const oend = op + dstSize;
137775 -       const void *dtPtr = DTable + 1;
137776 -       const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
137777 -       BIT_DStream_t bitD;
137778 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
137779 -       U32 const dtLog = dtd.tableLog;
137781 -       {
137782 -               size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
137783 -               if (HUF_isError(errorCode))
137784 -                       return errorCode;
137785 -       }
137787 -       HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
137789 -       /* check */
137790 -       if (!BIT_endOfDStream(&bitD))
137791 -               return ERROR(corruption_detected);
137793 -       return dstSize;
137796 -size_t HUF_decompress1X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
137798 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
137799 -       if (dtd.tableType != 0)
137800 -               return ERROR(GENERIC);
137801 -       return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
137804 -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
137806 -       const BYTE *ip = (const BYTE *)cSrc;
137808 -       size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
137809 -       if (HUF_isError(hSize))
137810 -               return hSize;
137811 -       if (hSize >= cSrcSize)
137812 -               return ERROR(srcSize_wrong);
137813 -       ip += hSize;
137814 -       cSrcSize -= hSize;
137816 -       return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
137819 -static size_t HUF_decompress4X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
137821 -       /* Check */
137822 -       if (cSrcSize < 10)
137823 -               return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
137825 -       {
137826 -               const BYTE *const istart = (const BYTE *)cSrc;
137827 -               BYTE *const ostart = (BYTE *)dst;
137828 -               BYTE *const oend = ostart + dstSize;
137829 -               const void *const dtPtr = DTable + 1;
137830 -               const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
137832 -               /* Init */
137833 -               BIT_DStream_t bitD1;
137834 -               BIT_DStream_t bitD2;
137835 -               BIT_DStream_t bitD3;
137836 -               BIT_DStream_t bitD4;
137837 -               size_t const length1 = ZSTD_readLE16(istart);
137838 -               size_t const length2 = ZSTD_readLE16(istart + 2);
137839 -               size_t const length3 = ZSTD_readLE16(istart + 4);
137840 -               size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
137841 -               const BYTE *const istart1 = istart + 6; /* jumpTable */
137842 -               const BYTE *const istart2 = istart1 + length1;
137843 -               const BYTE *const istart3 = istart2 + length2;
137844 -               const BYTE *const istart4 = istart3 + length3;
137845 -               const size_t segmentSize = (dstSize + 3) / 4;
137846 -               BYTE *const opStart2 = ostart + segmentSize;
137847 -               BYTE *const opStart3 = opStart2 + segmentSize;
137848 -               BYTE *const opStart4 = opStart3 + segmentSize;
137849 -               BYTE *op1 = ostart;
137850 -               BYTE *op2 = opStart2;
137851 -               BYTE *op3 = opStart3;
137852 -               BYTE *op4 = opStart4;
137853 -               U32 endSignal;
137854 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
137855 -               U32 const dtLog = dtd.tableLog;
137857 -               if (length4 > cSrcSize)
137858 -                       return ERROR(corruption_detected); /* overflow */
137859 -               {
137860 -                       size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
137861 -                       if (HUF_isError(errorCode))
137862 -                               return errorCode;
137863 -               }
137864 -               {
137865 -                       size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
137866 -                       if (HUF_isError(errorCode))
137867 -                               return errorCode;
137868 -               }
137869 -               {
137870 -                       size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
137871 -                       if (HUF_isError(errorCode))
137872 -                               return errorCode;
137873 -               }
137874 -               {
137875 -                       size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
137876 -                       if (HUF_isError(errorCode))
137877 -                               return errorCode;
137878 -               }
137880 -               /* 16-32 symbols per loop (4-8 symbols per stream) */
137881 -               endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
137882 -               for (; (endSignal == BIT_DStream_unfinished) && (op4 < (oend - 7));) {
137883 -                       HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
137884 -                       HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
137885 -                       HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
137886 -                       HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
137887 -                       HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
137888 -                       HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
137889 -                       HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
137890 -                       HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
137891 -                       HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
137892 -                       HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
137893 -                       HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
137894 -                       HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
137895 -                       HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
137896 -                       HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
137897 -                       HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
137898 -                       HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
137899 -                       endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
137900 -               }
137902 -               /* check corruption */
137903 -               if (op1 > opStart2)
137904 -                       return ERROR(corruption_detected);
137905 -               if (op2 > opStart3)
137906 -                       return ERROR(corruption_detected);
137907 -               if (op3 > opStart4)
137908 -                       return ERROR(corruption_detected);
137909 -               /* note : op4 supposed already verified within main loop */
137911 -               /* finish bitStreams one by one */
137912 -               HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
137913 -               HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
137914 -               HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
137915 -               HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
137917 -               /* check */
137918 -               endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
137919 -               if (!endSignal)
137920 -                       return ERROR(corruption_detected);
137922 -               /* decoded size */
137923 -               return dstSize;
137924 -       }
137927 -size_t HUF_decompress4X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
137929 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
137930 -       if (dtd.tableType != 0)
137931 -               return ERROR(GENERIC);
137932 -       return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
137935 -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
137937 -       const BYTE *ip = (const BYTE *)cSrc;
137939 -       size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
137940 -       if (HUF_isError(hSize))
137941 -               return hSize;
137942 -       if (hSize >= cSrcSize)
137943 -               return ERROR(srcSize_wrong);
137944 -       ip += hSize;
137945 -       cSrcSize -= hSize;
137947 -       return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
137950 -/* *************************/
137951 -/* double-symbols decoding */
137952 -/* *************************/
137953 -typedef struct {
137954 -       U16 sequence;
137955 -       BYTE nbBits;
137956 -       BYTE length;
137957 -} HUF_DEltX4; /* double-symbols decoding */
137959 -typedef struct {
137960 -       BYTE symbol;
137961 -       BYTE weight;
137962 -} sortedSymbol_t;
137964 -/* HUF_fillDTableX4Level2() :
137965 - * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
137966 -static void HUF_fillDTableX4Level2(HUF_DEltX4 *DTable, U32 sizeLog, const U32 consumed, const U32 *rankValOrigin, const int minWeight,
137967 -                                  const sortedSymbol_t *sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq)
137969 -       HUF_DEltX4 DElt;
137970 -       U32 rankVal[HUF_TABLELOG_MAX + 1];
137972 -       /* get pre-calculated rankVal */
137973 -       memcpy(rankVal, rankValOrigin, sizeof(rankVal));
137975 -       /* fill skipped values */
137976 -       if (minWeight > 1) {
137977 -               U32 i, skipSize = rankVal[minWeight];
137978 -               ZSTD_writeLE16(&(DElt.sequence), baseSeq);
137979 -               DElt.nbBits = (BYTE)(consumed);
137980 -               DElt.length = 1;
137981 -               for (i = 0; i < skipSize; i++)
137982 -                       DTable[i] = DElt;
137983 -       }
137985 -       /* fill DTable */
137986 -       {
137987 -               U32 s;
137988 -               for (s = 0; s < sortedListSize; s++) { /* note : sortedSymbols already skipped */
137989 -                       const U32 symbol = sortedSymbols[s].symbol;
137990 -                       const U32 weight = sortedSymbols[s].weight;
137991 -                       const U32 nbBits = nbBitsBaseline - weight;
137992 -                       const U32 length = 1 << (sizeLog - nbBits);
137993 -                       const U32 start = rankVal[weight];
137994 -                       U32 i = start;
137995 -                       const U32 end = start + length;
137997 -                       ZSTD_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
137998 -                       DElt.nbBits = (BYTE)(nbBits + consumed);
137999 -                       DElt.length = 2;
138000 -                       do {
138001 -                               DTable[i++] = DElt;
138002 -                       } while (i < end); /* since length >= 1 */
138004 -                       rankVal[weight] += length;
138005 -               }
138006 -       }
138009 -typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];
138010 -typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
138012 -static void HUF_fillDTableX4(HUF_DEltX4 *DTable, const U32 targetLog, const sortedSymbol_t *sortedList, const U32 sortedListSize, const U32 *rankStart,
138013 -                            rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline)
138015 -       U32 rankVal[HUF_TABLELOG_MAX + 1];
138016 -       const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
138017 -       const U32 minBits = nbBitsBaseline - maxWeight;
138018 -       U32 s;
138020 -       memcpy(rankVal, rankValOrigin, sizeof(rankVal));
138022 -       /* fill DTable */
138023 -       for (s = 0; s < sortedListSize; s++) {
138024 -               const U16 symbol = sortedList[s].symbol;
138025 -               const U32 weight = sortedList[s].weight;
138026 -               const U32 nbBits = nbBitsBaseline - weight;
138027 -               const U32 start = rankVal[weight];
138028 -               const U32 length = 1 << (targetLog - nbBits);
138030 -               if (targetLog - nbBits >= minBits) { /* enough room for a second symbol */
138031 -                       U32 sortedRank;
138032 -                       int minWeight = nbBits + scaleLog;
138033 -                       if (minWeight < 1)
138034 -                               minWeight = 1;
138035 -                       sortedRank = rankStart[minWeight];
138036 -                       HUF_fillDTableX4Level2(DTable + start, targetLog - nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList + sortedRank,
138037 -                                              sortedListSize - sortedRank, nbBitsBaseline, symbol);
138038 -               } else {
138039 -                       HUF_DEltX4 DElt;
138040 -                       ZSTD_writeLE16(&(DElt.sequence), symbol);
138041 -                       DElt.nbBits = (BYTE)(nbBits);
138042 -                       DElt.length = 1;
138043 -                       {
138044 -                               U32 const end = start + length;
138045 -                               U32 u;
138046 -                               for (u = start; u < end; u++)
138047 -                                       DTable[u] = DElt;
138048 -                       }
138049 -               }
138050 -               rankVal[weight] += length;
138051 -       }
138054 -size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
138056 -       U32 tableLog, maxW, sizeOfSort, nbSymbols;
138057 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
138058 -       U32 const maxTableLog = dtd.maxTableLog;
138059 -       size_t iSize;
138060 -       void *dtPtr = DTable + 1; /* force compiler to avoid strict-aliasing */
138061 -       HUF_DEltX4 *const dt = (HUF_DEltX4 *)dtPtr;
138062 -       U32 *rankStart;
138064 -       rankValCol_t *rankVal;
138065 -       U32 *rankStats;
138066 -       U32 *rankStart0;
138067 -       sortedSymbol_t *sortedSymbol;
138068 -       BYTE *weightList;
138069 -       size_t spaceUsed32 = 0;
138071 -       HUF_STATIC_ASSERT((sizeof(rankValCol_t) & 3) == 0);
138073 -       rankVal = (rankValCol_t *)((U32 *)workspace + spaceUsed32);
138074 -       spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
138075 -       rankStats = (U32 *)workspace + spaceUsed32;
138076 -       spaceUsed32 += HUF_TABLELOG_MAX + 1;
138077 -       rankStart0 = (U32 *)workspace + spaceUsed32;
138078 -       spaceUsed32 += HUF_TABLELOG_MAX + 2;
138079 -       sortedSymbol = (sortedSymbol_t *)((U32 *)workspace + spaceUsed32);
138080 -       spaceUsed32 += ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
138081 -       weightList = (BYTE *)((U32 *)workspace + spaceUsed32);
138082 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
138084 -       if ((spaceUsed32 << 2) > workspaceSize)
138085 -               return ERROR(tableLog_tooLarge);
138086 -       workspace = (U32 *)workspace + spaceUsed32;
138087 -       workspaceSize -= (spaceUsed32 << 2);
138089 -       rankStart = rankStart0 + 1;
138090 -       memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
138092 -       HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
138093 -       if (maxTableLog > HUF_TABLELOG_MAX)
138094 -               return ERROR(tableLog_tooLarge);
138095 -       /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
138097 -       iSize = HUF_readStats_wksp(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
138098 -       if (HUF_isError(iSize))
138099 -               return iSize;
138101 -       /* check result */
138102 -       if (tableLog > maxTableLog)
138103 -               return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
138105 -       /* find maxWeight */
138106 -       for (maxW = tableLog; rankStats[maxW] == 0; maxW--) {
138107 -       } /* necessarily finds a solution before 0 */
138109 -       /* Get start index of each weight */
138110 -       {
138111 -               U32 w, nextRankStart = 0;
138112 -               for (w = 1; w < maxW + 1; w++) {
138113 -                       U32 curr = nextRankStart;
138114 -                       nextRankStart += rankStats[w];
138115 -                       rankStart[w] = curr;
138116 -               }
138117 -               rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
138118 -               sizeOfSort = nextRankStart;
138119 -       }
138121 -       /* sort symbols by weight */
138122 -       {
138123 -               U32 s;
138124 -               for (s = 0; s < nbSymbols; s++) {
138125 -                       U32 const w = weightList[s];
138126 -                       U32 const r = rankStart[w]++;
138127 -                       sortedSymbol[r].symbol = (BYTE)s;
138128 -                       sortedSymbol[r].weight = (BYTE)w;
138129 -               }
138130 -               rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
138131 -       }
138133 -       /* Build rankVal */
138134 -       {
138135 -               U32 *const rankVal0 = rankVal[0];
138136 -               {
138137 -                       int const rescale = (maxTableLog - tableLog) - 1; /* tableLog <= maxTableLog */
138138 -                       U32 nextRankVal = 0;
138139 -                       U32 w;
138140 -                       for (w = 1; w < maxW + 1; w++) {
138141 -                               U32 curr = nextRankVal;
138142 -                               nextRankVal += rankStats[w] << (w + rescale);
138143 -                               rankVal0[w] = curr;
138144 -                       }
138145 -               }
138146 -               {
138147 -                       U32 const minBits = tableLog + 1 - maxW;
138148 -                       U32 consumed;
138149 -                       for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
138150 -                               U32 *const rankValPtr = rankVal[consumed];
138151 -                               U32 w;
138152 -                               for (w = 1; w < maxW + 1; w++) {
138153 -                                       rankValPtr[w] = rankVal0[w] >> consumed;
138154 -                               }
138155 -                       }
138156 -               }
138157 -       }
138159 -       HUF_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog + 1);
138161 -       dtd.tableLog = (BYTE)maxTableLog;
138162 -       dtd.tableType = 1;
138163 -       memcpy(DTable, &dtd, sizeof(dtd));
138164 -       return iSize;
138167 -static U32 HUF_decodeSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
138169 -       size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
138170 -       memcpy(op, dt + val, 2);
138171 -       BIT_skipBits(DStream, dt[val].nbBits);
138172 -       return dt[val].length;
138175 -static U32 HUF_decodeLastSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
138177 -       size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
138178 -       memcpy(op, dt + val, 1);
138179 -       if (dt[val].length == 1)
138180 -               BIT_skipBits(DStream, dt[val].nbBits);
138181 -       else {
138182 -               if (DStream->bitsConsumed < (sizeof(DStream->bitContainer) * 8)) {
138183 -                       BIT_skipBits(DStream, dt[val].nbBits);
138184 -                       if (DStream->bitsConsumed > (sizeof(DStream->bitContainer) * 8))
138185 -                               /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
138186 -                               DStream->bitsConsumed = (sizeof(DStream->bitContainer) * 8);
138187 -               }
138188 -       }
138189 -       return 1;
138192 -#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
138194 -#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr)         \
138195 -       if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
138196 -       ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
138198 -#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
138199 -       if (ZSTD_64bits())                     \
138200 -       ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
138202 -FORCE_INLINE size_t HUF_decodeStreamX4(BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX4 *const dt, const U32 dtLog)
138204 -       BYTE *const pStart = p;
138206 -       /* up to 8 symbols at a time */
138207 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd - (sizeof(bitDPtr->bitContainer) - 1))) {
138208 -               HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
138209 -               HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
138210 -               HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
138211 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
138212 -       }
138214 -       /* closer to end : up to 2 symbols at a time */
138215 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd - 2))
138216 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
138218 -       while (p <= pEnd - 2)
138219 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
138221 -       if (p < pEnd)
138222 -               p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
138224 -       return p - pStart;
138227 -static size_t HUF_decompress1X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
138229 -       BIT_DStream_t bitD;
138231 -       /* Init */
138232 -       {
138233 -               size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
138234 -               if (HUF_isError(errorCode))
138235 -                       return errorCode;
138236 -       }
138238 -       /* decode */
138239 -       {
138240 -               BYTE *const ostart = (BYTE *)dst;
138241 -               BYTE *const oend = ostart + dstSize;
138242 -               const void *const dtPtr = DTable + 1; /* force compiler to not use strict-aliasing */
138243 -               const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
138244 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
138245 -               HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
138246 -       }
138248 -       /* check */
138249 -       if (!BIT_endOfDStream(&bitD))
138250 -               return ERROR(corruption_detected);
138252 -       /* decoded size */
138253 -       return dstSize;
138256 -size_t HUF_decompress1X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
138258 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
138259 -       if (dtd.tableType != 1)
138260 -               return ERROR(GENERIC);
138261 -       return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
138264 -size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
138266 -       const BYTE *ip = (const BYTE *)cSrc;
138268 -       size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
138269 -       if (HUF_isError(hSize))
138270 -               return hSize;
138271 -       if (hSize >= cSrcSize)
138272 -               return ERROR(srcSize_wrong);
138273 -       ip += hSize;
138274 -       cSrcSize -= hSize;
138276 -       return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
138279 -static size_t HUF_decompress4X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
138281 -       if (cSrcSize < 10)
138282 -               return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
138284 -       {
138285 -               const BYTE *const istart = (const BYTE *)cSrc;
138286 -               BYTE *const ostart = (BYTE *)dst;
138287 -               BYTE *const oend = ostart + dstSize;
138288 -               const void *const dtPtr = DTable + 1;
138289 -               const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
138291 -               /* Init */
138292 -               BIT_DStream_t bitD1;
138293 -               BIT_DStream_t bitD2;
138294 -               BIT_DStream_t bitD3;
138295 -               BIT_DStream_t bitD4;
138296 -               size_t const length1 = ZSTD_readLE16(istart);
138297 -               size_t const length2 = ZSTD_readLE16(istart + 2);
138298 -               size_t const length3 = ZSTD_readLE16(istart + 4);
138299 -               size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
138300 -               const BYTE *const istart1 = istart + 6; /* jumpTable */
138301 -               const BYTE *const istart2 = istart1 + length1;
138302 -               const BYTE *const istart3 = istart2 + length2;
138303 -               const BYTE *const istart4 = istart3 + length3;
138304 -               size_t const segmentSize = (dstSize + 3) / 4;
138305 -               BYTE *const opStart2 = ostart + segmentSize;
138306 -               BYTE *const opStart3 = opStart2 + segmentSize;
138307 -               BYTE *const opStart4 = opStart3 + segmentSize;
138308 -               BYTE *op1 = ostart;
138309 -               BYTE *op2 = opStart2;
138310 -               BYTE *op3 = opStart3;
138311 -               BYTE *op4 = opStart4;
138312 -               U32 endSignal;
138313 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
138314 -               U32 const dtLog = dtd.tableLog;
138316 -               if (length4 > cSrcSize)
138317 -                       return ERROR(corruption_detected); /* overflow */
138318 -               {
138319 -                       size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
138320 -                       if (HUF_isError(errorCode))
138321 -                               return errorCode;
138322 -               }
138323 -               {
138324 -                       size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
138325 -                       if (HUF_isError(errorCode))
138326 -                               return errorCode;
138327 -               }
138328 -               {
138329 -                       size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
138330 -                       if (HUF_isError(errorCode))
138331 -                               return errorCode;
138332 -               }
138333 -               {
138334 -                       size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
138335 -                       if (HUF_isError(errorCode))
138336 -                               return errorCode;
138337 -               }
138339 -               /* 16-32 symbols per loop (4-8 symbols per stream) */
138340 -               endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
138341 -               for (; (endSignal == BIT_DStream_unfinished) & (op4 < (oend - (sizeof(bitD4.bitContainer) - 1)));) {
138342 -                       HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
138343 -                       HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
138344 -                       HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
138345 -                       HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
138346 -                       HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
138347 -                       HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
138348 -                       HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
138349 -                       HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
138350 -                       HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
138351 -                       HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
138352 -                       HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
138353 -                       HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
138354 -                       HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
138355 -                       HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
138356 -                       HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
138357 -                       HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
138359 -                       endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
138360 -               }
138362 -               /* check corruption */
138363 -               if (op1 > opStart2)
138364 -                       return ERROR(corruption_detected);
138365 -               if (op2 > opStart3)
138366 -                       return ERROR(corruption_detected);
138367 -               if (op3 > opStart4)
138368 -                       return ERROR(corruption_detected);
138369 -               /* note : op4 already verified within main loop */
138371 -               /* finish bitStreams one by one */
138372 -               HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
138373 -               HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
138374 -               HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
138375 -               HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
138377 -               /* check */
138378 -               {
138379 -                       U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
138380 -                       if (!endCheck)
138381 -                               return ERROR(corruption_detected);
138382 -               }
138384 -               /* decoded size */
138385 -               return dstSize;
138386 -       }
138389 -size_t HUF_decompress4X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
138391 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
138392 -       if (dtd.tableType != 1)
138393 -               return ERROR(GENERIC);
138394 -       return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
138397 -size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
138399 -       const BYTE *ip = (const BYTE *)cSrc;
138401 -       size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
138402 -       if (HUF_isError(hSize))
138403 -               return hSize;
138404 -       if (hSize >= cSrcSize)
138405 -               return ERROR(srcSize_wrong);
138406 -       ip += hSize;
138407 -       cSrcSize -= hSize;
138409 -       return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
138412 -/* ********************************/
138413 -/* Generic decompression selector */
138414 -/* ********************************/
138416 -size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
138418 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
138419 -       return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
138420 -                            : HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
138423 -size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
138425 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
138426 -       return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
138427 -                            : HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
138430 -typedef struct {
138431 -       U32 tableTime;
138432 -       U32 decode256Time;
138433 -} algo_time_t;
138434 -static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = {
138435 -    /* single, double, quad */
138436 -    {{0, 0}, {1, 1}, {2, 2}},               /* Q==0 : impossible */
138437 -    {{0, 0}, {1, 1}, {2, 2}},               /* Q==1 : impossible */
138438 -    {{38, 130}, {1313, 74}, {2151, 38}},     /* Q == 2 : 12-18% */
138439 -    {{448, 128}, {1353, 74}, {2238, 41}},    /* Q == 3 : 18-25% */
138440 -    {{556, 128}, {1353, 74}, {2238, 47}},    /* Q == 4 : 25-32% */
138441 -    {{714, 128}, {1418, 74}, {2436, 53}},    /* Q == 5 : 32-38% */
138442 -    {{883, 128}, {1437, 74}, {2464, 61}},    /* Q == 6 : 38-44% */
138443 -    {{897, 128}, {1515, 75}, {2622, 68}},    /* Q == 7 : 44-50% */
138444 -    {{926, 128}, {1613, 75}, {2730, 75}},    /* Q == 8 : 50-56% */
138445 -    {{947, 128}, {1729, 77}, {3359, 77}},    /* Q == 9 : 56-62% */
138446 -    {{1107, 128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
138447 -    {{1177, 128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
138448 -    {{1242, 128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
138449 -    {{1349, 128}, {2644, 106}, {5260, 106}}, /* Q ==13 : 81-87% */
138450 -    {{1455, 128}, {2422, 124}, {4174, 124}}, /* Q ==14 : 87-93% */
138451 -    {{722, 128}, {1891, 145}, {1936, 146}},  /* Q ==15 : 93-99% */
138454 -/** HUF_selectDecoder() :
138455 -*   Tells which decoder is likely to decode faster,
138456 -*   based on a set of pre-determined metrics.
138457 -*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
138458 -*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
138459 -U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize)
138461 -       /* decoder timing evaluation */
138462 -       U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
138463 -       U32 const D256 = (U32)(dstSize >> 8);
138464 -       U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
138465 -       U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
138466 -       DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */
138468 -       return DTime1 < DTime0;
138471 -typedef size_t (*decompressionAlgo)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize);
138473 -size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
138475 -       /* validation checks */
138476 -       if (dstSize == 0)
138477 -               return ERROR(dstSize_tooSmall);
138478 -       if (cSrcSize > dstSize)
138479 -               return ERROR(corruption_detected); /* invalid */
138480 -       if (cSrcSize == dstSize) {
138481 -               memcpy(dst, cSrc, dstSize);
138482 -               return dstSize;
138483 -       } /* not compressed */
138484 -       if (cSrcSize == 1) {
138485 -               memset(dst, *(const BYTE *)cSrc, dstSize);
138486 -               return dstSize;
138487 -       } /* RLE */
138489 -       {
138490 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
138491 -               return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
138492 -                             : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
138493 -       }
138496 -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
138498 -       /* validation checks */
138499 -       if (dstSize == 0)
138500 -               return ERROR(dstSize_tooSmall);
138501 -       if ((cSrcSize >= dstSize) || (cSrcSize <= 1))
138502 -               return ERROR(corruption_detected); /* invalid */
138504 -       {
138505 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
138506 -               return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
138507 -                             : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
138508 -       }
138511 -size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
138513 -       /* validation checks */
138514 -       if (dstSize == 0)
138515 -               return ERROR(dstSize_tooSmall);
138516 -       if (cSrcSize > dstSize)
138517 -               return ERROR(corruption_detected); /* invalid */
138518 -       if (cSrcSize == dstSize) {
138519 -               memcpy(dst, cSrc, dstSize);
138520 -               return dstSize;
138521 -       } /* not compressed */
138522 -       if (cSrcSize == 1) {
138523 -               memset(dst, *(const BYTE *)cSrc, dstSize);
138524 -               return dstSize;
138525 -       } /* RLE */
138527 -       {
138528 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
138529 -               return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
138530 -                             : HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
138531 -       }
138533 diff --git a/lib/zstd/mem.h b/lib/zstd/mem.h
138534 deleted file mode 100644
138535 index 93d7a2c377fe..000000000000
138536 --- a/lib/zstd/mem.h
138537 +++ /dev/null
138538 @@ -1,151 +0,0 @@
138540 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
138541 - * All rights reserved.
138543 - * This source code is licensed under the BSD-style license found in the
138544 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
138545 - * An additional grant of patent rights can be found in the PATENTS file in the
138546 - * same directory.
138548 - * This program is free software; you can redistribute it and/or modify it under
138549 - * the terms of the GNU General Public License version 2 as published by the
138550 - * Free Software Foundation. This program is dual-licensed; you may select
138551 - * either version 2 of the GNU General Public License ("GPL") or BSD license
138552 - * ("BSD").
138553 - */
138555 -#ifndef MEM_H_MODULE
138556 -#define MEM_H_MODULE
138558 -/*-****************************************
138559 -*  Dependencies
138560 -******************************************/
138561 -#include <asm/unaligned.h>
138562 -#include <linux/string.h> /* memcpy */
138563 -#include <linux/types.h>  /* size_t, ptrdiff_t */
138565 -/*-****************************************
138566 -*  Compiler specifics
138567 -******************************************/
138568 -#define ZSTD_STATIC static inline
138570 -/*-**************************************************************
138571 -*  Basic Types
138572 -*****************************************************************/
138573 -typedef uint8_t BYTE;
138574 -typedef uint16_t U16;
138575 -typedef int16_t S16;
138576 -typedef uint32_t U32;
138577 -typedef int32_t S32;
138578 -typedef uint64_t U64;
138579 -typedef int64_t S64;
138580 -typedef ptrdiff_t iPtrDiff;
138581 -typedef uintptr_t uPtrDiff;
138583 -/*-**************************************************************
138584 -*  Memory I/O
138585 -*****************************************************************/
138586 -ZSTD_STATIC unsigned ZSTD_32bits(void) { return sizeof(size_t) == 4; }
138587 -ZSTD_STATIC unsigned ZSTD_64bits(void) { return sizeof(size_t) == 8; }
138589 -#if defined(__LITTLE_ENDIAN)
138590 -#define ZSTD_LITTLE_ENDIAN 1
138591 -#else
138592 -#define ZSTD_LITTLE_ENDIAN 0
138593 -#endif
138595 -ZSTD_STATIC unsigned ZSTD_isLittleEndian(void) { return ZSTD_LITTLE_ENDIAN; }
138597 -ZSTD_STATIC U16 ZSTD_read16(const void *memPtr) { return get_unaligned((const U16 *)memPtr); }
138599 -ZSTD_STATIC U32 ZSTD_read32(const void *memPtr) { return get_unaligned((const U32 *)memPtr); }
138601 -ZSTD_STATIC U64 ZSTD_read64(const void *memPtr) { return get_unaligned((const U64 *)memPtr); }
138603 -ZSTD_STATIC size_t ZSTD_readST(const void *memPtr) { return get_unaligned((const size_t *)memPtr); }
138605 -ZSTD_STATIC void ZSTD_write16(void *memPtr, U16 value) { put_unaligned(value, (U16 *)memPtr); }
138607 -ZSTD_STATIC void ZSTD_write32(void *memPtr, U32 value) { put_unaligned(value, (U32 *)memPtr); }
138609 -ZSTD_STATIC void ZSTD_write64(void *memPtr, U64 value) { put_unaligned(value, (U64 *)memPtr); }
138611 -/*=== Little endian r/w ===*/
138613 -ZSTD_STATIC U16 ZSTD_readLE16(const void *memPtr) { return get_unaligned_le16(memPtr); }
138615 -ZSTD_STATIC void ZSTD_writeLE16(void *memPtr, U16 val) { put_unaligned_le16(val, memPtr); }
138617 -ZSTD_STATIC U32 ZSTD_readLE24(const void *memPtr) { return ZSTD_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); }
138619 -ZSTD_STATIC void ZSTD_writeLE24(void *memPtr, U32 val)
138621 -       ZSTD_writeLE16(memPtr, (U16)val);
138622 -       ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
138625 -ZSTD_STATIC U32 ZSTD_readLE32(const void *memPtr) { return get_unaligned_le32(memPtr); }
138627 -ZSTD_STATIC void ZSTD_writeLE32(void *memPtr, U32 val32) { put_unaligned_le32(val32, memPtr); }
138629 -ZSTD_STATIC U64 ZSTD_readLE64(const void *memPtr) { return get_unaligned_le64(memPtr); }
138631 -ZSTD_STATIC void ZSTD_writeLE64(void *memPtr, U64 val64) { put_unaligned_le64(val64, memPtr); }
138633 -ZSTD_STATIC size_t ZSTD_readLEST(const void *memPtr)
138635 -       if (ZSTD_32bits())
138636 -               return (size_t)ZSTD_readLE32(memPtr);
138637 -       else
138638 -               return (size_t)ZSTD_readLE64(memPtr);
138641 -ZSTD_STATIC void ZSTD_writeLEST(void *memPtr, size_t val)
138643 -       if (ZSTD_32bits())
138644 -               ZSTD_writeLE32(memPtr, (U32)val);
138645 -       else
138646 -               ZSTD_writeLE64(memPtr, (U64)val);
138649 -/*=== Big endian r/w ===*/
138651 -ZSTD_STATIC U32 ZSTD_readBE32(const void *memPtr) { return get_unaligned_be32(memPtr); }
138653 -ZSTD_STATIC void ZSTD_writeBE32(void *memPtr, U32 val32) { put_unaligned_be32(val32, memPtr); }
138655 -ZSTD_STATIC U64 ZSTD_readBE64(const void *memPtr) { return get_unaligned_be64(memPtr); }
138657 -ZSTD_STATIC void ZSTD_writeBE64(void *memPtr, U64 val64) { put_unaligned_be64(val64, memPtr); }
138659 -ZSTD_STATIC size_t ZSTD_readBEST(const void *memPtr)
138661 -       if (ZSTD_32bits())
138662 -               return (size_t)ZSTD_readBE32(memPtr);
138663 -       else
138664 -               return (size_t)ZSTD_readBE64(memPtr);
138667 -ZSTD_STATIC void ZSTD_writeBEST(void *memPtr, size_t val)
138669 -       if (ZSTD_32bits())
138670 -               ZSTD_writeBE32(memPtr, (U32)val);
138671 -       else
138672 -               ZSTD_writeBE64(memPtr, (U64)val);
138675 -/* function safe only for comparisons */
138676 -ZSTD_STATIC U32 ZSTD_readMINMATCH(const void *memPtr, U32 length)
138678 -       switch (length) {
138679 -       default:
138680 -       case 4: return ZSTD_read32(memPtr);
138681 -       case 3:
138682 -               if (ZSTD_isLittleEndian())
138683 -                       return ZSTD_read32(memPtr) << 8;
138684 -               else
138685 -                       return ZSTD_read32(memPtr) >> 8;
138686 -       }
138689 -#endif /* MEM_H_MODULE */
138690 diff --git a/lib/zstd/zstd_common.c b/lib/zstd/zstd_common.c
138691 deleted file mode 100644
138692 index a282624ee155..000000000000
138693 --- a/lib/zstd/zstd_common.c
138694 +++ /dev/null
138695 @@ -1,75 +0,0 @@
138697 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
138698 - * All rights reserved.
138700 - * This source code is licensed under the BSD-style license found in the
138701 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
138702 - * An additional grant of patent rights can be found in the PATENTS file in the
138703 - * same directory.
138705 - * This program is free software; you can redistribute it and/or modify it under
138706 - * the terms of the GNU General Public License version 2 as published by the
138707 - * Free Software Foundation. This program is dual-licensed; you may select
138708 - * either version 2 of the GNU General Public License ("GPL") or BSD license
138709 - * ("BSD").
138710 - */
138712 -/*-*************************************
138713 -*  Dependencies
138714 -***************************************/
138715 -#include "error_private.h"
138716 -#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
138717 -#include <linux/kernel.h>
138719 -/*=**************************************************************
138720 -*  Custom allocator
138721 -****************************************************************/
138723 -#define stack_push(stack, size)                                 \
138724 -       ({                                                      \
138725 -               void *const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \
138726 -               (stack)->ptr = (char *)ptr + (size);            \
138727 -               (stack)->ptr <= (stack)->end ? ptr : NULL;      \
138728 -       })
138730 -ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize)
138732 -       ZSTD_customMem stackMem = {ZSTD_stackAlloc, ZSTD_stackFree, workspace};
138733 -       ZSTD_stack *stack = (ZSTD_stack *)workspace;
138734 -       /* Verify preconditions */
138735 -       if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) {
138736 -               ZSTD_customMem error = {NULL, NULL, NULL};
138737 -               return error;
138738 -       }
138739 -       /* Initialize the stack */
138740 -       stack->ptr = workspace;
138741 -       stack->end = (char *)workspace + workspaceSize;
138742 -       stack_push(stack, sizeof(ZSTD_stack));
138743 -       return stackMem;
138746 -void *ZSTD_stackAllocAll(void *opaque, size_t *size)
138748 -       ZSTD_stack *stack = (ZSTD_stack *)opaque;
138749 -       *size = (BYTE const *)stack->end - (BYTE *)ZSTD_PTR_ALIGN(stack->ptr);
138750 -       return stack_push(stack, *size);
138753 -void *ZSTD_stackAlloc(void *opaque, size_t size)
138755 -       ZSTD_stack *stack = (ZSTD_stack *)opaque;
138756 -       return stack_push(stack, size);
138758 -void ZSTD_stackFree(void *opaque, void *address)
138760 -       (void)opaque;
138761 -       (void)address;
138764 -void *ZSTD_malloc(size_t size, ZSTD_customMem customMem) { return customMem.customAlloc(customMem.opaque, size); }
138766 -void ZSTD_free(void *ptr, ZSTD_customMem customMem)
138768 -       if (ptr != NULL)
138769 -               customMem.customFree(customMem.opaque, ptr);
138771 diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
138772 new file mode 100644
138773 index 000000000000..37d08ff43e6e
138774 --- /dev/null
138775 +++ b/lib/zstd/zstd_compress_module.c
138776 @@ -0,0 +1,124 @@
138777 +// SPDX-License-Identifier: GPL-2.0-only
138779 + * Copyright (c) Facebook, Inc.
138780 + * All rights reserved.
138782 + * This source code is licensed under both the BSD-style license (found in the
138783 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
138784 + * in the COPYING file in the root directory of this source tree).
138785 + * You may select, at your option, one of the above-listed licenses.
138786 + */
138788 +#include <linux/kernel.h>
138789 +#include <linux/module.h>
138790 +#include <linux/string.h>
138791 +#include <linux/zstd.h>
138793 +#include "common/zstd_deps.h"
138794 +#include "common/zstd_internal.h"
138796 +int zstd_min_clevel(void)
138798 +       return ZSTD_minCLevel();
138800 +EXPORT_SYMBOL(zstd_min_clevel);
138802 +int zstd_max_clevel(void)
138804 +       return ZSTD_maxCLevel();
138806 +EXPORT_SYMBOL(zstd_max_clevel);
138808 +size_t zstd_compress_bound(size_t src_size)
138810 +       return ZSTD_compressBound(src_size);
138812 +EXPORT_SYMBOL(zstd_compress_bound);
138814 +zstd_parameters zstd_get_params(int level,
138815 +       unsigned long long estimated_src_size)
138817 +       return ZSTD_getParams(level, estimated_src_size, 0);
138819 +EXPORT_SYMBOL(zstd_get_params);
138821 +size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
138823 +       return ZSTD_estimateCCtxSize_usingCParams(*cparams);
138825 +EXPORT_SYMBOL(zstd_cctx_workspace_bound);
138827 +zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
138829 +       if (workspace == NULL)
138830 +               return NULL;
138831 +       return ZSTD_initStaticCCtx(workspace, workspace_size);
138833 +EXPORT_SYMBOL(zstd_init_cctx);
138835 +size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
138836 +       const void *src, size_t src_size, const zstd_parameters *parameters)
138838 +       return ZSTD_compress_advanced(cctx, dst, dst_capacity, src, src_size, NULL, 0, *parameters);
138840 +EXPORT_SYMBOL(zstd_compress_cctx);
138842 +size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
138844 +       return ZSTD_estimateCStreamSize_usingCParams(*cparams);
138846 +EXPORT_SYMBOL(zstd_cstream_workspace_bound);
138848 +zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
138849 +       unsigned long long pledged_src_size, void *workspace, size_t workspace_size)
138851 +       zstd_cstream *cstream;
138852 +       size_t ret;
138854 +       if (workspace == NULL)
138855 +               return NULL;
138857 +       cstream = ZSTD_initStaticCStream(workspace, workspace_size);
138858 +       if (cstream == NULL)
138859 +               return NULL;
138861 +       /* 0 means unknown in linux zstd API but means 0 in new zstd API */
138862 +       if (pledged_src_size == 0)
138863 +               pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
138865 +       ret = ZSTD_initCStream_advanced(cstream, NULL, 0, *parameters, pledged_src_size);
138866 +       if (ZSTD_isError(ret))
138867 +               return NULL;
138869 +       return cstream;
138871 +EXPORT_SYMBOL(zstd_init_cstream);
138873 +size_t zstd_reset_cstream(zstd_cstream *cstream,
138874 +       unsigned long long pledged_src_size)
138876 +       return ZSTD_resetCStream(cstream, pledged_src_size);
138878 +EXPORT_SYMBOL(zstd_reset_cstream);
138880 +size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
138881 +       zstd_in_buffer *input)
138883 +       return ZSTD_compressStream(cstream, output, input);
138885 +EXPORT_SYMBOL(zstd_compress_stream);
138887 +size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output)
138889 +       return ZSTD_flushStream(cstream, output);
138891 +EXPORT_SYMBOL(zstd_flush_stream);
138893 +size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output)
138895 +       return ZSTD_endStream(cstream, output);
138897 +EXPORT_SYMBOL(zstd_end_stream);
138899 +MODULE_LICENSE("Dual BSD/GPL");
138900 +MODULE_DESCRIPTION("Zstd Compressor");
138901 diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
138902 new file mode 100644
138903 index 000000000000..15005cdb9eca
138904 --- /dev/null
138905 +++ b/lib/zstd/zstd_decompress_module.c
138906 @@ -0,0 +1,105 @@
138907 +// SPDX-License-Identifier: GPL-2.0-only
138909 + * Copyright (c) Facebook, Inc.
138910 + * All rights reserved.
138912 + * This source code is licensed under both the BSD-style license (found in the
138913 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
138914 + * in the COPYING file in the root directory of this source tree).
138915 + * You may select, at your option, one of the above-listed licenses.
138916 + */
138918 +#include <linux/kernel.h>
138919 +#include <linux/module.h>
138920 +#include <linux/string.h>
138921 +#include <linux/zstd.h>
138923 +#include "common/zstd_deps.h"
138925 +/* Common symbols. zstd_compress must depend on zstd_decompress. */
138927 +unsigned int zstd_is_error(size_t code)
138929 +       return ZSTD_isError(code);
138931 +EXPORT_SYMBOL(zstd_is_error);
138933 +zstd_error_code zstd_get_error_code(size_t code)
138935 +       return ZSTD_getErrorCode(code);
138937 +EXPORT_SYMBOL(zstd_get_error_code);
138939 +const char *zstd_get_error_name(size_t code)
138941 +       return ZSTD_getErrorName(code);
138943 +EXPORT_SYMBOL(zstd_get_error_name);
138945 +/* Decompression symbols. */
138947 +size_t zstd_dctx_workspace_bound(void)
138949 +       return ZSTD_estimateDCtxSize();
138951 +EXPORT_SYMBOL(zstd_dctx_workspace_bound);
138953 +zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size)
138955 +       if (workspace == NULL)
138956 +               return NULL;
138957 +       return ZSTD_initStaticDCtx(workspace, workspace_size);
138959 +EXPORT_SYMBOL(zstd_init_dctx);
138961 +size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
138962 +       const void *src, size_t src_size)
138964 +       return ZSTD_decompressDCtx(dctx, dst, dst_capacity, src, src_size);
138966 +EXPORT_SYMBOL(zstd_decompress_dctx);
138968 +size_t zstd_dstream_workspace_bound(size_t max_window_size)
138970 +       return ZSTD_estimateDStreamSize(max_window_size);
138972 +EXPORT_SYMBOL(zstd_dstream_workspace_bound);
138974 +zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
138975 +       size_t workspace_size)
138977 +       if (workspace == NULL)
138978 +               return NULL;
138979 +       (void)max_window_size;
138980 +       return ZSTD_initStaticDStream(workspace, workspace_size);
138982 +EXPORT_SYMBOL(zstd_init_dstream);
138984 +size_t zstd_reset_dstream(zstd_dstream *dstream)
138986 +       return ZSTD_resetDStream(dstream);
138988 +EXPORT_SYMBOL(zstd_reset_dstream);
138990 +size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
138991 +       zstd_in_buffer *input)
138993 +       return ZSTD_decompressStream(dstream, output, input);
138995 +EXPORT_SYMBOL(zstd_decompress_stream);
138997 +size_t zstd_find_frame_compressed_size(const void *src, size_t src_size)
138999 +       return ZSTD_findFrameCompressedSize(src, src_size);
139001 +EXPORT_SYMBOL(zstd_find_frame_compressed_size);
139003 +size_t zstd_get_frame_header(zstd_frame_header *header, const void *src,
139004 +       size_t src_size)
139006 +       return ZSTD_getFrameHeader(header, src, src_size);
139008 +EXPORT_SYMBOL(zstd_get_frame_header);
139010 +MODULE_LICENSE("Dual BSD/GPL");
139011 +MODULE_DESCRIPTION("Zstd Decompressor");
139012 diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h
139013 deleted file mode 100644
139014 index dac753397f86..000000000000
139015 --- a/lib/zstd/zstd_internal.h
139016 +++ /dev/null
139017 @@ -1,273 +0,0 @@
139019 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
139020 - * All rights reserved.
139022 - * This source code is licensed under the BSD-style license found in the
139023 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
139024 - * An additional grant of patent rights can be found in the PATENTS file in the
139025 - * same directory.
139027 - * This program is free software; you can redistribute it and/or modify it under
139028 - * the terms of the GNU General Public License version 2 as published by the
139029 - * Free Software Foundation. This program is dual-licensed; you may select
139030 - * either version 2 of the GNU General Public License ("GPL") or BSD license
139031 - * ("BSD").
139032 - */
139034 -#ifndef ZSTD_CCOMMON_H_MODULE
139035 -#define ZSTD_CCOMMON_H_MODULE
139037 -/*-*******************************************************
139038 -*  Compiler specifics
139039 -*********************************************************/
139040 -#define FORCE_INLINE static __always_inline
139041 -#define FORCE_NOINLINE static noinline
139043 -/*-*************************************
139044 -*  Dependencies
139045 -***************************************/
139046 -#include "error_private.h"
139047 -#include "mem.h"
139048 -#include <linux/compiler.h>
139049 -#include <linux/kernel.h>
139050 -#include <linux/xxhash.h>
139051 -#include <linux/zstd.h>
139053 -/*-*************************************
139054 -*  shared macros
139055 -***************************************/
139056 -#define MIN(a, b) ((a) < (b) ? (a) : (b))
139057 -#define MAX(a, b) ((a) > (b) ? (a) : (b))
139058 -#define CHECK_F(f)                       \
139059 -       {                                \
139060 -               size_t const errcod = f; \
139061 -               if (ERR_isError(errcod)) \
139062 -                       return errcod;   \
139063 -       } /* check and Forward error code */
139064 -#define CHECK_E(f, e)                    \
139065 -       {                                \
139066 -               size_t const errcod = f; \
139067 -               if (ERR_isError(errcod)) \
139068 -                       return ERROR(e); \
139069 -       } /* check and send Error code */
139070 -#define ZSTD_STATIC_ASSERT(c)                                   \
139071 -       {                                                       \
139072 -               enum { ZSTD_static_assert = 1 / (int)(!!(c)) }; \
139073 -       }
139075 -/*-*************************************
139076 -*  Common constants
139077 -***************************************/
139078 -#define ZSTD_OPT_NUM (1 << 12)
139079 -#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */
139081 -#define ZSTD_REP_NUM 3               /* number of repcodes */
139082 -#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */
139083 -#define ZSTD_REP_MOVE (ZSTD_REP_NUM - 1)
139084 -#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM)
139085 -static const U32 repStartValue[ZSTD_REP_NUM] = {1, 4, 8};
139087 -#define KB *(1 << 10)
139088 -#define MB *(1 << 20)
139089 -#define GB *(1U << 30)
139091 -#define BIT7 128
139092 -#define BIT6 64
139093 -#define BIT5 32
139094 -#define BIT4 16
139095 -#define BIT1 2
139096 -#define BIT0 1
139098 -#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
139099 -static const size_t ZSTD_fcs_fieldSize[4] = {0, 2, 4, 8};
139100 -static const size_t ZSTD_did_fieldSize[4] = {0, 1, 2, 4};
139102 -#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
139103 -static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
139104 -typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
139106 -#define MIN_SEQUENCES_SIZE 1                                                                     /* nbSeq==0 */
139107 -#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
139109 -#define HufLog 12
139110 -typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
139112 -#define LONGNBSEQ 0x7F00
139114 -#define MINMATCH 3
139115 -#define EQUAL_READ32 4
139117 -#define Litbits 8
139118 -#define MaxLit ((1 << Litbits) - 1)
139119 -#define MaxML 52
139120 -#define MaxLL 35
139121 -#define MaxOff 28
139122 -#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
139123 -#define MLFSELog 9
139124 -#define LLFSELog 9
139125 -#define OffFSELog 8
139127 -static const U32 LL_bits[MaxLL + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
139128 -static const S16 LL_defaultNorm[MaxLL + 1] = {4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1};
139129 -#define LL_DEFAULTNORMLOG 6 /* for static allocation */
139130 -static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
139132 -static const U32 ML_bits[MaxML + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0, 0,
139133 -                                      0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
139134 -static const S16 ML_defaultNorm[MaxML + 1] = {1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  1,  1,  1,  1,  1,  1, 1,
139135 -                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1};
139136 -#define ML_DEFAULTNORMLOG 6 /* for static allocation */
139137 -static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
139139 -static const S16 OF_defaultNorm[MaxOff + 1] = {1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1};
139140 -#define OF_DEFAULTNORMLOG 5 /* for static allocation */
139141 -static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
139143 -/*-*******************************************
139144 -*  Shared functions to include for inlining
139145 -*********************************************/
139146 -ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) {
139147 -       /*
139148 -        * zstd relies heavily on gcc being able to analyze and inline this
139149 -        * memcpy() call, since it is called in a tight loop. Preboot mode
139150 -        * is compiled in freestanding mode, which stops gcc from analyzing
139151 -        * memcpy(). Use __builtin_memcpy() to tell gcc to analyze this as a
139152 -        * regular memcpy().
139153 -        */
139154 -       __builtin_memcpy(dst, src, 8);
139156 -/*! ZSTD_wildcopy() :
139157 -*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
139158 -#define WILDCOPY_OVERLENGTH 8
139159 -ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length)
139161 -       const BYTE* ip = (const BYTE*)src;
139162 -       BYTE* op = (BYTE*)dst;
139163 -       BYTE* const oend = op + length;
139164 -#if defined(GCC_VERSION) && GCC_VERSION >= 70000 && GCC_VERSION < 70200
139165 -       /*
139166 -        * Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388.
139167 -        * Avoid the bad case where the loop only runs once by handling the
139168 -        * special case separately. This doesn't trigger the bug because it
139169 -        * doesn't involve pointer/integer overflow.
139170 -        */
139171 -       if (length <= 8)
139172 -               return ZSTD_copy8(dst, src);
139173 -#endif
139174 -       do {
139175 -               ZSTD_copy8(op, ip);
139176 -               op += 8;
139177 -               ip += 8;
139178 -       } while (op < oend);
139181 -/*-*******************************************
139182 -*  Private interfaces
139183 -*********************************************/
139184 -typedef struct ZSTD_stats_s ZSTD_stats_t;
139186 -typedef struct {
139187 -       U32 off;
139188 -       U32 len;
139189 -} ZSTD_match_t;
139191 -typedef struct {
139192 -       U32 price;
139193 -       U32 off;
139194 -       U32 mlen;
139195 -       U32 litlen;
139196 -       U32 rep[ZSTD_REP_NUM];
139197 -} ZSTD_optimal_t;
139199 -typedef struct seqDef_s {
139200 -       U32 offset;
139201 -       U16 litLength;
139202 -       U16 matchLength;
139203 -} seqDef;
139205 -typedef struct {
139206 -       seqDef *sequencesStart;
139207 -       seqDef *sequences;
139208 -       BYTE *litStart;
139209 -       BYTE *lit;
139210 -       BYTE *llCode;
139211 -       BYTE *mlCode;
139212 -       BYTE *ofCode;
139213 -       U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
139214 -       U32 longLengthPos;
139215 -       /* opt */
139216 -       ZSTD_optimal_t *priceTable;
139217 -       ZSTD_match_t *matchTable;
139218 -       U32 *matchLengthFreq;
139219 -       U32 *litLengthFreq;
139220 -       U32 *litFreq;
139221 -       U32 *offCodeFreq;
139222 -       U32 matchLengthSum;
139223 -       U32 matchSum;
139224 -       U32 litLengthSum;
139225 -       U32 litSum;
139226 -       U32 offCodeSum;
139227 -       U32 log2matchLengthSum;
139228 -       U32 log2matchSum;
139229 -       U32 log2litLengthSum;
139230 -       U32 log2litSum;
139231 -       U32 log2offCodeSum;
139232 -       U32 factor;
139233 -       U32 staticPrices;
139234 -       U32 cachedPrice;
139235 -       U32 cachedLitLength;
139236 -       const BYTE *cachedLiterals;
139237 -} seqStore_t;
139239 -const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx);
139240 -void ZSTD_seqToCodes(const seqStore_t *seqStorePtr);
139241 -int ZSTD_isSkipFrame(ZSTD_DCtx *dctx);
139243 -/*= Custom memory allocation functions */
139244 -typedef void *(*ZSTD_allocFunction)(void *opaque, size_t size);
139245 -typedef void (*ZSTD_freeFunction)(void *opaque, void *address);
139246 -typedef struct {
139247 -       ZSTD_allocFunction customAlloc;
139248 -       ZSTD_freeFunction customFree;
139249 -       void *opaque;
139250 -} ZSTD_customMem;
139252 -void *ZSTD_malloc(size_t size, ZSTD_customMem customMem);
139253 -void ZSTD_free(void *ptr, ZSTD_customMem customMem);
139255 -/*====== stack allocation  ======*/
139257 -typedef struct {
139258 -       void *ptr;
139259 -       const void *end;
139260 -} ZSTD_stack;
139262 -#define ZSTD_ALIGN(x) ALIGN(x, sizeof(size_t))
139263 -#define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t))
139265 -ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize);
139267 -void *ZSTD_stackAllocAll(void *opaque, size_t *size);
139268 -void *ZSTD_stackAlloc(void *opaque, size_t size);
139269 -void ZSTD_stackFree(void *opaque, void *address);
139271 -/*======  common function  ======*/
139273 -ZSTD_STATIC U32 ZSTD_highbit32(U32 val) { return 31 - __builtin_clz(val); }
139275 -/* hidden functions */
139277 -/* ZSTD_invalidateRepCodes() :
139278 - * ensures next compression will not use repcodes from previous block.
139279 - * Note : only works with regular variant;
139280 - *        do not use with extDict variant ! */
139281 -void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx);
139283 -size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx);
139284 -size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx);
139285 -size_t ZSTD_freeCDict(ZSTD_CDict *cdict);
139286 -size_t ZSTD_freeDDict(ZSTD_DDict *cdict);
139287 -size_t ZSTD_freeCStream(ZSTD_CStream *zcs);
139288 -size_t ZSTD_freeDStream(ZSTD_DStream *zds);
139290 -#endif /* ZSTD_CCOMMON_H_MODULE */
139291 diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h
139292 deleted file mode 100644
139293 index 55e1b4cba808..000000000000
139294 --- a/lib/zstd/zstd_opt.h
139295 +++ /dev/null
139296 @@ -1,1014 +0,0 @@
139298 - * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
139299 - * All rights reserved.
139301 - * This source code is licensed under the BSD-style license found in the
139302 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
139303 - * An additional grant of patent rights can be found in the PATENTS file in the
139304 - * same directory.
139306 - * This program is free software; you can redistribute it and/or modify it under
139307 - * the terms of the GNU General Public License version 2 as published by the
139308 - * Free Software Foundation. This program is dual-licensed; you may select
139309 - * either version 2 of the GNU General Public License ("GPL") or BSD license
139310 - * ("BSD").
139311 - */
139313 -/* Note : this file is intended to be included within zstd_compress.c */
139315 -#ifndef ZSTD_OPT_H_91842398743
139316 -#define ZSTD_OPT_H_91842398743
139318 -#define ZSTD_LITFREQ_ADD 2
139319 -#define ZSTD_FREQ_DIV 4
139320 -#define ZSTD_MAX_PRICE (1 << 30)
139322 -/*-*************************************
139323 -*  Price functions for optimal parser
139324 -***************************************/
139325 -FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t *ssPtr)
139327 -       ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum + 1);
139328 -       ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum + 1);
139329 -       ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum + 1);
139330 -       ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum + 1);
139331 -       ssPtr->factor = 1 + ((ssPtr->litSum >> 5) / ssPtr->litLengthSum) + ((ssPtr->litSum << 1) / (ssPtr->litSum + ssPtr->matchSum));
139334 -ZSTD_STATIC void ZSTD_rescaleFreqs(seqStore_t *ssPtr, const BYTE *src, size_t srcSize)
139336 -       unsigned u;
139338 -       ssPtr->cachedLiterals = NULL;
139339 -       ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
139340 -       ssPtr->staticPrices = 0;
139342 -       if (ssPtr->litLengthSum == 0) {
139343 -               if (srcSize <= 1024)
139344 -                       ssPtr->staticPrices = 1;
139346 -               for (u = 0; u <= MaxLit; u++)
139347 -                       ssPtr->litFreq[u] = 0;
139348 -               for (u = 0; u < srcSize; u++)
139349 -                       ssPtr->litFreq[src[u]]++;
139351 -               ssPtr->litSum = 0;
139352 -               ssPtr->litLengthSum = MaxLL + 1;
139353 -               ssPtr->matchLengthSum = MaxML + 1;
139354 -               ssPtr->offCodeSum = (MaxOff + 1);
139355 -               ssPtr->matchSum = (ZSTD_LITFREQ_ADD << Litbits);
139357 -               for (u = 0; u <= MaxLit; u++) {
139358 -                       ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> ZSTD_FREQ_DIV);
139359 -                       ssPtr->litSum += ssPtr->litFreq[u];
139360 -               }
139361 -               for (u = 0; u <= MaxLL; u++)
139362 -                       ssPtr->litLengthFreq[u] = 1;
139363 -               for (u = 0; u <= MaxML; u++)
139364 -                       ssPtr->matchLengthFreq[u] = 1;
139365 -               for (u = 0; u <= MaxOff; u++)
139366 -                       ssPtr->offCodeFreq[u] = 1;
139367 -       } else {
139368 -               ssPtr->matchLengthSum = 0;
139369 -               ssPtr->litLengthSum = 0;
139370 -               ssPtr->offCodeSum = 0;
139371 -               ssPtr->matchSum = 0;
139372 -               ssPtr->litSum = 0;
139374 -               for (u = 0; u <= MaxLit; u++) {
139375 -                       ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> (ZSTD_FREQ_DIV + 1));
139376 -                       ssPtr->litSum += ssPtr->litFreq[u];
139377 -               }
139378 -               for (u = 0; u <= MaxLL; u++) {
139379 -                       ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u] >> (ZSTD_FREQ_DIV + 1));
139380 -                       ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
139381 -               }
139382 -               for (u = 0; u <= MaxML; u++) {
139383 -                       ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u] >> ZSTD_FREQ_DIV);
139384 -                       ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
139385 -                       ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
139386 -               }
139387 -               ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
139388 -               for (u = 0; u <= MaxOff; u++) {
139389 -                       ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u] >> ZSTD_FREQ_DIV);
139390 -                       ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
139391 -               }
139392 -       }
139394 -       ZSTD_setLog2Prices(ssPtr);
139397 -FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t *ssPtr, U32 litLength, const BYTE *literals)
139399 -       U32 price, u;
139401 -       if (ssPtr->staticPrices)
139402 -               return ZSTD_highbit32((U32)litLength + 1) + (litLength * 6);
139404 -       if (litLength == 0)
139405 -               return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0] + 1);
139407 -       /* literals */
139408 -       if (ssPtr->cachedLiterals == literals) {
139409 -               U32 const additional = litLength - ssPtr->cachedLitLength;
139410 -               const BYTE *literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength;
139411 -               price = ssPtr->cachedPrice + additional * ssPtr->log2litSum;
139412 -               for (u = 0; u < additional; u++)
139413 -                       price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]] + 1);
139414 -               ssPtr->cachedPrice = price;
139415 -               ssPtr->cachedLitLength = litLength;
139416 -       } else {
139417 -               price = litLength * ssPtr->log2litSum;
139418 -               for (u = 0; u < litLength; u++)
139419 -                       price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]] + 1);
139421 -               if (litLength >= 12) {
139422 -                       ssPtr->cachedLiterals = literals;
139423 -                       ssPtr->cachedPrice = price;
139424 -                       ssPtr->cachedLitLength = litLength;
139425 -               }
139426 -       }
139428 -       /* literal Length */
139429 -       {
139430 -               const BYTE LL_deltaCode = 19;
139431 -               const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
139432 -               price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode] + 1);
139433 -       }
139435 -       return price;
139438 -FORCE_INLINE U32 ZSTD_getPrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength, const int ultra)
139440 -       /* offset */
139441 -       U32 price;
139442 -       BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
139444 -       if (seqStorePtr->staticPrices)
139445 -               return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength + 1) + 16 + offCode;
139447 -       price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode] + 1);
139448 -       if (!ultra && offCode >= 20)
139449 -               price += (offCode - 19) * 2;
139451 -       /* match Length */
139452 -       {
139453 -               const BYTE ML_deltaCode = 36;
139454 -               const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
139455 -               price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode] + 1);
139456 -       }
139458 -       return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor;
139461 -ZSTD_STATIC void ZSTD_updatePrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength)
139463 -       U32 u;
139465 -       /* literals */
139466 -       seqStorePtr->litSum += litLength * ZSTD_LITFREQ_ADD;
139467 -       for (u = 0; u < litLength; u++)
139468 -               seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
139470 -       /* literal Length */
139471 -       {
139472 -               const BYTE LL_deltaCode = 19;
139473 -               const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
139474 -               seqStorePtr->litLengthFreq[llCode]++;
139475 -               seqStorePtr->litLengthSum++;
139476 -       }
139478 -       /* match offset */
139479 -       {
139480 -               BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
139481 -               seqStorePtr->offCodeSum++;
139482 -               seqStorePtr->offCodeFreq[offCode]++;
139483 -       }
139485 -       /* match Length */
139486 -       {
139487 -               const BYTE ML_deltaCode = 36;
139488 -               const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
139489 -               seqStorePtr->matchLengthFreq[mlCode]++;
139490 -               seqStorePtr->matchLengthSum++;
139491 -       }
139493 -       ZSTD_setLog2Prices(seqStorePtr);
139496 -#define SET_PRICE(pos, mlen_, offset_, litlen_, price_)           \
139497 -       {                                                         \
139498 -               while (last_pos < pos) {                          \
139499 -                       opt[last_pos + 1].price = ZSTD_MAX_PRICE; \
139500 -                       last_pos++;                               \
139501 -               }                                                 \
139502 -               opt[pos].mlen = mlen_;                            \
139503 -               opt[pos].off = offset_;                           \
139504 -               opt[pos].litlen = litlen_;                        \
139505 -               opt[pos].price = price_;                          \
139506 -       }
139508 -/* Update hashTable3 up to ip (excluded)
139509 -   Assumption : always within prefix (i.e. not within extDict) */
139510 -FORCE_INLINE
139511 -U32 ZSTD_insertAndFindFirstIndexHash3(ZSTD_CCtx *zc, const BYTE *ip)
139513 -       U32 *const hashTable3 = zc->hashTable3;
139514 -       U32 const hashLog3 = zc->hashLog3;
139515 -       const BYTE *const base = zc->base;
139516 -       U32 idx = zc->nextToUpdate3;
139517 -       const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
139518 -       const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);
139520 -       while (idx < target) {
139521 -               hashTable3[ZSTD_hash3Ptr(base + idx, hashLog3)] = idx;
139522 -               idx++;
139523 -       }
139525 -       return hashTable3[hash3];
139528 -/*-*************************************
139529 -*  Binary Tree search
139530 -***************************************/
139531 -static U32 ZSTD_insertBtAndGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, U32 nbCompares, const U32 mls, U32 extDict,
139532 -                                        ZSTD_match_t *matches, const U32 minMatchLen)
139534 -       const BYTE *const base = zc->base;
139535 -       const U32 curr = (U32)(ip - base);
139536 -       const U32 hashLog = zc->params.cParams.hashLog;
139537 -       const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
139538 -       U32 *const hashTable = zc->hashTable;
139539 -       U32 matchIndex = hashTable[h];
139540 -       U32 *const bt = zc->chainTable;
139541 -       const U32 btLog = zc->params.cParams.chainLog - 1;
139542 -       const U32 btMask = (1U << btLog) - 1;
139543 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
139544 -       const BYTE *const dictBase = zc->dictBase;
139545 -       const U32 dictLimit = zc->dictLimit;
139546 -       const BYTE *const dictEnd = dictBase + dictLimit;
139547 -       const BYTE *const prefixStart = base + dictLimit;
139548 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
139549 -       const U32 windowLow = zc->lowLimit;
139550 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
139551 -       U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
139552 -       U32 matchEndIdx = curr + 8;
139553 -       U32 dummy32; /* to be nullified at the end */
139554 -       U32 mnum = 0;
139556 -       const U32 minMatch = (mls == 3) ? 3 : 4;
139557 -       size_t bestLength = minMatchLen - 1;
139559 -       if (minMatch == 3) { /* HC3 match finder */
139560 -               U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(zc, ip);
139561 -               if (matchIndex3 > windowLow && (curr - matchIndex3 < (1 << 18))) {
139562 -                       const BYTE *match;
139563 -                       size_t currMl = 0;
139564 -                       if ((!extDict) || matchIndex3 >= dictLimit) {
139565 -                               match = base + matchIndex3;
139566 -                               if (match[bestLength] == ip[bestLength])
139567 -                                       currMl = ZSTD_count(ip, match, iLimit);
139568 -                       } else {
139569 -                               match = dictBase + matchIndex3;
139570 -                               if (ZSTD_readMINMATCH(match, MINMATCH) ==
139571 -                                   ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
139572 -                                       currMl = ZSTD_count_2segments(ip + MINMATCH, match + MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
139573 -                       }
139575 -                       /* save best solution */
139576 -                       if (currMl > bestLength) {
139577 -                               bestLength = currMl;
139578 -                               matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3;
139579 -                               matches[mnum].len = (U32)currMl;
139580 -                               mnum++;
139581 -                               if (currMl > ZSTD_OPT_NUM)
139582 -                                       goto update;
139583 -                               if (ip + currMl == iLimit)
139584 -                                       goto update; /* best possible, and avoid read overflow*/
139585 -                       }
139586 -               }
139587 -       }
139589 -       hashTable[h] = curr; /* Update Hash Table */
139591 -       while (nbCompares-- && (matchIndex > windowLow)) {
139592 -               U32 *nextPtr = bt + 2 * (matchIndex & btMask);
139593 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
139594 -               const BYTE *match;
139596 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
139597 -                       match = base + matchIndex;
139598 -                       if (match[matchLength] == ip[matchLength]) {
139599 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iLimit) + 1;
139600 -                       }
139601 -               } else {
139602 -                       match = dictBase + matchIndex;
139603 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart);
139604 -                       if (matchIndex + matchLength >= dictLimit)
139605 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
139606 -               }
139608 -               if (matchLength > bestLength) {
139609 -                       if (matchLength > matchEndIdx - matchIndex)
139610 -                               matchEndIdx = matchIndex + (U32)matchLength;
139611 -                       bestLength = matchLength;
139612 -                       matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex;
139613 -                       matches[mnum].len = (U32)matchLength;
139614 -                       mnum++;
139615 -                       if (matchLength > ZSTD_OPT_NUM)
139616 -                               break;
139617 -                       if (ip + matchLength == iLimit) /* equal : no way to know if inf or sup */
139618 -                               break;                  /* drop, to guarantee consistency (miss a little bit of compression) */
139619 -               }
139621 -               if (match[matchLength] < ip[matchLength]) {
139622 -                       /* match is smaller than curr */
139623 -                       *smallerPtr = matchIndex;         /* update smaller idx */
139624 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
139625 -                       if (matchIndex <= btLow) {
139626 -                               smallerPtr = &dummy32;
139627 -                               break;
139628 -                       }                         /* beyond tree size, stop the search */
139629 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
139630 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
139631 -               } else {
139632 -                       /* match is larger than curr */
139633 -                       *largerPtr = matchIndex;
139634 -                       commonLengthLarger = matchLength;
139635 -                       if (matchIndex <= btLow) {
139636 -                               largerPtr = &dummy32;
139637 -                               break;
139638 -                       } /* beyond tree size, stop the search */
139639 -                       largerPtr = nextPtr;
139640 -                       matchIndex = nextPtr[0];
139641 -               }
139642 -       }
139644 -       *smallerPtr = *largerPtr = 0;
139646 -update:
139647 -       zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
139648 -       return mnum;
139651 -/** Tree updater, providing best match */
139652 -static U32 ZSTD_BtGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, ZSTD_match_t *matches,
139653 -                               const U32 minMatchLen)
139655 -       if (ip < zc->base + zc->nextToUpdate)
139656 -               return 0; /* skipped area */
139657 -       ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
139658 -       return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
139661 -static U32 ZSTD_BtGetAllMatches_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
139662 -                                         const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
139663 -                                         ZSTD_match_t *matches, const U32 minMatchLen)
139665 -       switch (matchLengthSearch) {
139666 -       case 3: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
139667 -       default:
139668 -       case 4: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
139669 -       case 5: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
139670 -       case 7:
139671 -       case 6: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
139672 -       }
139675 -/** Tree updater, providing best match */
139676 -static U32 ZSTD_BtGetAllMatches_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls,
139677 -                                       ZSTD_match_t *matches, const U32 minMatchLen)
139679 -       if (ip < zc->base + zc->nextToUpdate)
139680 -               return 0; /* skipped area */
139681 -       ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
139682 -       return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
139685 -static U32 ZSTD_BtGetAllMatches_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
139686 -                                                 const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
139687 -                                                 ZSTD_match_t *matches, const U32 minMatchLen)
139689 -       switch (matchLengthSearch) {
139690 -       case 3: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
139691 -       default:
139692 -       case 4: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
139693 -       case 5: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
139694 -       case 7:
139695 -       case 6: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
139696 -       }
139699 -/*-*******************************
139700 -*  Optimal parser
139701 -*********************************/
139702 -FORCE_INLINE
139703 -void ZSTD_compressBlock_opt_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
139705 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
139706 -       const BYTE *const istart = (const BYTE *)src;
139707 -       const BYTE *ip = istart;
139708 -       const BYTE *anchor = istart;
139709 -       const BYTE *const iend = istart + srcSize;
139710 -       const BYTE *const ilimit = iend - 8;
139711 -       const BYTE *const base = ctx->base;
139712 -       const BYTE *const prefixStart = base + ctx->dictLimit;
139714 -       const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
139715 -       const U32 sufficient_len = ctx->params.cParams.targetLength;
139716 -       const U32 mls = ctx->params.cParams.searchLength;
139717 -       const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
139719 -       ZSTD_optimal_t *opt = seqStorePtr->priceTable;
139720 -       ZSTD_match_t *matches = seqStorePtr->matchTable;
139721 -       const BYTE *inr;
139722 -       U32 offset, rep[ZSTD_REP_NUM];
139724 -       /* init */
139725 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
139726 -       ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
139727 -       ip += (ip == prefixStart);
139728 -       {
139729 -               U32 i;
139730 -               for (i = 0; i < ZSTD_REP_NUM; i++)
139731 -                       rep[i] = ctx->rep[i];
139732 -       }
139734 -       /* Match Loop */
139735 -       while (ip < ilimit) {
139736 -               U32 cur, match_num, last_pos, litlen, price;
139737 -               U32 u, mlen, best_mlen, best_off, litLength;
139738 -               memset(opt, 0, sizeof(ZSTD_optimal_t));
139739 -               last_pos = 0;
139740 -               litlen = (U32)(ip - anchor);
139742 -               /* check repCode */
139743 -               {
139744 -                       U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
139745 -                       for (i = (ip == anchor); i < last_i; i++) {
139746 -                               const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
139747 -                               if ((repCur > 0) && (repCur < (S32)(ip - prefixStart)) &&
139748 -                                   (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
139749 -                                       mlen = (U32)ZSTD_count(ip + minMatch, ip + minMatch - repCur, iend) + minMatch;
139750 -                                       if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
139751 -                                               best_mlen = mlen;
139752 -                                               best_off = i;
139753 -                                               cur = 0;
139754 -                                               last_pos = 1;
139755 -                                               goto _storeSequence;
139756 -                                       }
139757 -                                       best_off = i - (ip == anchor);
139758 -                                       do {
139759 -                                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
139760 -                                               if (mlen > last_pos || price < opt[mlen].price)
139761 -                                                       SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
139762 -                                               mlen--;
139763 -                                       } while (mlen >= minMatch);
139764 -                               }
139765 -                       }
139766 -               }
139768 -               match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
139770 -               if (!last_pos && !match_num) {
139771 -                       ip++;
139772 -                       continue;
139773 -               }
139775 -               if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
139776 -                       best_mlen = matches[match_num - 1].len;
139777 -                       best_off = matches[match_num - 1].off;
139778 -                       cur = 0;
139779 -                       last_pos = 1;
139780 -                       goto _storeSequence;
139781 -               }
139783 -               /* set prices using matches at position = 0 */
139784 -               best_mlen = (last_pos) ? last_pos : minMatch;
139785 -               for (u = 0; u < match_num; u++) {
139786 -                       mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
139787 -                       best_mlen = matches[u].len;
139788 -                       while (mlen <= best_mlen) {
139789 -                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
139790 -                               if (mlen > last_pos || price < opt[mlen].price)
139791 -                                       SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
139792 -                               mlen++;
139793 -                       }
139794 -               }
139796 -               if (last_pos < minMatch) {
139797 -                       ip++;
139798 -                       continue;
139799 -               }
139801 -               /* initialize opt[0] */
139802 -               {
139803 -                       U32 i;
139804 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
139805 -                               opt[0].rep[i] = rep[i];
139806 -               }
139807 -               opt[0].mlen = 1;
139808 -               opt[0].litlen = litlen;
139810 -               /* check further positions */
139811 -               for (cur = 1; cur <= last_pos; cur++) {
139812 -                       inr = ip + cur;
139814 -                       if (opt[cur - 1].mlen == 1) {
139815 -                               litlen = opt[cur - 1].litlen + 1;
139816 -                               if (cur > litlen) {
139817 -                                       price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
139818 -                               } else
139819 -                                       price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
139820 -                       } else {
139821 -                               litlen = 1;
139822 -                               price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
139823 -                       }
139825 -                       if (cur > last_pos || price <= opt[cur].price)
139826 -                               SET_PRICE(cur, 1, 0, litlen, price);
139828 -                       if (cur == last_pos)
139829 -                               break;
139831 -                       if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
139832 -                               continue;
139834 -                       mlen = opt[cur].mlen;
139835 -                       if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
139836 -                               opt[cur].rep[2] = opt[cur - mlen].rep[1];
139837 -                               opt[cur].rep[1] = opt[cur - mlen].rep[0];
139838 -                               opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
139839 -                       } else {
139840 -                               opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
139841 -                               opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
139842 -                               opt[cur].rep[0] =
139843 -                                   ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
139844 -                       }
139846 -                       best_mlen = minMatch;
139847 -                       {
139848 -                               U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
139849 -                               for (i = (opt[cur].mlen != 1); i < last_i; i++) { /* check rep */
139850 -                                       const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
139851 -                                       if ((repCur > 0) && (repCur < (S32)(inr - prefixStart)) &&
139852 -                                           (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) {
139853 -                                               mlen = (U32)ZSTD_count(inr + minMatch, inr + minMatch - repCur, iend) + minMatch;
139855 -                                               if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
139856 -                                                       best_mlen = mlen;
139857 -                                                       best_off = i;
139858 -                                                       last_pos = cur + 1;
139859 -                                                       goto _storeSequence;
139860 -                                               }
139862 -                                               best_off = i - (opt[cur].mlen != 1);
139863 -                                               if (mlen > best_mlen)
139864 -                                                       best_mlen = mlen;
139866 -                                               do {
139867 -                                                       if (opt[cur].mlen == 1) {
139868 -                                                               litlen = opt[cur].litlen;
139869 -                                                               if (cur > litlen) {
139870 -                                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
139871 -                                                                                                                       best_off, mlen - MINMATCH, ultra);
139872 -                                                               } else
139873 -                                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
139874 -                                                       } else {
139875 -                                                               litlen = 0;
139876 -                                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
139877 -                                                       }
139879 -                                                       if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
139880 -                                                               SET_PRICE(cur + mlen, mlen, i, litlen, price);
139881 -                                                       mlen--;
139882 -                                               } while (mlen >= minMatch);
139883 -                                       }
139884 -                               }
139885 -                       }
139887 -                       match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);
139889 -                       if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
139890 -                               best_mlen = matches[match_num - 1].len;
139891 -                               best_off = matches[match_num - 1].off;
139892 -                               last_pos = cur + 1;
139893 -                               goto _storeSequence;
139894 -                       }
139896 -                       /* set prices using matches at position = cur */
139897 -                       for (u = 0; u < match_num; u++) {
139898 -                               mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
139899 -                               best_mlen = matches[u].len;
139901 -                               while (mlen <= best_mlen) {
139902 -                                       if (opt[cur].mlen == 1) {
139903 -                                               litlen = opt[cur].litlen;
139904 -                                               if (cur > litlen)
139905 -                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
139906 -                                                                                                       matches[u].off - 1, mlen - MINMATCH, ultra);
139907 -                                               else
139908 -                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
139909 -                                       } else {
139910 -                                               litlen = 0;
139911 -                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
139912 -                                       }
139914 -                                       if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
139915 -                                               SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
139917 -                                       mlen++;
139918 -                               }
139919 -                       }
139920 -               }
139922 -               best_mlen = opt[last_pos].mlen;
139923 -               best_off = opt[last_pos].off;
139924 -               cur = last_pos - best_mlen;
139926 -       /* store sequence */
139927 -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
139928 -               opt[0].mlen = 1;
139930 -               while (1) {
139931 -                       mlen = opt[cur].mlen;
139932 -                       offset = opt[cur].off;
139933 -                       opt[cur].mlen = best_mlen;
139934 -                       opt[cur].off = best_off;
139935 -                       best_mlen = mlen;
139936 -                       best_off = offset;
139937 -                       if (mlen > cur)
139938 -                               break;
139939 -                       cur -= mlen;
139940 -               }
139942 -               for (u = 0; u <= last_pos;) {
139943 -                       u += opt[u].mlen;
139944 -               }
139946 -               for (cur = 0; cur < last_pos;) {
139947 -                       mlen = opt[cur].mlen;
139948 -                       if (mlen == 1) {
139949 -                               ip++;
139950 -                               cur++;
139951 -                               continue;
139952 -                       }
139953 -                       offset = opt[cur].off;
139954 -                       cur += mlen;
139955 -                       litLength = (U32)(ip - anchor);
139957 -                       if (offset > ZSTD_REP_MOVE_OPT) {
139958 -                               rep[2] = rep[1];
139959 -                               rep[1] = rep[0];
139960 -                               rep[0] = offset - ZSTD_REP_MOVE_OPT;
139961 -                               offset--;
139962 -                       } else {
139963 -                               if (offset != 0) {
139964 -                                       best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
139965 -                                       if (offset != 1)
139966 -                                               rep[2] = rep[1];
139967 -                                       rep[1] = rep[0];
139968 -                                       rep[0] = best_off;
139969 -                               }
139970 -                               if (litLength == 0)
139971 -                                       offset--;
139972 -                       }
139974 -                       ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
139975 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
139976 -                       anchor = ip = ip + mlen;
139977 -               }
139978 -       } /* for (cur=0; cur < last_pos; ) */
139980 -       /* Save reps for next block */
139981 -       {
139982 -               int i;
139983 -               for (i = 0; i < ZSTD_REP_NUM; i++)
139984 -                       ctx->repToConfirm[i] = rep[i];
139985 -       }
139987 -       /* Last Literals */
139988 -       {
139989 -               size_t const lastLLSize = iend - anchor;
139990 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
139991 -               seqStorePtr->lit += lastLLSize;
139992 -       }
139995 -FORCE_INLINE
139996 -void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
139998 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
139999 -       const BYTE *const istart = (const BYTE *)src;
140000 -       const BYTE *ip = istart;
140001 -       const BYTE *anchor = istart;
140002 -       const BYTE *const iend = istart + srcSize;
140003 -       const BYTE *const ilimit = iend - 8;
140004 -       const BYTE *const base = ctx->base;
140005 -       const U32 lowestIndex = ctx->lowLimit;
140006 -       const U32 dictLimit = ctx->dictLimit;
140007 -       const BYTE *const prefixStart = base + dictLimit;
140008 -       const BYTE *const dictBase = ctx->dictBase;
140009 -       const BYTE *const dictEnd = dictBase + dictLimit;
140011 -       const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
140012 -       const U32 sufficient_len = ctx->params.cParams.targetLength;
140013 -       const U32 mls = ctx->params.cParams.searchLength;
140014 -       const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
140016 -       ZSTD_optimal_t *opt = seqStorePtr->priceTable;
140017 -       ZSTD_match_t *matches = seqStorePtr->matchTable;
140018 -       const BYTE *inr;
140020 -       /* init */
140021 -       U32 offset, rep[ZSTD_REP_NUM];
140022 -       {
140023 -               U32 i;
140024 -               for (i = 0; i < ZSTD_REP_NUM; i++)
140025 -                       rep[i] = ctx->rep[i];
140026 -       }
140028 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
140029 -       ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
140030 -       ip += (ip == prefixStart);
140032 -       /* Match Loop */
140033 -       while (ip < ilimit) {
140034 -               U32 cur, match_num, last_pos, litlen, price;
140035 -               U32 u, mlen, best_mlen, best_off, litLength;
140036 -               U32 curr = (U32)(ip - base);
140037 -               memset(opt, 0, sizeof(ZSTD_optimal_t));
140038 -               last_pos = 0;
140039 -               opt[0].litlen = (U32)(ip - anchor);
140041 -               /* check repCode */
140042 -               {
140043 -                       U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
140044 -                       for (i = (ip == anchor); i < last_i; i++) {
140045 -                               const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
140046 -                               const U32 repIndex = (U32)(curr - repCur);
140047 -                               const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
140048 -                               const BYTE *const repMatch = repBase + repIndex;
140049 -                               if ((repCur > 0 && repCur <= (S32)curr) &&
140050 -                                   (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
140051 -                                   && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
140052 -                                       /* repcode detected we should take it */
140053 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
140054 -                                       mlen = (U32)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
140056 -                                       if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
140057 -                                               best_mlen = mlen;
140058 -                                               best_off = i;
140059 -                                               cur = 0;
140060 -                                               last_pos = 1;
140061 -                                               goto _storeSequence;
140062 -                                       }
140064 -                                       best_off = i - (ip == anchor);
140065 -                                       litlen = opt[0].litlen;
140066 -                                       do {
140067 -                                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
140068 -                                               if (mlen > last_pos || price < opt[mlen].price)
140069 -                                                       SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
140070 -                                               mlen--;
140071 -                                       } while (mlen >= minMatch);
140072 -                               }
140073 -                       }
140074 -               }
140076 -               match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */
140078 -               if (!last_pos && !match_num) {
140079 -                       ip++;
140080 -                       continue;
140081 -               }
140083 -               {
140084 -                       U32 i;
140085 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
140086 -                               opt[0].rep[i] = rep[i];
140087 -               }
140088 -               opt[0].mlen = 1;
140090 -               if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
140091 -                       best_mlen = matches[match_num - 1].len;
140092 -                       best_off = matches[match_num - 1].off;
140093 -                       cur = 0;
140094 -                       last_pos = 1;
140095 -                       goto _storeSequence;
140096 -               }
140098 -               best_mlen = (last_pos) ? last_pos : minMatch;
140100 -               /* set prices using matches at position = 0 */
140101 -               for (u = 0; u < match_num; u++) {
140102 -                       mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
140103 -                       best_mlen = matches[u].len;
140104 -                       litlen = opt[0].litlen;
140105 -                       while (mlen <= best_mlen) {
140106 -                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
140107 -                               if (mlen > last_pos || price < opt[mlen].price)
140108 -                                       SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
140109 -                               mlen++;
140110 -                       }
140111 -               }
140113 -               if (last_pos < minMatch) {
140114 -                       ip++;
140115 -                       continue;
140116 -               }
140118 -               /* check further positions */
140119 -               for (cur = 1; cur <= last_pos; cur++) {
140120 -                       inr = ip + cur;
140122 -                       if (opt[cur - 1].mlen == 1) {
140123 -                               litlen = opt[cur - 1].litlen + 1;
140124 -                               if (cur > litlen) {
140125 -                                       price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
140126 -                               } else
140127 -                                       price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
140128 -                       } else {
140129 -                               litlen = 1;
140130 -                               price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
140131 -                       }
140133 -                       if (cur > last_pos || price <= opt[cur].price)
140134 -                               SET_PRICE(cur, 1, 0, litlen, price);
140136 -                       if (cur == last_pos)
140137 -                               break;
140139 -                       if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
140140 -                               continue;
140142 -                       mlen = opt[cur].mlen;
140143 -                       if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
140144 -                               opt[cur].rep[2] = opt[cur - mlen].rep[1];
140145 -                               opt[cur].rep[1] = opt[cur - mlen].rep[0];
140146 -                               opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
140147 -                       } else {
140148 -                               opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
140149 -                               opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
140150 -                               opt[cur].rep[0] =
140151 -                                   ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
140152 -                       }
140154 -                       best_mlen = minMatch;
140155 -                       {
140156 -                               U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
140157 -                               for (i = (mlen != 1); i < last_i; i++) {
140158 -                                       const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
140159 -                                       const U32 repIndex = (U32)(curr + cur - repCur);
140160 -                                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
140161 -                                       const BYTE *const repMatch = repBase + repIndex;
140162 -                                       if ((repCur > 0 && repCur <= (S32)(curr + cur)) &&
140163 -                                           (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
140164 -                                           && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
140165 -                                               /* repcode detected */
140166 -                                               const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
140167 -                                               mlen = (U32)ZSTD_count_2segments(inr + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
140169 -                                               if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
140170 -                                                       best_mlen = mlen;
140171 -                                                       best_off = i;
140172 -                                                       last_pos = cur + 1;
140173 -                                                       goto _storeSequence;
140174 -                                               }
140176 -                                               best_off = i - (opt[cur].mlen != 1);
140177 -                                               if (mlen > best_mlen)
140178 -                                                       best_mlen = mlen;
140180 -                                               do {
140181 -                                                       if (opt[cur].mlen == 1) {
140182 -                                                               litlen = opt[cur].litlen;
140183 -                                                               if (cur > litlen) {
140184 -                                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
140185 -                                                                                                                       best_off, mlen - MINMATCH, ultra);
140186 -                                                               } else
140187 -                                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
140188 -                                                       } else {
140189 -                                                               litlen = 0;
140190 -                                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
140191 -                                                       }
140193 -                                                       if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
140194 -                                                               SET_PRICE(cur + mlen, mlen, i, litlen, price);
140195 -                                                       mlen--;
140196 -                                               } while (mlen >= minMatch);
140197 -                                       }
140198 -                               }
140199 -                       }
140201 -                       match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
140203 -                       if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
140204 -                               best_mlen = matches[match_num - 1].len;
140205 -                               best_off = matches[match_num - 1].off;
140206 -                               last_pos = cur + 1;
140207 -                               goto _storeSequence;
140208 -                       }
140210 -                       /* set prices using matches at position = cur */
140211 -                       for (u = 0; u < match_num; u++) {
140212 -                               mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
140213 -                               best_mlen = matches[u].len;
140215 -                               while (mlen <= best_mlen) {
140216 -                                       if (opt[cur].mlen == 1) {
140217 -                                               litlen = opt[cur].litlen;
140218 -                                               if (cur > litlen)
140219 -                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
140220 -                                                                                                       matches[u].off - 1, mlen - MINMATCH, ultra);
140221 -                                               else
140222 -                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
140223 -                                       } else {
140224 -                                               litlen = 0;
140225 -                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
140226 -                                       }
140228 -                                       if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
140229 -                                               SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
140231 -                                       mlen++;
140232 -                               }
140233 -                       }
140234 -               } /* for (cur = 1; cur <= last_pos; cur++) */
140236 -               best_mlen = opt[last_pos].mlen;
140237 -               best_off = opt[last_pos].off;
140238 -               cur = last_pos - best_mlen;
140240 -       /* store sequence */
140241 -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
140242 -               opt[0].mlen = 1;
140244 -               while (1) {
140245 -                       mlen = opt[cur].mlen;
140246 -                       offset = opt[cur].off;
140247 -                       opt[cur].mlen = best_mlen;
140248 -                       opt[cur].off = best_off;
140249 -                       best_mlen = mlen;
140250 -                       best_off = offset;
140251 -                       if (mlen > cur)
140252 -                               break;
140253 -                       cur -= mlen;
140254 -               }
140256 -               for (u = 0; u <= last_pos;) {
140257 -                       u += opt[u].mlen;
140258 -               }
140260 -               for (cur = 0; cur < last_pos;) {
140261 -                       mlen = opt[cur].mlen;
140262 -                       if (mlen == 1) {
140263 -                               ip++;
140264 -                               cur++;
140265 -                               continue;
140266 -                       }
140267 -                       offset = opt[cur].off;
140268 -                       cur += mlen;
140269 -                       litLength = (U32)(ip - anchor);
140271 -                       if (offset > ZSTD_REP_MOVE_OPT) {
140272 -                               rep[2] = rep[1];
140273 -                               rep[1] = rep[0];
140274 -                               rep[0] = offset - ZSTD_REP_MOVE_OPT;
140275 -                               offset--;
140276 -                       } else {
140277 -                               if (offset != 0) {
140278 -                                       best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
140279 -                                       if (offset != 1)
140280 -                                               rep[2] = rep[1];
140281 -                                       rep[1] = rep[0];
140282 -                                       rep[0] = best_off;
140283 -                               }
140285 -                               if (litLength == 0)
140286 -                                       offset--;
140287 -                       }
140289 -                       ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
140290 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
140291 -                       anchor = ip = ip + mlen;
140292 -               }
140293 -       } /* for (cur=0; cur < last_pos; ) */
140295 -       /* Save reps for next block */
140296 -       {
140297 -               int i;
140298 -               for (i = 0; i < ZSTD_REP_NUM; i++)
140299 -                       ctx->repToConfirm[i] = rep[i];
140300 -       }
140302 -       /* Last Literals */
140303 -       {
140304 -               size_t lastLLSize = iend - anchor;
140305 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
140306 -               seqStorePtr->lit += lastLLSize;
140307 -       }
140310 -#endif /* ZSTD_OPT_H_91842398743 */
140311 diff --git a/localversion b/localversion
140312 new file mode 100644
140313 index 000000000000..c21af2f75ee0
140314 --- /dev/null
140315 +++ b/localversion
140316 @@ -0,0 +1 @@
140317 +-xanmod1
140318 diff --git a/mm/Kconfig b/mm/Kconfig
140319 index 24c045b24b95..5650c2d3c9c2 100644
140320 --- a/mm/Kconfig
140321 +++ b/mm/Kconfig
140322 @@ -122,6 +122,41 @@ config SPARSEMEM_VMEMMAP
140323           pfn_to_page and page_to_pfn operations.  This is the most
140324           efficient option when sufficient kernel resources are available.
140326 +config CLEAN_LOW_KBYTES
140327 +       int "Default value for vm.clean_low_kbytes"
140328 +       depends on SYSCTL
140329 +       default "150000"
140330 +       help
140331 +         The vm.clean_low_kbytes sysctl knob provides *best-effort*
140332 +         protection of clean file pages. The clean file pages on the current
140333 +         node won't be reclaimed under memory pressure when their amount is
140334 +         below vm.clean_low_kbytes *unless* we threaten to OOM or have
140335 +         no free swap space or vm.swappiness=0.
140337 +         Protection of clean file pages may be used to prevent thrashing and
140338 +         reducing I/O under low-memory conditions.
140340 +         Setting it to a high value may result in a early eviction of anonymous
140341 +         pages into the swap space by attempting to hold the protected amount of
140342 +         clean file pages in memory.
140344 +config CLEAN_MIN_KBYTES
140345 +       int "Default value for vm.clean_min_kbytes"
140346 +       depends on SYSCTL
140347 +       default "0"
140348 +       help
140349 +         The vm.clean_min_kbytes sysctl knob provides *hard* protection
140350 +         of clean file pages. The clean file pages on the current node won't be
140351 +         reclaimed under memory pressure when their amount is below
140352 +         vm.clean_min_kbytes.
140354 +         Hard protection of clean file pages may be used to avoid high latency and
140355 +         prevent livelock in near-OOM conditions.
140357 +         Setting it to a high value may result in a early out-of-memory condition
140358 +         due to the inability to reclaim the protected amount of clean file pages
140359 +         when other types of pages cannot be reclaimed.
140361  config HAVE_MEMBLOCK_PHYS_MAP
140362         bool
140364 @@ -872,4 +907,59 @@ config MAPPING_DIRTY_HELPERS
140365  config KMAP_LOCAL
140366         bool
140368 +config LRU_GEN
140369 +       bool "Multigenerational LRU"
140370 +       depends on MMU
140371 +       help
140372 +         A high performance LRU implementation to heavily overcommit workloads
140373 +         that are not IO bound. See Documentation/vm/multigen_lru.rst for
140374 +         details.
140376 +         Warning: do not enable this option unless you plan to use it because
140377 +         it introduces a small per-process and per-memcg and per-node memory
140378 +         overhead.
140380 +config NR_LRU_GENS
140381 +       int "Max number of generations"
140382 +       depends on LRU_GEN
140383 +       range 4 31
140384 +       default 7
140385 +       help
140386 +         This will use order_base_2(N+1) spare bits from page flags.
140388 +         Warning: do not use numbers larger than necessary because each
140389 +         generation introduces a small per-node and per-memcg memory overhead.
140391 +config TIERS_PER_GEN
140392 +       int "Number of tiers per generation"
140393 +       depends on LRU_GEN
140394 +       range 2 5
140395 +       default 4
140396 +       help
140397 +         This will use N-2 spare bits from page flags.
140399 +         Higher values generally offer better protection to active pages under
140400 +         heavy buffered I/O workloads.
140402 +config LRU_GEN_ENABLED
140403 +       bool "Turn on by default"
140404 +       depends on LRU_GEN
140405 +       help
140406 +         The default value of /sys/kernel/mm/lru_gen/enabled is 0. This option
140407 +         changes it to 1.
140409 +         Warning: the default value is the fast path. See
140410 +         Documentation/static-keys.txt for details.
140412 +config LRU_GEN_STATS
140413 +       bool "Full stats for debugging"
140414 +       depends on LRU_GEN
140415 +       help
140416 +         This option keeps full stats for each generation, which can be read
140417 +         from /sys/kernel/debug/lru_gen_full.
140419 +         Warning: do not enable this option unless you plan to use it because
140420 +         it introduces an additional small per-process and per-memcg and
140421 +         per-node memory overhead.
140423  endmenu
140424 diff --git a/mm/gup.c b/mm/gup.c
140425 index ef7d2da9f03f..333f5dfd8942 100644
140426 --- a/mm/gup.c
140427 +++ b/mm/gup.c
140428 @@ -1551,54 +1551,60 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
140429                                         struct vm_area_struct **vmas,
140430                                         unsigned int gup_flags)
140432 -       unsigned long i;
140433 -       unsigned long step;
140434 -       bool drain_allow = true;
140435 -       bool migrate_allow = true;
140436 +       unsigned long i, isolation_error_count;
140437 +       bool drain_allow;
140438         LIST_HEAD(cma_page_list);
140439         long ret = nr_pages;
140440 +       struct page *prev_head, *head;
140441         struct migration_target_control mtc = {
140442                 .nid = NUMA_NO_NODE,
140443                 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
140444         };
140446  check_again:
140447 -       for (i = 0; i < nr_pages;) {
140449 -               struct page *head = compound_head(pages[i]);
140451 -               /*
140452 -                * gup may start from a tail page. Advance step by the left
140453 -                * part.
140454 -                */
140455 -               step = compound_nr(head) - (pages[i] - head);
140456 +       prev_head = NULL;
140457 +       isolation_error_count = 0;
140458 +       drain_allow = true;
140459 +       for (i = 0; i < nr_pages; i++) {
140460 +               head = compound_head(pages[i]);
140461 +               if (head == prev_head)
140462 +                       continue;
140463 +               prev_head = head;
140464                 /*
140465                  * If we get a page from the CMA zone, since we are going to
140466                  * be pinning these entries, we might as well move them out
140467                  * of the CMA zone if possible.
140468                  */
140469                 if (is_migrate_cma_page(head)) {
140470 -                       if (PageHuge(head))
140471 -                               isolate_huge_page(head, &cma_page_list);
140472 -                       else {
140473 +                       if (PageHuge(head)) {
140474 +                               if (!isolate_huge_page(head, &cma_page_list))
140475 +                                       isolation_error_count++;
140476 +                       } else {
140477                                 if (!PageLRU(head) && drain_allow) {
140478                                         lru_add_drain_all();
140479                                         drain_allow = false;
140480                                 }
140482 -                               if (!isolate_lru_page(head)) {
140483 -                                       list_add_tail(&head->lru, &cma_page_list);
140484 -                                       mod_node_page_state(page_pgdat(head),
140485 -                                                           NR_ISOLATED_ANON +
140486 -                                                           page_is_file_lru(head),
140487 -                                                           thp_nr_pages(head));
140488 +                               if (isolate_lru_page(head)) {
140489 +                                       isolation_error_count++;
140490 +                                       continue;
140491                                 }
140492 +                               list_add_tail(&head->lru, &cma_page_list);
140493 +                               mod_node_page_state(page_pgdat(head),
140494 +                                                   NR_ISOLATED_ANON +
140495 +                                                   page_is_file_lru(head),
140496 +                                                   thp_nr_pages(head));
140497                         }
140498                 }
140500 -               i += step;
140501         }
140503 +       /*
140504 +        * If list is empty, and no isolation errors, means that all pages are
140505 +        * in the correct zone.
140506 +        */
140507 +       if (list_empty(&cma_page_list) && !isolation_error_count)
140508 +               return ret;
140510         if (!list_empty(&cma_page_list)) {
140511                 /*
140512                  * drop the above get_user_pages reference.
140513 @@ -1609,34 +1615,28 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
140514                         for (i = 0; i < nr_pages; i++)
140515                                 put_page(pages[i]);
140517 -               if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
140518 -                       (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
140519 -                       /*
140520 -                        * some of the pages failed migration. Do get_user_pages
140521 -                        * without migration.
140522 -                        */
140523 -                       migrate_allow = false;
140525 +               ret = migrate_pages(&cma_page_list, alloc_migration_target,
140526 +                                   NULL, (unsigned long)&mtc, MIGRATE_SYNC,
140527 +                                   MR_CONTIG_RANGE);
140528 +               if (ret) {
140529                         if (!list_empty(&cma_page_list))
140530                                 putback_movable_pages(&cma_page_list);
140531 +                       return ret > 0 ? -ENOMEM : ret;
140532                 }
140533 -               /*
140534 -                * We did migrate all the pages, Try to get the page references
140535 -                * again migrating any new CMA pages which we failed to isolate
140536 -                * earlier.
140537 -                */
140538 -               ret = __get_user_pages_locked(mm, start, nr_pages,
140539 -                                                  pages, vmas, NULL,
140540 -                                                  gup_flags);
140542 -               if ((ret > 0) && migrate_allow) {
140543 -                       nr_pages = ret;
140544 -                       drain_allow = true;
140545 -                       goto check_again;
140546 -               }
140548 +               /* We unpinned pages before migration, pin them again */
140549 +               ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
140550 +                                             NULL, gup_flags);
140551 +               if (ret <= 0)
140552 +                       return ret;
140553 +               nr_pages = ret;
140554         }
140556 -       return ret;
140557 +       /*
140558 +        * check again because pages were unpinned, and we also might have
140559 +        * had isolation errors and need more pages to migrate.
140560 +        */
140561 +       goto check_again;
140563  #else
140564  static long check_and_migrate_cma_pages(struct mm_struct *mm,
140565 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
140566 index ae907a9c2050..2cf46270c84b 100644
140567 --- a/mm/huge_memory.c
140568 +++ b/mm/huge_memory.c
140569 @@ -637,7 +637,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
140570                 entry = mk_huge_pmd(page, vma->vm_page_prot);
140571                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
140572                 page_add_new_anon_rmap(page, vma, haddr, true);
140573 -               lru_cache_add_inactive_or_unevictable(page, vma);
140574 +               lru_cache_add_page_vma(page, vma, true);
140575                 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
140576                 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
140577                 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
140578 @@ -2418,7 +2418,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
140579  #ifdef CONFIG_64BIT
140580                          (1L << PG_arch_2) |
140581  #endif
140582 -                        (1L << PG_dirty)));
140583 +                        (1L << PG_dirty) |
140584 +                        LRU_GEN_MASK | LRU_USAGE_MASK));
140586         /* ->mapping in first tail page is compound_mapcount */
140587         VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
140588 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
140589 index a86a58ef132d..96b722af092e 100644
140590 --- a/mm/hugetlb.c
140591 +++ b/mm/hugetlb.c
140592 @@ -743,13 +743,20 @@ void hugetlb_fix_reserve_counts(struct inode *inode)
140594         struct hugepage_subpool *spool = subpool_inode(inode);
140595         long rsv_adjust;
140596 +       bool reserved = false;
140598         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
140599 -       if (rsv_adjust) {
140600 +       if (rsv_adjust > 0) {
140601                 struct hstate *h = hstate_inode(inode);
140603 -               hugetlb_acct_memory(h, 1);
140604 +               if (!hugetlb_acct_memory(h, 1))
140605 +                       reserved = true;
140606 +       } else if (!rsv_adjust) {
140607 +               reserved = true;
140608         }
140610 +       if (!reserved)
140611 +               pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
140615 @@ -3898,6 +3905,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
140616                                  * See Documentation/vm/mmu_notifier.rst
140617                                  */
140618                                 huge_ptep_set_wrprotect(src, addr, src_pte);
140619 +                               entry = huge_pte_wrprotect(entry);
140620                         }
140622                         page_dup_rmap(ptepage, true);
140623 diff --git a/mm/kfence/core.c b/mm/kfence/core.c
140624 index d53c91f881a4..f0be2c5038b5 100644
140625 --- a/mm/kfence/core.c
140626 +++ b/mm/kfence/core.c
140627 @@ -10,6 +10,7 @@
140628  #include <linux/atomic.h>
140629  #include <linux/bug.h>
140630  #include <linux/debugfs.h>
140631 +#include <linux/irq_work.h>
140632  #include <linux/kcsan-checks.h>
140633  #include <linux/kfence.h>
140634  #include <linux/kmemleak.h>
140635 @@ -586,6 +587,17 @@ late_initcall(kfence_debugfs_init);
140637  /* === Allocation Gate Timer ================================================ */
140639 +#ifdef CONFIG_KFENCE_STATIC_KEYS
140640 +/* Wait queue to wake up allocation-gate timer task. */
140641 +static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
140643 +static void wake_up_kfence_timer(struct irq_work *work)
140645 +       wake_up(&allocation_wait);
140647 +static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
140648 +#endif
140651   * Set up delayed work, which will enable and disable the static key. We need to
140652   * use a work queue (rather than a simple timer), since enabling and disabling a
140653 @@ -603,25 +615,13 @@ static void toggle_allocation_gate(struct work_struct *work)
140654         if (!READ_ONCE(kfence_enabled))
140655                 return;
140657 -       /* Enable static key, and await allocation to happen. */
140658         atomic_set(&kfence_allocation_gate, 0);
140659  #ifdef CONFIG_KFENCE_STATIC_KEYS
140660 +       /* Enable static key, and await allocation to happen. */
140661         static_branch_enable(&kfence_allocation_key);
140662 -       /*
140663 -        * Await an allocation. Timeout after 1 second, in case the kernel stops
140664 -        * doing allocations, to avoid stalling this worker task for too long.
140665 -        */
140666 -       {
140667 -               unsigned long end_wait = jiffies + HZ;
140669 -               do {
140670 -                       set_current_state(TASK_UNINTERRUPTIBLE);
140671 -                       if (atomic_read(&kfence_allocation_gate) != 0)
140672 -                               break;
140673 -                       schedule_timeout(1);
140674 -               } while (time_before(jiffies, end_wait));
140675 -               __set_current_state(TASK_RUNNING);
140676 -       }
140678 +       wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), HZ);
140680         /* Disable static key and reset timer. */
140681         static_branch_disable(&kfence_allocation_key);
140682  #endif
140683 @@ -728,6 +728,19 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
140684          */
140685         if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
140686                 return NULL;
140687 +#ifdef CONFIG_KFENCE_STATIC_KEYS
140688 +       /*
140689 +        * waitqueue_active() is fully ordered after the update of
140690 +        * kfence_allocation_gate per atomic_inc_return().
140691 +        */
140692 +       if (waitqueue_active(&allocation_wait)) {
140693 +               /*
140694 +                * Calling wake_up() here may deadlock when allocations happen
140695 +                * from within timer code. Use an irq_work to defer it.
140696 +                */
140697 +               irq_work_queue(&wake_up_kfence_timer_work);
140698 +       }
140699 +#endif
140701         if (!READ_ONCE(kfence_enabled))
140702                 return NULL;
140703 diff --git a/mm/khugepaged.c b/mm/khugepaged.c
140704 index a7d6cb912b05..fd14b1e3c6f1 100644
140705 --- a/mm/khugepaged.c
140706 +++ b/mm/khugepaged.c
140707 @@ -716,17 +716,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
140708                 if (pte_write(pteval))
140709                         writable = true;
140710         }
140711 -       if (likely(writable)) {
140712 -               if (likely(referenced)) {
140713 -                       result = SCAN_SUCCEED;
140714 -                       trace_mm_collapse_huge_page_isolate(page, none_or_zero,
140715 -                                                           referenced, writable, result);
140716 -                       return 1;
140717 -               }
140718 -       } else {
140720 +       if (unlikely(!writable)) {
140721                 result = SCAN_PAGE_RO;
140722 +       } else if (unlikely(!referenced)) {
140723 +               result = SCAN_LACK_REFERENCED_PAGE;
140724 +       } else {
140725 +               result = SCAN_SUCCEED;
140726 +               trace_mm_collapse_huge_page_isolate(page, none_or_zero,
140727 +                                                   referenced, writable, result);
140728 +               return 1;
140729         }
140731  out:
140732         release_pte_pages(pte, _pte, compound_pagelist);
140733         trace_mm_collapse_huge_page_isolate(page, none_or_zero,
140734 @@ -1199,7 +1199,7 @@ static void collapse_huge_page(struct mm_struct *mm,
140735         spin_lock(pmd_ptl);
140736         BUG_ON(!pmd_none(*pmd));
140737         page_add_new_anon_rmap(new_page, vma, address, true);
140738 -       lru_cache_add_inactive_or_unevictable(new_page, vma);
140739 +       lru_cache_add_page_vma(new_page, vma, true);
140740         pgtable_trans_huge_deposit(mm, pmd, pgtable);
140741         set_pmd_at(mm, address, pmd, _pmd);
140742         update_mmu_cache_pmd(vma, address, pmd);
140743 diff --git a/mm/ksm.c b/mm/ksm.c
140744 index 9694ee2c71de..b32391ccf6d5 100644
140745 --- a/mm/ksm.c
140746 +++ b/mm/ksm.c
140747 @@ -794,6 +794,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
140748                 stable_node->rmap_hlist_len--;
140750                 put_anon_vma(rmap_item->anon_vma);
140751 +               rmap_item->head = NULL;
140752                 rmap_item->address &= PAGE_MASK;
140754         } else if (rmap_item->address & UNSTABLE_FLAG) {
140755 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
140756 index e064ac0d850a..594f99eba9c0 100644
140757 --- a/mm/memcontrol.c
140758 +++ b/mm/memcontrol.c
140759 @@ -3181,9 +3181,17 @@ static void drain_obj_stock(struct memcg_stock_pcp *stock)
140760                 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
140762                 if (nr_pages) {
140763 +                       struct mem_cgroup *memcg;
140765                         rcu_read_lock();
140766 -                       __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
140767 +retry:
140768 +                       memcg = obj_cgroup_memcg(old);
140769 +                       if (unlikely(!css_tryget(&memcg->css)))
140770 +                               goto retry;
140771                         rcu_read_unlock();
140773 +                       __memcg_kmem_uncharge(memcg, nr_pages);
140774 +                       css_put(&memcg->css);
140775                 }
140777                 /*
140778 @@ -5206,6 +5214,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
140779                 free_mem_cgroup_per_node_info(memcg, node);
140780         free_percpu(memcg->vmstats_percpu);
140781         free_percpu(memcg->vmstats_local);
140782 +       lru_gen_free_mm_list(memcg);
140783         kfree(memcg);
140786 @@ -5258,6 +5267,9 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
140787                 if (alloc_mem_cgroup_per_node_info(memcg, node))
140788                         goto fail;
140790 +       if (lru_gen_alloc_mm_list(memcg))
140791 +               goto fail;
140793         if (memcg_wb_domain_init(memcg, GFP_KERNEL))
140794                 goto fail;
140796 @@ -6162,6 +6174,29 @@ static void mem_cgroup_move_task(void)
140798  #endif
140800 +#ifdef CONFIG_LRU_GEN
140801 +static void mem_cgroup_attach(struct cgroup_taskset *tset)
140803 +       struct cgroup_subsys_state *css;
140804 +       struct task_struct *task = NULL;
140806 +       cgroup_taskset_for_each_leader(task, css, tset)
140807 +               ;
140809 +       if (!task)
140810 +               return;
140812 +       task_lock(task);
140813 +       if (task->mm && task->mm->owner == task)
140814 +               lru_gen_migrate_mm(task->mm);
140815 +       task_unlock(task);
140817 +#else
140818 +static void mem_cgroup_attach(struct cgroup_taskset *tset)
140821 +#endif
140823  static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
140825         if (value == PAGE_COUNTER_MAX)
140826 @@ -6502,6 +6537,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
140827         .css_free = mem_cgroup_css_free,
140828         .css_reset = mem_cgroup_css_reset,
140829         .can_attach = mem_cgroup_can_attach,
140830 +       .attach = mem_cgroup_attach,
140831         .cancel_attach = mem_cgroup_cancel_attach,
140832         .post_attach = mem_cgroup_move_task,
140833         .dfl_cftypes = memory_files,
140834 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
140835 index 24210c9bd843..bd3945446d47 100644
140836 --- a/mm/memory-failure.c
140837 +++ b/mm/memory-failure.c
140838 @@ -1368,7 +1368,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
140839                  * communicated in siginfo, see kill_proc()
140840                  */
140841                 start = (page->index << PAGE_SHIFT) & ~(size - 1);
140842 -               unmap_mapping_range(page->mapping, start, start + size, 0);
140843 +               unmap_mapping_range(page->mapping, start, size, 0);
140844         }
140845         kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
140846         rc = 0;
140847 diff --git a/mm/memory.c b/mm/memory.c
140848 index 550405fc3b5e..a1332ba9c0da 100644
140849 --- a/mm/memory.c
140850 +++ b/mm/memory.c
140851 @@ -73,6 +73,7 @@
140852  #include <linux/perf_event.h>
140853  #include <linux/ptrace.h>
140854  #include <linux/vmalloc.h>
140855 +#include <linux/mm_inline.h>
140857  #include <trace/events/kmem.h>
140859 @@ -839,7 +840,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
140860         copy_user_highpage(new_page, page, addr, src_vma);
140861         __SetPageUptodate(new_page);
140862         page_add_new_anon_rmap(new_page, dst_vma, addr, false);
140863 -       lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
140864 +       lru_cache_add_page_vma(new_page, dst_vma, false);
140865         rss[mm_counter(new_page)]++;
140867         /* All done, just insert the new page copy in the child */
140868 @@ -1548,6 +1549,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
140869         mmu_notifier_invalidate_range_end(&range);
140870         tlb_finish_mmu(&tlb);
140872 +EXPORT_SYMBOL(zap_page_range);
140874  /**
140875   * zap_page_range_single - remove user pages in a given range
140876 @@ -2907,7 +2909,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
140877                  */
140878                 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
140879                 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
140880 -               lru_cache_add_inactive_or_unevictable(new_page, vma);
140881 +               lru_cache_add_page_vma(new_page, vma, true);
140882                 /*
140883                  * We call the notify macro here because, when using secondary
140884                  * mmu page tables (such as kvm shadow page tables), we want the
140885 @@ -3438,9 +3440,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
140886         /* ksm created a completely new copy */
140887         if (unlikely(page != swapcache && swapcache)) {
140888                 page_add_new_anon_rmap(page, vma, vmf->address, false);
140889 -               lru_cache_add_inactive_or_unevictable(page, vma);
140890 +               lru_cache_add_page_vma(page, vma, true);
140891         } else {
140892                 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
140893 +               lru_gen_activation(page, vma);
140894         }
140896         swap_free(entry);
140897 @@ -3584,7 +3587,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
140899         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
140900         page_add_new_anon_rmap(page, vma, vmf->address, false);
140901 -       lru_cache_add_inactive_or_unevictable(page, vma);
140902 +       lru_cache_add_page_vma(page, vma, true);
140903  setpte:
140904         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
140906 @@ -3709,6 +3712,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
140908         add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
140909         page_add_file_rmap(page, true);
140910 +       lru_gen_activation(page, vma);
140911         /*
140912          * deposit and withdraw with pmd lock held
140913          */
140914 @@ -3752,10 +3756,11 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
140915         if (write && !(vma->vm_flags & VM_SHARED)) {
140916                 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
140917                 page_add_new_anon_rmap(page, vma, addr, false);
140918 -               lru_cache_add_inactive_or_unevictable(page, vma);
140919 +               lru_cache_add_page_vma(page, vma, true);
140920         } else {
140921                 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
140922                 page_add_file_rmap(page, false);
140923 +               lru_gen_activation(page, vma);
140924         }
140925         set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
140927 diff --git a/mm/migrate.c b/mm/migrate.c
140928 index 62b81d5257aa..9a50fd026236 100644
140929 --- a/mm/migrate.c
140930 +++ b/mm/migrate.c
140931 @@ -2973,6 +2973,13 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
140933                         swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
140934                         entry = swp_entry_to_pte(swp_entry);
140935 +               } else {
140936 +                       /*
140937 +                        * For now we only support migrating to un-addressable
140938 +                        * device memory.
140939 +                        */
140940 +                       pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
140941 +                       goto abort;
140942                 }
140943         } else {
140944                 entry = mk_pte(page, vma->vm_page_prot);
140945 @@ -3004,7 +3011,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
140946         inc_mm_counter(mm, MM_ANONPAGES);
140947         page_add_new_anon_rmap(page, vma, addr, false);
140948         if (!is_zone_device_page(page))
140949 -               lru_cache_add_inactive_or_unevictable(page, vma);
140950 +               lru_cache_add_page_vma(page, vma, false);
140951         get_page(page);
140953         if (flush) {
140954 diff --git a/mm/mm_init.c b/mm/mm_init.c
140955 index 8e02e865cc65..6303ed7aa511 100644
140956 --- a/mm/mm_init.c
140957 +++ b/mm/mm_init.c
140958 @@ -71,27 +71,33 @@ void __init mminit_verify_pageflags_layout(void)
140959         width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
140960                 - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
140961         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
140962 -               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
140963 +               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d lru gen %d tier %d Flags %d\n",
140964                 SECTIONS_WIDTH,
140965                 NODES_WIDTH,
140966                 ZONES_WIDTH,
140967                 LAST_CPUPID_WIDTH,
140968                 KASAN_TAG_WIDTH,
140969 +               LRU_GEN_WIDTH,
140970 +               LRU_USAGE_WIDTH,
140971                 NR_PAGEFLAGS);
140972         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
140973 -               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
140974 +               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d lru gen %d tier %d\n",
140975                 SECTIONS_SHIFT,
140976                 NODES_SHIFT,
140977                 ZONES_SHIFT,
140978                 LAST_CPUPID_SHIFT,
140979 -               KASAN_TAG_WIDTH);
140980 +               KASAN_TAG_WIDTH,
140981 +               LRU_GEN_WIDTH,
140982 +               LRU_USAGE_WIDTH);
140983         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
140984 -               "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
140985 +               "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu lru gen %lu tier %lu\n",
140986                 (unsigned long)SECTIONS_PGSHIFT,
140987                 (unsigned long)NODES_PGSHIFT,
140988                 (unsigned long)ZONES_PGSHIFT,
140989                 (unsigned long)LAST_CPUPID_PGSHIFT,
140990 -               (unsigned long)KASAN_TAG_PGSHIFT);
140991 +               (unsigned long)KASAN_TAG_PGSHIFT,
140992 +               (unsigned long)LRU_GEN_PGOFF,
140993 +               (unsigned long)LRU_USAGE_PGOFF);
140994         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
140995                 "Node/Zone ID: %lu -> %lu\n",
140996                 (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
140997 diff --git a/mm/mmzone.c b/mm/mmzone.c
140998 index eb89d6e018e2..2ec0d7793424 100644
140999 --- a/mm/mmzone.c
141000 +++ b/mm/mmzone.c
141001 @@ -81,6 +81,8 @@ void lruvec_init(struct lruvec *lruvec)
141003         for_each_lru(lru)
141004                 INIT_LIST_HEAD(&lruvec->lists[lru]);
141006 +       lru_gen_init_lruvec(lruvec);
141009  #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
141010 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
141011 index cfc72873961d..4bb3cdfc47f8 100644
141012 --- a/mm/page_alloc.c
141013 +++ b/mm/page_alloc.c
141014 @@ -764,32 +764,36 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
141015   */
141016  void init_mem_debugging_and_hardening(void)
141018 +       bool page_poisoning_requested = false;
141020 +#ifdef CONFIG_PAGE_POISONING
141021 +       /*
141022 +        * Page poisoning is debug page alloc for some arches. If
141023 +        * either of those options are enabled, enable poisoning.
141024 +        */
141025 +       if (page_poisoning_enabled() ||
141026 +            (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
141027 +             debug_pagealloc_enabled())) {
141028 +               static_branch_enable(&_page_poisoning_enabled);
141029 +               page_poisoning_requested = true;
141030 +       }
141031 +#endif
141033         if (_init_on_alloc_enabled_early) {
141034 -               if (page_poisoning_enabled())
141035 +               if (page_poisoning_requested)
141036                         pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
141037                                 "will take precedence over init_on_alloc\n");
141038                 else
141039                         static_branch_enable(&init_on_alloc);
141040         }
141041         if (_init_on_free_enabled_early) {
141042 -               if (page_poisoning_enabled())
141043 +               if (page_poisoning_requested)
141044                         pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
141045                                 "will take precedence over init_on_free\n");
141046                 else
141047                         static_branch_enable(&init_on_free);
141048         }
141050 -#ifdef CONFIG_PAGE_POISONING
141051 -       /*
141052 -        * Page poisoning is debug page alloc for some arches. If
141053 -        * either of those options are enabled, enable poisoning.
141054 -        */
141055 -       if (page_poisoning_enabled() ||
141056 -            (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
141057 -             debug_pagealloc_enabled()))
141058 -               static_branch_enable(&_page_poisoning_enabled);
141059 -#endif
141061  #ifdef CONFIG_DEBUG_PAGEALLOC
141062         if (!debug_pagealloc_enabled())
141063                 return;
141064 diff --git a/mm/rmap.c b/mm/rmap.c
141065 index b0fc27e77d6d..d600b282ced5 100644
141066 --- a/mm/rmap.c
141067 +++ b/mm/rmap.c
141068 @@ -72,6 +72,7 @@
141069  #include <linux/page_idle.h>
141070  #include <linux/memremap.h>
141071  #include <linux/userfaultfd_k.h>
141072 +#include <linux/mm_inline.h>
141074  #include <asm/tlbflush.h>
141076 @@ -792,6 +793,11 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
141077                 }
141079                 if (pvmw.pte) {
141080 +                       /* the multigenerational lru exploits the spatial locality */
141081 +                       if (lru_gen_enabled() && pte_young(*pvmw.pte)) {
141082 +                               lru_gen_scan_around(&pvmw);
141083 +                               referenced++;
141084 +                       }
141085                         if (ptep_clear_flush_young_notify(vma, address,
141086                                                 pvmw.pte)) {
141087                                 /*
141088 diff --git a/mm/shmem.c b/mm/shmem.c
141089 index b2db4ed0fbc7..9dd24a2f0b7a 100644
141090 --- a/mm/shmem.c
141091 +++ b/mm/shmem.c
141092 @@ -2258,25 +2258,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
141093  static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
141095         struct shmem_inode_info *info = SHMEM_I(file_inode(file));
141096 +       int ret;
141098 -       if (info->seals & F_SEAL_FUTURE_WRITE) {
141099 -               /*
141100 -                * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
141101 -                * "future write" seal active.
141102 -                */
141103 -               if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
141104 -                       return -EPERM;
141106 -               /*
141107 -                * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
141108 -                * MAP_SHARED and read-only, take care to not allow mprotect to
141109 -                * revert protections on such mappings. Do this only for shared
141110 -                * mappings. For private mappings, don't need to mask
141111 -                * VM_MAYWRITE as we still want them to be COW-writable.
141112 -                */
141113 -               if (vma->vm_flags & VM_SHARED)
141114 -                       vma->vm_flags &= ~(VM_MAYWRITE);
141115 -       }
141116 +       ret = seal_check_future_write(info->seals, vma);
141117 +       if (ret)
141118 +               return ret;
141120         /* arm64 - allow memory tagging on RAM-based files */
141121         vma->vm_flags |= VM_MTE_ALLOWED;
141122 @@ -2375,8 +2361,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
141123         pgoff_t offset, max_off;
141125         ret = -ENOMEM;
141126 -       if (!shmem_inode_acct_block(inode, 1))
141127 +       if (!shmem_inode_acct_block(inode, 1)) {
141128 +               /*
141129 +                * We may have got a page, returned -ENOENT triggering a retry,
141130 +                * and now we find ourselves with -ENOMEM. Release the page, to
141131 +                * avoid a BUG_ON in our caller.
141132 +                */
141133 +               if (unlikely(*pagep)) {
141134 +                       put_page(*pagep);
141135 +                       *pagep = NULL;
141136 +               }
141137                 goto out;
141138 +       }
141140         if (!*pagep) {
141141                 page = shmem_alloc_page(gfp, info, pgoff);
141142 @@ -4233,6 +4229,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
141144         return 0;
141146 +EXPORT_SYMBOL_GPL(shmem_zero_setup);
141148  /**
141149   * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
141150 diff --git a/mm/sparse.c b/mm/sparse.c
141151 index 7bd23f9d6cef..33406ea2ecc4 100644
141152 --- a/mm/sparse.c
141153 +++ b/mm/sparse.c
141154 @@ -547,6 +547,7 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
141155                         pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
141156                                __func__, nid);
141157                         pnum_begin = pnum;
141158 +                       sparse_buffer_fini();
141159                         goto failed;
141160                 }
141161                 check_usemap_section_nr(nid, usage);
141162 diff --git a/mm/swap.c b/mm/swap.c
141163 index 31b844d4ed94..d6458ee1e9f8 100644
141164 --- a/mm/swap.c
141165 +++ b/mm/swap.c
141166 @@ -306,7 +306,7 @@ void lru_note_cost_page(struct page *page)
141168  static void __activate_page(struct page *page, struct lruvec *lruvec)
141170 -       if (!PageActive(page) && !PageUnevictable(page)) {
141171 +       if (!PageUnevictable(page) && !page_is_active(page, lruvec)) {
141172                 int nr_pages = thp_nr_pages(page);
141174                 del_page_from_lru_list(page, lruvec);
141175 @@ -334,10 +334,10 @@ static bool need_activate_page_drain(int cpu)
141176         return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
141179 -static void activate_page(struct page *page)
141180 +static void activate_page_on_lru(struct page *page)
141182         page = compound_head(page);
141183 -       if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
141184 +       if (PageLRU(page) && !PageUnevictable(page) && !page_is_active(page, NULL)) {
141185                 struct pagevec *pvec;
141187                 local_lock(&lru_pvecs.lock);
141188 @@ -354,7 +354,7 @@ static inline void activate_page_drain(int cpu)
141192 -static void activate_page(struct page *page)
141193 +static void activate_page_on_lru(struct page *page)
141195         struct lruvec *lruvec;
141197 @@ -368,11 +368,22 @@ static void activate_page(struct page *page)
141199  #endif
141201 -static void __lru_cache_activate_page(struct page *page)
141203 + * If the page is on the LRU, queue it for activation via
141204 + * lru_pvecs.activate_page. Otherwise, assume the page is on a
141205 + * pagevec, mark it active and it'll be moved to the active
141206 + * LRU on the next drain.
141207 + */
141208 +void activate_page(struct page *page)
141210         struct pagevec *pvec;
141211         int i;
141213 +       if (PageLRU(page)) {
141214 +               activate_page_on_lru(page);
141215 +               return;
141216 +       }
141218         local_lock(&lru_pvecs.lock);
141219         pvec = this_cpu_ptr(&lru_pvecs.lru_add);
141221 @@ -420,17 +431,8 @@ void mark_page_accessed(struct page *page)
141222                  * this list is never rotated or maintained, so marking an
141223                  * evictable page accessed has no effect.
141224                  */
141225 -       } else if (!PageActive(page)) {
141226 -               /*
141227 -                * If the page is on the LRU, queue it for activation via
141228 -                * lru_pvecs.activate_page. Otherwise, assume the page is on a
141229 -                * pagevec, mark it active and it'll be moved to the active
141230 -                * LRU on the next drain.
141231 -                */
141232 -               if (PageLRU(page))
141233 -                       activate_page(page);
141234 -               else
141235 -                       __lru_cache_activate_page(page);
141236 +       } else if (!page_inc_usage(page)) {
141237 +               activate_page(page);
141238                 ClearPageReferenced(page);
141239                 workingset_activation(page);
141240         }
141241 @@ -465,15 +467,14 @@ void lru_cache_add(struct page *page)
141242  EXPORT_SYMBOL(lru_cache_add);
141244  /**
141245 - * lru_cache_add_inactive_or_unevictable
141246 + * lru_cache_add_page_vma
141247   * @page:  the page to be added to LRU
141248   * @vma:   vma in which page is mapped for determining reclaimability
141249   *
141250 - * Place @page on the inactive or unevictable LRU list, depending on its
141251 - * evictability.
141252 + * Place @page on an LRU list, depending on its evictability.
141253   */
141254 -void lru_cache_add_inactive_or_unevictable(struct page *page,
141255 -                                        struct vm_area_struct *vma)
141256 +void lru_cache_add_page_vma(struct page *page, struct vm_area_struct *vma,
141257 +                           bool faulting)
141259         bool unevictable;
141261 @@ -490,6 +491,11 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
141262                 __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
141263                 count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
141264         }
141266 +       /* tell the multigenerational lru that the page is being faulted in */
141267 +       if (lru_gen_enabled() && !unevictable && faulting)
141268 +               SetPageActive(page);
141270         lru_cache_add(page);
141273 @@ -516,7 +522,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
141274   */
141275  static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
141277 -       bool active = PageActive(page);
141278 +       bool active = page_is_active(page, lruvec);
141279         int nr_pages = thp_nr_pages(page);
141281         if (PageUnevictable(page))
141282 @@ -556,7 +562,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
141284  static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
141286 -       if (PageActive(page) && !PageUnevictable(page)) {
141287 +       if (!PageUnevictable(page) && page_is_active(page, lruvec)) {
141288                 int nr_pages = thp_nr_pages(page);
141290                 del_page_from_lru_list(page, lruvec);
141291 @@ -670,7 +676,7 @@ void deactivate_file_page(struct page *page)
141292   */
141293  void deactivate_page(struct page *page)
141295 -       if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
141296 +       if (PageLRU(page) && !PageUnevictable(page) && page_is_active(page, NULL)) {
141297                 struct pagevec *pvec;
141299                 local_lock(&lru_pvecs.lock);
141300 diff --git a/mm/swapfile.c b/mm/swapfile.c
141301 index 084a5b9a18e5..ab3b5ca404fd 100644
141302 --- a/mm/swapfile.c
141303 +++ b/mm/swapfile.c
141304 @@ -1936,7 +1936,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
141305                 page_add_anon_rmap(page, vma, addr, false);
141306         } else { /* ksm created a completely new copy */
141307                 page_add_new_anon_rmap(page, vma, addr, false);
141308 -               lru_cache_add_inactive_or_unevictable(page, vma);
141309 +               lru_cache_add_page_vma(page, vma, false);
141310         }
141311         swap_free(entry);
141312  out:
141313 @@ -2702,6 +2702,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
141314         err = 0;
141315         atomic_inc(&proc_poll_event);
141316         wake_up_interruptible(&proc_poll_wait);
141317 +       /* stop tracking anon if the multigenerational lru is enabled */
141318 +       lru_gen_set_state(false, false, true);
141320  out_dput:
141321         filp_close(victim, NULL);
141322 @@ -3348,6 +3350,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
141323         mutex_unlock(&swapon_mutex);
141324         atomic_inc(&proc_poll_event);
141325         wake_up_interruptible(&proc_poll_wait);
141326 +       /* start tracking anon if the multigenerational lru is enabled */
141327 +       lru_gen_set_state(true, false, true);
141329         error = 0;
141330         goto out;
141331 diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
141332 index 9a3d451402d7..e1d4cd3103b8 100644
141333 --- a/mm/userfaultfd.c
141334 +++ b/mm/userfaultfd.c
141335 @@ -123,7 +123,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
141337         inc_mm_counter(dst_mm, MM_ANONPAGES);
141338         page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
141339 -       lru_cache_add_inactive_or_unevictable(page, dst_vma);
141340 +       lru_cache_add_page_vma(page, dst_vma, true);
141342         set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
141344 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
141345 index 4f5f8c907897..64ab133ee816 100644
141346 --- a/mm/vmalloc.c
141347 +++ b/mm/vmalloc.c
141348 @@ -316,6 +316,7 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
141350         return 0;
141352 +EXPORT_SYMBOL(map_kernel_range_noflush);
141354  int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
141355                 struct page **pages)
141356 @@ -2131,6 +2132,7 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
141357                                   NUMA_NO_NODE, GFP_KERNEL,
141358                                   __builtin_return_address(0));
141360 +EXPORT_SYMBOL(get_vm_area);
141362  struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
141363                                 const void *caller)
141364 diff --git a/mm/vmscan.c b/mm/vmscan.c
141365 index 562e87cbd7a1..4a34cc622681 100644
141366 --- a/mm/vmscan.c
141367 +++ b/mm/vmscan.c
141368 @@ -49,6 +49,11 @@
141369  #include <linux/printk.h>
141370  #include <linux/dax.h>
141371  #include <linux/psi.h>
141372 +#include <linux/memory.h>
141373 +#include <linux/pagewalk.h>
141374 +#include <linux/shmem_fs.h>
141375 +#include <linux/ctype.h>
141376 +#include <linux/debugfs.h>
141378  #include <asm/tlbflush.h>
141379  #include <asm/div64.h>
141380 @@ -118,6 +123,19 @@ struct scan_control {
141381         /* The file pages on the current node are dangerously low */
141382         unsigned int file_is_tiny:1;
141384 +       /*
141385 +        * The clean file pages on the current node won't be reclaimed when
141386 +        * their amount is below vm.clean_low_kbytes *unless* we threaten
141387 +        * to OOM or have no free swap space or vm.swappiness=0.
141388 +        */
141389 +       unsigned int clean_below_low:1;
141391 +       /*
141392 +        * The clean file pages on the current node won't be reclaimed when
141393 +        * their amount is below vm.clean_min_kbytes.
141394 +        */
141395 +       unsigned int clean_below_min:1;
141397         /* Allocation order */
141398         s8 order;
141400 @@ -164,10 +182,21 @@ struct scan_control {
141401  #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
141402  #endif
141404 +#if CONFIG_CLEAN_LOW_KBYTES < 0
141405 +#error "CONFIG_CLEAN_LOW_KBYTES must be >= 0"
141406 +#endif
141408 +#if CONFIG_CLEAN_MIN_KBYTES < 0
141409 +#error "CONFIG_CLEAN_MIN_KBYTES must be >= 0"
141410 +#endif
141412 +unsigned long sysctl_clean_low_kbytes __read_mostly = CONFIG_CLEAN_LOW_KBYTES;
141413 +unsigned long sysctl_clean_min_kbytes __read_mostly = CONFIG_CLEAN_MIN_KBYTES;
141416   * From 0 .. 200.  Higher means more swappy.
141417   */
141418 -int vm_swappiness = 60;
141419 +int vm_swappiness = 30;
141421  static void set_task_reclaim_state(struct task_struct *task,
141422                                    struct reclaim_state *rs)
141423 @@ -897,9 +926,11 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
141425         if (PageSwapCache(page)) {
141426                 swp_entry_t swap = { .val = page_private(page) };
141427 -               mem_cgroup_swapout(page, swap);
141429 +               /* get a shadow entry before page_memcg() is cleared */
141430                 if (reclaimed && !mapping_exiting(mapping))
141431                         shadow = workingset_eviction(page, target_memcg);
141432 +               mem_cgroup_swapout(page, swap);
141433                 __delete_from_swap_cache(page, swap, shadow);
141434                 xa_unlock_irqrestore(&mapping->i_pages, flags);
141435                 put_swap_page(page, swap);
141436 @@ -1110,6 +1141,10 @@ static unsigned int shrink_page_list(struct list_head *page_list,
141437                 if (!sc->may_unmap && page_mapped(page))
141438                         goto keep_locked;
141440 +               /* in case the page was found accessed by lru_gen_scan_around() */
141441 +               if (lru_gen_enabled() && !ignore_references && PageReferenced(page))
141442 +                       goto keep_locked;
141444                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
141445                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
141447 @@ -2224,6 +2259,135 @@ enum scan_balance {
141448         SCAN_FILE,
141451 +static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
141453 +       unsigned long file;
141454 +       struct lruvec *target_lruvec;
141456 +       /* the multigenerational lru doesn't use these counters */
141457 +       if (lru_gen_enabled())
141458 +               return;
141460 +       target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
141462 +       /*
141463 +        * Determine the scan balance between anon and file LRUs.
141464 +        */
141465 +       spin_lock_irq(&target_lruvec->lru_lock);
141466 +       sc->anon_cost = target_lruvec->anon_cost;
141467 +       sc->file_cost = target_lruvec->file_cost;
141468 +       spin_unlock_irq(&target_lruvec->lru_lock);
141470 +       /*
141471 +        * Target desirable inactive:active list ratios for the anon
141472 +        * and file LRU lists.
141473 +        */
141474 +       if (!sc->force_deactivate) {
141475 +               unsigned long refaults;
141477 +               refaults = lruvec_page_state(target_lruvec,
141478 +                               WORKINGSET_ACTIVATE_ANON);
141479 +               if (refaults != target_lruvec->refaults[0] ||
141480 +                       inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
141481 +                       sc->may_deactivate |= DEACTIVATE_ANON;
141482 +               else
141483 +                       sc->may_deactivate &= ~DEACTIVATE_ANON;
141485 +               /*
141486 +                * When refaults are being observed, it means a new
141487 +                * workingset is being established. Deactivate to get
141488 +                * rid of any stale active pages quickly.
141489 +                */
141490 +               refaults = lruvec_page_state(target_lruvec,
141491 +                               WORKINGSET_ACTIVATE_FILE);
141492 +               if (refaults != target_lruvec->refaults[1] ||
141493 +                   inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
141494 +                       sc->may_deactivate |= DEACTIVATE_FILE;
141495 +               else
141496 +                       sc->may_deactivate &= ~DEACTIVATE_FILE;
141497 +       } else
141498 +               sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
141500 +       /*
141501 +        * If we have plenty of inactive file pages that aren't
141502 +        * thrashing, try to reclaim those first before touching
141503 +        * anonymous pages.
141504 +        */
141505 +       file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
141506 +       if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
141507 +               sc->cache_trim_mode = 1;
141508 +       else
141509 +               sc->cache_trim_mode = 0;
141511 +       /*
141512 +        * Prevent the reclaimer from falling into the cache trap: as
141513 +        * cache pages start out inactive, every cache fault will tip
141514 +        * the scan balance towards the file LRU.  And as the file LRU
141515 +        * shrinks, so does the window for rotation from references.
141516 +        * This means we have a runaway feedback loop where a tiny
141517 +        * thrashing file LRU becomes infinitely more attractive than
141518 +        * anon pages.  Try to detect this based on file LRU size.
141519 +        */
141520 +       if (!cgroup_reclaim(sc)) {
141521 +               unsigned long total_high_wmark = 0;
141522 +               unsigned long free, anon;
141523 +               int z;
141525 +               free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
141526 +               file = node_page_state(pgdat, NR_ACTIVE_FILE) +
141527 +                          node_page_state(pgdat, NR_INACTIVE_FILE);
141529 +               for (z = 0; z < MAX_NR_ZONES; z++) {
141530 +                       struct zone *zone = &pgdat->node_zones[z];
141532 +                       if (!managed_zone(zone))
141533 +                               continue;
141535 +                       total_high_wmark += high_wmark_pages(zone);
141536 +               }
141538 +               /*
141539 +                * Consider anon: if that's low too, this isn't a
141540 +                * runaway file reclaim problem, but rather just
141541 +                * extreme pressure. Reclaim as per usual then.
141542 +                */
141543 +               anon = node_page_state(pgdat, NR_INACTIVE_ANON);
141545 +               sc->file_is_tiny =
141546 +                       file + free <= total_high_wmark &&
141547 +                       !(sc->may_deactivate & DEACTIVATE_ANON) &&
141548 +                       anon >> sc->priority;
141550 +               /*
141551 +               * Check the number of clean file pages to protect them from
141552 +               * reclaiming if their amount is below the specified.
141553 +               */
141554 +               if (sysctl_clean_low_kbytes || sysctl_clean_min_kbytes) {
141555 +                       unsigned long reclaimable_file, dirty, clean;
141557 +                       reclaimable_file =
141558 +                               node_page_state(pgdat, NR_ACTIVE_FILE) +
141559 +                               node_page_state(pgdat, NR_INACTIVE_FILE) +
141560 +                               node_page_state(pgdat, NR_ISOLATED_FILE);
141561 +                       dirty = node_page_state(pgdat, NR_FILE_DIRTY);
141562 +                       /*
141563 +                       * node_page_state() sum can go out of sync since
141564 +                       * all the values are not read at once.
141565 +                       */
141566 +                       if (likely(reclaimable_file > dirty))
141567 +                               clean = (reclaimable_file - dirty) << (PAGE_SHIFT - 10);
141568 +                       else
141569 +                               clean = 0;
141571 +                       sc->clean_below_low = clean < sysctl_clean_low_kbytes;
141572 +                       sc->clean_below_min = clean < sysctl_clean_min_kbytes;
141573 +               } else {
141574 +                       sc->clean_below_low = false;
141575 +                       sc->clean_below_min = false;
141576 +               }
141577 +       }
141581   * Determine how aggressively the anon and file LRU lists should be
141582   * scanned.  The relative value of each set of LRU lists is determined
141583 @@ -2281,6 +2445,16 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
141584                 goto out;
141585         }
141587 +       /*
141588 +        * Force-scan anon if clean file pages is under vm.clean_min_kbytes
141589 +        * or vm.clean_low_kbytes (unless the swappiness setting
141590 +        * disagrees with swapping).
141591 +        */
141592 +       if ((sc->clean_below_low || sc->clean_below_min) && swappiness) {
141593 +               scan_balance = SCAN_ANON;
141594 +               goto out;
141595 +       }
141597         /*
141598          * If there is enough inactive page cache, we do not reclaim
141599          * anything from the anonymous working right now.
141600 @@ -2417,10 +2591,30 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
141601                         BUG();
141602                 }
141604 +               /*
141605 +                * Don't reclaim clean file pages when their amount is below
141606 +                * vm.clean_min_kbytes.
141607 +                */
141608 +               if (file && sc->clean_below_min)
141609 +                       scan = 0;
141611                 nr[lru] = scan;
141612         }
141615 +#ifdef CONFIG_LRU_GEN
141616 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc);
141617 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc);
141618 +#else
141619 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc)
141623 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc)
141626 +#endif
141628  static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
141630         unsigned long nr[NR_LRU_LISTS];
141631 @@ -2432,6 +2626,11 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
141632         struct blk_plug plug;
141633         bool scan_adjusted;
141635 +       if (lru_gen_enabled()) {
141636 +               shrink_lru_gens(lruvec, sc);
141637 +               return;
141638 +       }
141640         get_scan_count(lruvec, sc, nr);
141642         /* Record the original scan target for proportional adjustments later */
141643 @@ -2669,7 +2868,6 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
141644         unsigned long nr_reclaimed, nr_scanned;
141645         struct lruvec *target_lruvec;
141646         bool reclaimable = false;
141647 -       unsigned long file;
141649         target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
141651 @@ -2679,93 +2877,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
141652         nr_reclaimed = sc->nr_reclaimed;
141653         nr_scanned = sc->nr_scanned;
141655 -       /*
141656 -        * Determine the scan balance between anon and file LRUs.
141657 -        */
141658 -       spin_lock_irq(&target_lruvec->lru_lock);
141659 -       sc->anon_cost = target_lruvec->anon_cost;
141660 -       sc->file_cost = target_lruvec->file_cost;
141661 -       spin_unlock_irq(&target_lruvec->lru_lock);
141663 -       /*
141664 -        * Target desirable inactive:active list ratios for the anon
141665 -        * and file LRU lists.
141666 -        */
141667 -       if (!sc->force_deactivate) {
141668 -               unsigned long refaults;
141670 -               refaults = lruvec_page_state(target_lruvec,
141671 -                               WORKINGSET_ACTIVATE_ANON);
141672 -               if (refaults != target_lruvec->refaults[0] ||
141673 -                       inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
141674 -                       sc->may_deactivate |= DEACTIVATE_ANON;
141675 -               else
141676 -                       sc->may_deactivate &= ~DEACTIVATE_ANON;
141678 -               /*
141679 -                * When refaults are being observed, it means a new
141680 -                * workingset is being established. Deactivate to get
141681 -                * rid of any stale active pages quickly.
141682 -                */
141683 -               refaults = lruvec_page_state(target_lruvec,
141684 -                               WORKINGSET_ACTIVATE_FILE);
141685 -               if (refaults != target_lruvec->refaults[1] ||
141686 -                   inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
141687 -                       sc->may_deactivate |= DEACTIVATE_FILE;
141688 -               else
141689 -                       sc->may_deactivate &= ~DEACTIVATE_FILE;
141690 -       } else
141691 -               sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
141693 -       /*
141694 -        * If we have plenty of inactive file pages that aren't
141695 -        * thrashing, try to reclaim those first before touching
141696 -        * anonymous pages.
141697 -        */
141698 -       file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
141699 -       if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
141700 -               sc->cache_trim_mode = 1;
141701 -       else
141702 -               sc->cache_trim_mode = 0;
141704 -       /*
141705 -        * Prevent the reclaimer from falling into the cache trap: as
141706 -        * cache pages start out inactive, every cache fault will tip
141707 -        * the scan balance towards the file LRU.  And as the file LRU
141708 -        * shrinks, so does the window for rotation from references.
141709 -        * This means we have a runaway feedback loop where a tiny
141710 -        * thrashing file LRU becomes infinitely more attractive than
141711 -        * anon pages.  Try to detect this based on file LRU size.
141712 -        */
141713 -       if (!cgroup_reclaim(sc)) {
141714 -               unsigned long total_high_wmark = 0;
141715 -               unsigned long free, anon;
141716 -               int z;
141718 -               free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
141719 -               file = node_page_state(pgdat, NR_ACTIVE_FILE) +
141720 -                          node_page_state(pgdat, NR_INACTIVE_FILE);
141722 -               for (z = 0; z < MAX_NR_ZONES; z++) {
141723 -                       struct zone *zone = &pgdat->node_zones[z];
141724 -                       if (!managed_zone(zone))
141725 -                               continue;
141727 -                       total_high_wmark += high_wmark_pages(zone);
141728 -               }
141730 -               /*
141731 -                * Consider anon: if that's low too, this isn't a
141732 -                * runaway file reclaim problem, but rather just
141733 -                * extreme pressure. Reclaim as per usual then.
141734 -                */
141735 -               anon = node_page_state(pgdat, NR_INACTIVE_ANON);
141737 -               sc->file_is_tiny =
141738 -                       file + free <= total_high_wmark &&
141739 -                       !(sc->may_deactivate & DEACTIVATE_ANON) &&
141740 -                       anon >> sc->priority;
141741 -       }
141742 +       prepare_scan_count(pgdat, sc);
141744         shrink_node_memcgs(pgdat, sc);
141746 @@ -2985,6 +3097,10 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
141747         struct lruvec *target_lruvec;
141748         unsigned long refaults;
141750 +       /* the multigenerational lru doesn't use these counters */
141751 +       if (lru_gen_enabled())
141752 +               return;
141754         target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
141755         refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
141756         target_lruvec->refaults[0] = refaults;
141757 @@ -3359,6 +3475,11 @@ static void age_active_anon(struct pglist_data *pgdat,
141758         struct mem_cgroup *memcg;
141759         struct lruvec *lruvec;
141761 +       if (lru_gen_enabled()) {
141762 +               age_lru_gens(pgdat, sc);
141763 +               return;
141764 +       }
141766         if (!total_swap_pages)
141767                 return;
141769 @@ -4304,3 +4425,2365 @@ void check_move_unevictable_pages(struct pagevec *pvec)
141770         }
141772  EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
141774 +#ifdef CONFIG_LRU_GEN
141777 + * After pages are faulted in, the aging must scan them twice before the
141778 + * eviction can. The first scan clears the accessed bit set during initial
141779 + * faults. And the second scan makes sure they haven't been used since the
141780 + * first.
141781 + */
141782 +#define MIN_NR_GENS    2
141784 +#define MAX_BATCH_SIZE 8192
141786 +/******************************************************************************
141787 + *                          shorthand helpers
141788 + ******************************************************************************/
141790 +#define DEFINE_MAX_SEQ()                                               \
141791 +       unsigned long max_seq = READ_ONCE(lruvec->evictable.max_seq)
141793 +#define DEFINE_MIN_SEQ()                                               \
141794 +       unsigned long min_seq[ANON_AND_FILE] = {                        \
141795 +               READ_ONCE(lruvec->evictable.min_seq[0]),                \
141796 +               READ_ONCE(lruvec->evictable.min_seq[1]),                \
141797 +       }
141799 +#define for_each_type_zone(file, zone)                                 \
141800 +       for ((file) = 0; (file) < ANON_AND_FILE; (file)++)              \
141801 +               for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
141803 +#define for_each_gen_type_zone(gen, file, zone)                                \
141804 +       for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++)                   \
141805 +               for ((file) = 0; (file) < ANON_AND_FILE; (file)++)      \
141806 +                       for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
141808 +static int get_nr_gens(struct lruvec *lruvec, int file)
141810 +       return lruvec->evictable.max_seq - lruvec->evictable.min_seq[file] + 1;
141813 +static int min_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
141815 +       return max_seq - max(min_seq[!swappiness], min_seq[1]) + 1;
141818 +static int max_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
141820 +       return max_seq - min(min_seq[!swappiness], min_seq[1]) + 1;
141823 +static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
141825 +       lockdep_assert_held(&lruvec->lru_lock);
141827 +       return get_nr_gens(lruvec, 0) >= MIN_NR_GENS &&
141828 +              get_nr_gens(lruvec, 0) <= MAX_NR_GENS &&
141829 +              get_nr_gens(lruvec, 1) >= MIN_NR_GENS &&
141830 +              get_nr_gens(lruvec, 1) <= MAX_NR_GENS;
141833 +/******************************************************************************
141834 + *                          refault feedback loop
141835 + ******************************************************************************/
141838 + * A feedback loop modeled after the PID controller. Currently supports the
141839 + * proportional (P) and the integral (I) terms; the derivative (D) term can be
141840 + * added if necessary. The setpoint (SP) is the desired position; the process
141841 + * variable (PV) is the measured position. The error is the difference between
141842 + * the SP and the PV. A positive error results in a positive control output
141843 + * correction, which, in our case, is to allow eviction.
141845 + * The P term is the current refault rate refaulted/(evicted+activated), which
141846 + * has a weight of 1. The I term is the arithmetic mean of the last N refault
141847 + * rates, weighted by geometric series 1/2, 1/4, ..., 1/(1<<N).
141849 + * Our goal is to make sure upper tiers have similar refault rates as the base
141850 + * tier. That is we try to be fair to all tiers by maintaining similar refault
141851 + * rates across them.
141852 + */
141853 +struct controller_pos {
141854 +       unsigned long refaulted;
141855 +       unsigned long total;
141856 +       int gain;
141859 +static void read_controller_pos(struct controller_pos *pos, struct lruvec *lruvec,
141860 +                               int file, int tier, int gain)
141862 +       struct lrugen *lrugen = &lruvec->evictable;
141863 +       int sid = sid_from_seq_or_gen(lrugen->min_seq[file]);
141865 +       pos->refaulted = lrugen->avg_refaulted[file][tier] +
141866 +                        atomic_long_read(&lrugen->refaulted[sid][file][tier]);
141867 +       pos->total = lrugen->avg_total[file][tier] +
141868 +                    atomic_long_read(&lrugen->evicted[sid][file][tier]);
141869 +       if (tier)
141870 +               pos->total += lrugen->activated[sid][file][tier - 1];
141871 +       pos->gain = gain;
141874 +static void reset_controller_pos(struct lruvec *lruvec, int gen, int file)
141876 +       int tier;
141877 +       int sid = sid_from_seq_or_gen(gen);
141878 +       struct lrugen *lrugen = &lruvec->evictable;
141879 +       bool carryover = gen == lru_gen_from_seq(lrugen->min_seq[file]);
141881 +       if (!carryover && NR_STAT_GENS == 1)
141882 +               return;
141884 +       for (tier = 0; tier < MAX_NR_TIERS; tier++) {
141885 +               if (carryover) {
141886 +                       unsigned long sum;
141888 +                       sum = lrugen->avg_refaulted[file][tier] +
141889 +                             atomic_long_read(&lrugen->refaulted[sid][file][tier]);
141890 +                       WRITE_ONCE(lrugen->avg_refaulted[file][tier], sum >> 1);
141892 +                       sum = lrugen->avg_total[file][tier] +
141893 +                             atomic_long_read(&lrugen->evicted[sid][file][tier]);
141894 +                       if (tier)
141895 +                               sum += lrugen->activated[sid][file][tier - 1];
141896 +                       WRITE_ONCE(lrugen->avg_total[file][tier], sum >> 1);
141898 +                       if (NR_STAT_GENS > 1)
141899 +                               continue;
141900 +               }
141902 +               atomic_long_set(&lrugen->refaulted[sid][file][tier], 0);
141903 +               atomic_long_set(&lrugen->evicted[sid][file][tier], 0);
141904 +               if (tier)
141905 +                       WRITE_ONCE(lrugen->activated[sid][file][tier - 1], 0);
141906 +       }
141909 +static bool positive_ctrl_err(struct controller_pos *sp, struct controller_pos *pv)
141911 +       /*
141912 +        * Allow eviction if the PV has a limited number of refaulted pages or a
141913 +        * lower refault rate than the SP.
141914 +        */
141915 +       return pv->refaulted < SWAP_CLUSTER_MAX ||
141916 +              pv->refaulted * max(sp->total, 1UL) * sp->gain <=
141917 +              sp->refaulted * max(pv->total, 1UL) * pv->gain;
141920 +/******************************************************************************
141921 + *                          mm_struct list
141922 + ******************************************************************************/
141924 +enum {
141925 +       MM_SCHED_ACTIVE,        /* running processes */
141926 +       MM_SCHED_INACTIVE,      /* sleeping processes */
141927 +       MM_LOCK_CONTENTION,     /* lock contentions */
141928 +       MM_VMA_INTERVAL,        /* VMAs within the range of current table */
141929 +       MM_LEAF_OTHER_NODE,     /* entries not from node under reclaim */
141930 +       MM_LEAF_OTHER_MEMCG,    /* entries not from memcg under reclaim */
141931 +       MM_LEAF_OLD,            /* old entries */
141932 +       MM_LEAF_YOUNG,          /* young entries */
141933 +       MM_LEAF_DIRTY,          /* dirty entries */
141934 +       MM_LEAF_HOLE,           /* non-present entries */
141935 +       MM_NONLEAF_OLD,         /* old non-leaf pmd entries */
141936 +       MM_NONLEAF_YOUNG,       /* young non-leaf pmd entries */
141937 +       NR_MM_STATS
141940 +/* mnemonic codes for the stats above */
141941 +#define MM_STAT_CODES          "aicvnmoydhlu"
141943 +struct lru_gen_mm_list {
141944 +       /* the head of a global or per-memcg mm_struct list */
141945 +       struct list_head head;
141946 +       /* protects the list */
141947 +       spinlock_t lock;
141948 +       struct {
141949 +               /* set to max_seq after each round of walk */
141950 +               unsigned long cur_seq;
141951 +               /* the next mm on the list to walk */
141952 +               struct list_head *iter;
141953 +               /* to wait for the last worker to finish */
141954 +               struct wait_queue_head wait;
141955 +               /* the number of concurrent workers */
141956 +               int nr_workers;
141957 +               /* stats for debugging */
141958 +               unsigned long stats[NR_STAT_GENS][NR_MM_STATS];
141959 +       } nodes[0];
141962 +static struct lru_gen_mm_list *global_mm_list;
141964 +static struct lru_gen_mm_list *alloc_mm_list(void)
141966 +       int nid;
141967 +       struct lru_gen_mm_list *mm_list;
141969 +       mm_list = kzalloc(struct_size(mm_list, nodes, nr_node_ids), GFP_KERNEL);
141970 +       if (!mm_list)
141971 +               return NULL;
141973 +       INIT_LIST_HEAD(&mm_list->head);
141974 +       spin_lock_init(&mm_list->lock);
141976 +       for_each_node(nid) {
141977 +               mm_list->nodes[nid].cur_seq = MIN_NR_GENS;
141978 +               mm_list->nodes[nid].iter = &mm_list->head;
141979 +               init_waitqueue_head(&mm_list->nodes[nid].wait);
141980 +       }
141982 +       return mm_list;
141985 +static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
141987 +#ifdef CONFIG_MEMCG
141988 +       if (!mem_cgroup_disabled())
141989 +               return memcg ? memcg->mm_list : root_mem_cgroup->mm_list;
141990 +#endif
141991 +       VM_BUG_ON(memcg);
141993 +       return global_mm_list;
141996 +void lru_gen_init_mm(struct mm_struct *mm)
141998 +       int file;
142000 +       INIT_LIST_HEAD(&mm->lrugen.list);
142001 +#ifdef CONFIG_MEMCG
142002 +       mm->lrugen.memcg = NULL;
142003 +#endif
142004 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
142005 +       atomic_set(&mm->lrugen.nr_cpus, 0);
142006 +#endif
142007 +       for (file = 0; file < ANON_AND_FILE; file++)
142008 +               nodes_clear(mm->lrugen.nodes[file]);
142011 +void lru_gen_add_mm(struct mm_struct *mm)
142013 +       struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
142014 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
142016 +       VM_BUG_ON_MM(!list_empty(&mm->lrugen.list), mm);
142017 +#ifdef CONFIG_MEMCG
142018 +       VM_BUG_ON_MM(mm->lrugen.memcg, mm);
142019 +       WRITE_ONCE(mm->lrugen.memcg, memcg);
142020 +#endif
142021 +       spin_lock(&mm_list->lock);
142022 +       list_add_tail(&mm->lrugen.list, &mm_list->head);
142023 +       spin_unlock(&mm_list->lock);
142026 +void lru_gen_del_mm(struct mm_struct *mm)
142028 +       int nid;
142029 +#ifdef CONFIG_MEMCG
142030 +       struct lru_gen_mm_list *mm_list = get_mm_list(mm->lrugen.memcg);
142031 +#else
142032 +       struct lru_gen_mm_list *mm_list = get_mm_list(NULL);
142033 +#endif
142035 +       spin_lock(&mm_list->lock);
142037 +       for_each_node(nid) {
142038 +               if (mm_list->nodes[nid].iter != &mm->lrugen.list)
142039 +                       continue;
142041 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
142042 +               if (mm_list->nodes[nid].iter == &mm_list->head)
142043 +                       WRITE_ONCE(mm_list->nodes[nid].cur_seq,
142044 +                                  mm_list->nodes[nid].cur_seq + 1);
142045 +       }
142047 +       list_del_init(&mm->lrugen.list);
142049 +       spin_unlock(&mm_list->lock);
142051 +#ifdef CONFIG_MEMCG
142052 +       mem_cgroup_put(mm->lrugen.memcg);
142053 +       WRITE_ONCE(mm->lrugen.memcg, NULL);
142054 +#endif
142057 +#ifdef CONFIG_MEMCG
142058 +int lru_gen_alloc_mm_list(struct mem_cgroup *memcg)
142060 +       if (mem_cgroup_disabled())
142061 +               return 0;
142063 +       memcg->mm_list = alloc_mm_list();
142065 +       return memcg->mm_list ? 0 : -ENOMEM;
142068 +void lru_gen_free_mm_list(struct mem_cgroup *memcg)
142070 +       kfree(memcg->mm_list);
142071 +       memcg->mm_list = NULL;
142074 +void lru_gen_migrate_mm(struct mm_struct *mm)
142076 +       struct mem_cgroup *memcg;
142078 +       lockdep_assert_held(&mm->owner->alloc_lock);
142080 +       if (mem_cgroup_disabled())
142081 +               return;
142083 +       rcu_read_lock();
142084 +       memcg = mem_cgroup_from_task(mm->owner);
142085 +       rcu_read_unlock();
142086 +       if (memcg == mm->lrugen.memcg)
142087 +               return;
142089 +       VM_BUG_ON_MM(!mm->lrugen.memcg, mm);
142090 +       VM_BUG_ON_MM(list_empty(&mm->lrugen.list), mm);
142092 +       lru_gen_del_mm(mm);
142093 +       lru_gen_add_mm(mm);
142096 +static bool mm_has_migrated(struct mm_struct *mm, struct mem_cgroup *memcg)
142098 +       return READ_ONCE(mm->lrugen.memcg) != memcg;
142100 +#else
142101 +static bool mm_has_migrated(struct mm_struct *mm, struct mem_cgroup *memcg)
142103 +       return false;
142105 +#endif
142107 +struct mm_walk_args {
142108 +       struct mem_cgroup *memcg;
142109 +       unsigned long max_seq;
142110 +       unsigned long next_addr;
142111 +       unsigned long start_pfn;
142112 +       unsigned long end_pfn;
142113 +       int node_id;
142114 +       int batch_size;
142115 +       int mm_stats[NR_MM_STATS];
142116 +       int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
142117 +       bool should_walk[ANON_AND_FILE];
142118 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
142119 +       unsigned long bitmap[BITS_TO_LONGS(PTRS_PER_PMD)];
142120 +#endif
142123 +static void reset_mm_stats(struct lru_gen_mm_list *mm_list, bool last,
142124 +                          struct mm_walk_args *args)
142126 +       int i;
142127 +       int nid = args->node_id;
142128 +       int sid = sid_from_seq_or_gen(args->max_seq);
142130 +       lockdep_assert_held(&mm_list->lock);
142132 +       for (i = 0; i < NR_MM_STATS; i++) {
142133 +               WRITE_ONCE(mm_list->nodes[nid].stats[sid][i],
142134 +                          mm_list->nodes[nid].stats[sid][i] + args->mm_stats[i]);
142135 +               args->mm_stats[i] = 0;
142136 +       }
142138 +       if (!last || NR_STAT_GENS == 1)
142139 +               return;
142141 +       sid = sid_from_seq_or_gen(args->max_seq + 1);
142142 +       for (i = 0; i < NR_MM_STATS; i++)
142143 +               WRITE_ONCE(mm_list->nodes[nid].stats[sid][i], 0);
142146 +static bool should_skip_mm(struct mm_struct *mm, int nid, int swappiness)
142148 +       int file;
142149 +       unsigned long size = 0;
142151 +       if (mm_is_oom_victim(mm))
142152 +               return true;
142154 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
142155 +               if (lru_gen_mm_is_active(mm) || node_isset(nid, mm->lrugen.nodes[file]))
142156 +                       size += file ? get_mm_counter(mm, MM_FILEPAGES) :
142157 +                                      get_mm_counter(mm, MM_ANONPAGES) +
142158 +                                      get_mm_counter(mm, MM_SHMEMPAGES);
142159 +       }
142161 +       /* leave the legwork to the rmap if mapped pages are too sparse */
142162 +       if (size < max(SWAP_CLUSTER_MAX, mm_pgtables_bytes(mm) / PAGE_SIZE))
142163 +               return true;
142165 +       return !mmget_not_zero(mm);
142168 +/* To support multiple workers that concurrently walk mm_struct list. */
142169 +static bool get_next_mm(struct mm_walk_args *args, int swappiness, struct mm_struct **iter)
142171 +       bool last = true;
142172 +       struct mm_struct *mm = NULL;
142173 +       int nid = args->node_id;
142174 +       struct lru_gen_mm_list *mm_list = get_mm_list(args->memcg);
142176 +       if (*iter)
142177 +               mmput_async(*iter);
142178 +       else if (args->max_seq <= READ_ONCE(mm_list->nodes[nid].cur_seq))
142179 +               return false;
142181 +       spin_lock(&mm_list->lock);
142183 +       VM_BUG_ON(args->max_seq > mm_list->nodes[nid].cur_seq + 1);
142184 +       VM_BUG_ON(*iter && args->max_seq < mm_list->nodes[nid].cur_seq);
142185 +       VM_BUG_ON(*iter && !mm_list->nodes[nid].nr_workers);
142187 +       if (args->max_seq <= mm_list->nodes[nid].cur_seq) {
142188 +               last = *iter;
142189 +               goto done;
142190 +       }
142192 +       if (mm_list->nodes[nid].iter == &mm_list->head) {
142193 +               VM_BUG_ON(*iter || mm_list->nodes[nid].nr_workers);
142194 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
142195 +       }
142197 +       while (!mm && mm_list->nodes[nid].iter != &mm_list->head) {
142198 +               mm = list_entry(mm_list->nodes[nid].iter, struct mm_struct, lrugen.list);
142199 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
142200 +               if (should_skip_mm(mm, nid, swappiness))
142201 +                       mm = NULL;
142203 +               args->mm_stats[mm ? MM_SCHED_ACTIVE : MM_SCHED_INACTIVE]++;
142204 +       }
142206 +       if (mm_list->nodes[nid].iter == &mm_list->head)
142207 +               WRITE_ONCE(mm_list->nodes[nid].cur_seq,
142208 +                          mm_list->nodes[nid].cur_seq + 1);
142209 +done:
142210 +       if (*iter && !mm)
142211 +               mm_list->nodes[nid].nr_workers--;
142212 +       if (!*iter && mm)
142213 +               mm_list->nodes[nid].nr_workers++;
142215 +       last = last && !mm_list->nodes[nid].nr_workers &&
142216 +              mm_list->nodes[nid].iter == &mm_list->head;
142218 +       reset_mm_stats(mm_list, last, args);
142220 +       spin_unlock(&mm_list->lock);
142222 +       *iter = mm;
142224 +       return last;
142227 +/******************************************************************************
142228 + *                          the aging
142229 + ******************************************************************************/
142231 +static void update_batch_size(struct page *page, int old_gen, int new_gen,
142232 +                             struct mm_walk_args *args)
142234 +       int file = page_is_file_lru(page);
142235 +       int zone = page_zonenum(page);
142236 +       int delta = thp_nr_pages(page);
142238 +       VM_BUG_ON(old_gen >= MAX_NR_GENS);
142239 +       VM_BUG_ON(new_gen >= MAX_NR_GENS);
142241 +       args->batch_size++;
142243 +       args->nr_pages[old_gen][file][zone] -= delta;
142244 +       args->nr_pages[new_gen][file][zone] += delta;
142247 +static void reset_batch_size(struct lruvec *lruvec, struct mm_walk_args *args)
142249 +       int gen, file, zone;
142250 +       struct lrugen *lrugen = &lruvec->evictable;
142252 +       args->batch_size = 0;
142254 +       spin_lock_irq(&lruvec->lru_lock);
142256 +       for_each_gen_type_zone(gen, file, zone) {
142257 +               enum lru_list lru = LRU_FILE * file;
142258 +               int total = args->nr_pages[gen][file][zone];
142260 +               if (!total)
142261 +                       continue;
142263 +               args->nr_pages[gen][file][zone] = 0;
142264 +               WRITE_ONCE(lrugen->sizes[gen][file][zone],
142265 +                          lrugen->sizes[gen][file][zone] + total);
142267 +               if (lru_gen_is_active(lruvec, gen))
142268 +                       lru += LRU_ACTIVE;
142269 +               update_lru_size(lruvec, lru, zone, total);
142270 +       }
142272 +       spin_unlock_irq(&lruvec->lru_lock);
142275 +static int page_update_gen(struct page *page, int new_gen)
142277 +       int old_gen;
142278 +       unsigned long old_flags, new_flags;
142280 +       VM_BUG_ON(new_gen >= MAX_NR_GENS);
142282 +       do {
142283 +               old_flags = READ_ONCE(page->flags);
142285 +               old_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
142286 +               if (old_gen < 0)
142287 +                       new_flags = old_flags | BIT(PG_referenced);
142288 +               else
142289 +                       new_flags = (old_flags & ~(LRU_GEN_MASK | LRU_USAGE_MASK |
142290 +                                    LRU_TIER_FLAGS)) | ((new_gen + 1UL) << LRU_GEN_PGOFF);
142292 +               if (old_flags == new_flags)
142293 +                       break;
142294 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
142296 +       return old_gen;
142299 +static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *walk)
142301 +       struct address_space *mapping;
142302 +       struct vm_area_struct *vma = walk->vma;
142303 +       struct mm_walk_args *args = walk->private;
142305 +       if (!vma_is_accessible(vma) || is_vm_hugetlb_page(vma) ||
142306 +           (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)))
142307 +               return true;
142309 +       if (vma_is_anonymous(vma))
142310 +               return !args->should_walk[0];
142312 +       if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
142313 +               return true;
142315 +       mapping = vma->vm_file->f_mapping;
142316 +       if (!mapping->a_ops->writepage)
142317 +               return true;
142319 +       if (shmem_mapping(mapping))
142320 +               return !args->should_walk[0] ||
142321 +                      mapping_unevictable(vma->vm_file->f_mapping);
142323 +       return !args->should_walk[1] || mapping_unevictable(mapping);
142327 + * Some userspace memory allocators create many single-page VMAs. So instead of
142328 + * returning back to the PGD table for each of such VMAs, we finish at least an
142329 + * entire PMD table and therefore avoid many zigzags. This optimizes page table
142330 + * walks for workloads that have large numbers of tiny VMAs.
142332 + * We scan PMD tables in two pass. The first pass reaches to PTE tables and
142333 + * doesn't take the PMD lock. The second pass clears the accessed bit on PMD
142334 + * entries and needs to take the PMD lock. The second pass is only done on the
142335 + * PMD entries that first pass has found the accessed bit is set, and they must
142336 + * be:
142337 + *   1) leaf entries mapping huge pages from the node under reclaim
142338 + *   2) non-leaf entries whose leaf entries only map pages from the node under
142339 + *   reclaim, when CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG=y.
142340 + */
142341 +static bool get_next_interval(struct mm_walk *walk, unsigned long mask, unsigned long size,
142342 +                             unsigned long *start, unsigned long *end)
142344 +       unsigned long next = round_up(*end, size);
142345 +       struct mm_walk_args *args = walk->private;
142347 +       VM_BUG_ON(mask & size);
142348 +       VM_BUG_ON(*start != *end);
142349 +       VM_BUG_ON(!(*end & ~mask));
142350 +       VM_BUG_ON((*end & mask) != (next & mask));
142352 +       while (walk->vma) {
142353 +               if (next >= walk->vma->vm_end) {
142354 +                       walk->vma = walk->vma->vm_next;
142355 +                       continue;
142356 +               }
142358 +               if ((next & mask) != (walk->vma->vm_start & mask))
142359 +                       return false;
142361 +               if (should_skip_vma(walk->vma->vm_start, walk->vma->vm_end, walk)) {
142362 +                       walk->vma = walk->vma->vm_next;
142363 +                       continue;
142364 +               }
142366 +               args->mm_stats[MM_VMA_INTERVAL]++;
142368 +               *start = max(next, walk->vma->vm_start);
142369 +               next = (next | ~mask) + 1;
142370 +               /* rounded-up boundaries can wrap to 0 */
142371 +               *end = next && next < walk->vma->vm_end ? next : walk->vma->vm_end;
142373 +               return true;
142374 +       }
142376 +       return false;
142379 +static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
142380 +                          struct mm_walk *walk)
142382 +       int i;
142383 +       pte_t *pte;
142384 +       spinlock_t *ptl;
142385 +       int remote = 0;
142386 +       struct mm_walk_args *args = walk->private;
142387 +       int old_gen, new_gen = lru_gen_from_seq(args->max_seq);
142389 +       VM_BUG_ON(pmd_leaf(*pmd));
142391 +       pte = pte_offset_map_lock(walk->mm, pmd, start & PMD_MASK, &ptl);
142392 +       arch_enter_lazy_mmu_mode();
142393 +restart:
142394 +       for (i = pte_index(start); start != end; i++, start += PAGE_SIZE) {
142395 +               struct page *page;
142396 +               unsigned long pfn = pte_pfn(pte[i]);
142398 +               if (!pte_present(pte[i]) || is_zero_pfn(pfn)) {
142399 +                       args->mm_stats[MM_LEAF_HOLE]++;
142400 +                       continue;
142401 +               }
142403 +               if (WARN_ON_ONCE(pte_devmap(pte[i]) || pte_special(pte[i])))
142404 +                       continue;
142406 +               if (!pte_young(pte[i])) {
142407 +                       args->mm_stats[MM_LEAF_OLD]++;
142408 +                       continue;
142409 +               }
142411 +               if (pfn < args->start_pfn || pfn >= args->end_pfn) {
142412 +                       remote++;
142413 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
142414 +                       continue;
142415 +               }
142417 +               page = compound_head(pfn_to_page(pfn));
142418 +               if (page_to_nid(page) != args->node_id) {
142419 +                       remote++;
142420 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
142421 +                       continue;
142422 +               }
142424 +               if (!ptep_test_and_clear_young(walk->vma, start, pte + i))
142425 +                       continue;
142427 +               if (pte_dirty(pte[i]) && !PageDirty(page) &&
142428 +                   !(PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page))) {
142429 +                       set_page_dirty(page);
142430 +                       args->mm_stats[MM_LEAF_DIRTY]++;
142431 +               }
142433 +               if (page_memcg_rcu(page) != args->memcg) {
142434 +                       args->mm_stats[MM_LEAF_OTHER_MEMCG]++;
142435 +                       continue;
142436 +               }
142438 +               old_gen = page_update_gen(page, new_gen);
142439 +               if (old_gen >= 0 && old_gen != new_gen)
142440 +                       update_batch_size(page, old_gen, new_gen, args);
142441 +               args->mm_stats[MM_LEAF_YOUNG]++;
142442 +       }
142444 +       if (i < PTRS_PER_PTE && get_next_interval(walk, PMD_MASK, PAGE_SIZE, &start, &end))
142445 +               goto restart;
142447 +       arch_leave_lazy_mmu_mode();
142448 +       pte_unmap_unlock(pte, ptl);
142450 +       return !remote;
142453 +static bool walk_pmd_range_unlocked(pud_t *pud, unsigned long start, unsigned long end,
142454 +                                   struct mm_walk *walk)
142456 +       int i;
142457 +       pmd_t *pmd;
142458 +       unsigned long next;
142459 +       int young = 0;
142460 +       struct mm_walk_args *args = walk->private;
142462 +       VM_BUG_ON(pud_leaf(*pud));
142464 +       pmd = pmd_offset(pud, start & PUD_MASK);
142465 +restart:
142466 +       for (i = pmd_index(start); start != end; i++, start = next) {
142467 +               pmd_t val = pmd_read_atomic(pmd + i);
142469 +               next = pmd_addr_end(start, end);
142471 +               barrier();
142472 +               if (!pmd_present(val) || is_huge_zero_pmd(val)) {
142473 +                       args->mm_stats[MM_LEAF_HOLE]++;
142474 +                       continue;
142475 +               }
142477 +               if (pmd_trans_huge(val)) {
142478 +                       unsigned long pfn = pmd_pfn(val);
142480 +                       if (!pmd_young(val)) {
142481 +                               args->mm_stats[MM_LEAF_OLD]++;
142482 +                               continue;
142483 +                       }
142485 +                       if (pfn < args->start_pfn || pfn >= args->end_pfn) {
142486 +                               args->mm_stats[MM_LEAF_OTHER_NODE]++;
142487 +                               continue;
142488 +                       }
142490 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
142491 +                       young++;
142492 +                       __set_bit(i, args->bitmap);
142493 +#endif
142494 +                       continue;
142495 +               }
142497 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
142498 +               if (!pmd_young(val)) {
142499 +                       args->mm_stats[MM_NONLEAF_OLD]++;
142500 +                       continue;
142501 +               }
142502 +#endif
142504 +               if (walk_pte_range(&val, start, next, walk)) {
142505 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
142506 +                       young++;
142507 +                       __set_bit(i, args->bitmap);
142508 +#endif
142509 +               }
142510 +       }
142512 +       if (i < PTRS_PER_PMD && get_next_interval(walk, PUD_MASK, PMD_SIZE, &start, &end))
142513 +               goto restart;
142515 +       return young;
142518 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
142519 +static void walk_pmd_range_locked(pud_t *pud, unsigned long start, unsigned long end,
142520 +                                 struct mm_walk *walk)
142522 +       int i;
142523 +       pmd_t *pmd;
142524 +       spinlock_t *ptl;
142525 +       struct mm_walk_args *args = walk->private;
142526 +       int old_gen, new_gen = lru_gen_from_seq(args->max_seq);
142528 +       VM_BUG_ON(pud_leaf(*pud));
142530 +       start &= PUD_MASK;
142531 +       pmd = pmd_offset(pud, start);
142532 +       ptl = pmd_lock(walk->mm, pmd);
142533 +       arch_enter_lazy_mmu_mode();
142535 +       for_each_set_bit(i, args->bitmap, PTRS_PER_PMD) {
142536 +               struct page *page;
142537 +               unsigned long pfn = pmd_pfn(pmd[i]);
142538 +               unsigned long addr = start + PMD_SIZE * i;
142540 +               if (!pmd_present(pmd[i]) || is_huge_zero_pmd(pmd[i])) {
142541 +                       args->mm_stats[MM_LEAF_HOLE]++;
142542 +                       continue;
142543 +               }
142545 +               if (WARN_ON_ONCE(pmd_devmap(pmd[i])))
142546 +                       continue;
142548 +               if (!pmd_young(pmd[i])) {
142549 +                       args->mm_stats[MM_LEAF_OLD]++;
142550 +                       continue;
142551 +               }
142553 +               if (!pmd_trans_huge(pmd[i])) {
142554 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
142555 +                       args->mm_stats[MM_NONLEAF_YOUNG]++;
142556 +                       pmdp_test_and_clear_young(walk->vma, addr, pmd + i);
142557 +#endif
142558 +                       continue;
142559 +               }
142561 +               if (pfn < args->start_pfn || pfn >= args->end_pfn) {
142562 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
142563 +                       continue;
142564 +               }
142566 +               page = pfn_to_page(pfn);
142567 +               VM_BUG_ON_PAGE(PageTail(page), page);
142568 +               if (page_to_nid(page) != args->node_id) {
142569 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
142570 +                       continue;
142571 +               }
142573 +               if (!pmdp_test_and_clear_young(walk->vma, addr, pmd + i))
142574 +                       continue;
142576 +               if (pmd_dirty(pmd[i]) && !PageDirty(page) &&
142577 +                   !(PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page))) {
142578 +                       set_page_dirty(page);
142579 +                       args->mm_stats[MM_LEAF_DIRTY]++;
142580 +               }
142582 +               if (page_memcg_rcu(page) != args->memcg) {
142583 +                       args->mm_stats[MM_LEAF_OTHER_MEMCG]++;
142584 +                       continue;
142585 +               }
142587 +               old_gen = page_update_gen(page, new_gen);
142588 +               if (old_gen >= 0 && old_gen != new_gen)
142589 +                       update_batch_size(page, old_gen, new_gen, args);
142590 +               args->mm_stats[MM_LEAF_YOUNG]++;
142591 +       }
142593 +       arch_leave_lazy_mmu_mode();
142594 +       spin_unlock(ptl);
142596 +       memset(args->bitmap, 0, sizeof(args->bitmap));
142598 +#else
142599 +static void walk_pmd_range_locked(pud_t *pud, unsigned long start, unsigned long end,
142600 +                                 struct mm_walk *walk)
142603 +#endif
142605 +static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
142606 +                         struct mm_walk *walk)
142608 +       int i;
142609 +       pud_t *pud;
142610 +       unsigned long next;
142611 +       struct mm_walk_args *args = walk->private;
142613 +       VM_BUG_ON(p4d_leaf(*p4d));
142615 +       pud = pud_offset(p4d, start & P4D_MASK);
142616 +restart:
142617 +       for (i = pud_index(start); start != end; i++, start = next) {
142618 +               pud_t val = READ_ONCE(pud[i]);
142620 +               next = pud_addr_end(start, end);
142622 +               if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
142623 +                       continue;
142625 +               if (walk_pmd_range_unlocked(&val, start, next, walk))
142626 +                       walk_pmd_range_locked(&val, start, next, walk);
142628 +               if (args->batch_size >= MAX_BATCH_SIZE) {
142629 +                       end = (start | ~PUD_MASK) + 1;
142630 +                       goto done;
142631 +               }
142632 +       }
142634 +       if (i < PTRS_PER_PUD && get_next_interval(walk, P4D_MASK, PUD_SIZE, &start, &end))
142635 +               goto restart;
142637 +       end = round_up(end, P4D_SIZE);
142638 +done:
142639 +       /* rounded-up boundaries can wrap to 0 */
142640 +       args->next_addr = end && walk->vma ? max(end, walk->vma->vm_start) : 0;
142642 +       return -EAGAIN;
142645 +static void walk_mm(struct mm_walk_args *args, int swappiness, struct mm_struct *mm)
142647 +       static const struct mm_walk_ops mm_walk_ops = {
142648 +               .test_walk = should_skip_vma,
142649 +               .p4d_entry = walk_pud_range,
142650 +       };
142652 +       int err;
142653 +       int file;
142654 +       int nid = args->node_id;
142655 +       struct mem_cgroup *memcg = args->memcg;
142656 +       struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
142658 +       args->next_addr = FIRST_USER_ADDRESS;
142659 +       for (file = !swappiness; file < ANON_AND_FILE; file++)
142660 +               args->should_walk[file] = lru_gen_mm_is_active(mm) ||
142661 +                                         node_isset(nid, mm->lrugen.nodes[file]);
142663 +       do {
142664 +               unsigned long start = args->next_addr;
142665 +               unsigned long end = mm->highest_vm_end;
142667 +               err = -EBUSY;
142669 +               preempt_disable();
142670 +               rcu_read_lock();
142672 +#ifdef CONFIG_MEMCG
142673 +               if (memcg && atomic_read(&memcg->moving_account)) {
142674 +                       args->mm_stats[MM_LOCK_CONTENTION]++;
142675 +                       goto contended;
142676 +               }
142677 +#endif
142678 +               if (!mmap_read_trylock(mm)) {
142679 +                       args->mm_stats[MM_LOCK_CONTENTION]++;
142680 +                       goto contended;
142681 +               }
142683 +               err = walk_page_range(mm, start, end, &mm_walk_ops, args);
142685 +               mmap_read_unlock(mm);
142687 +               if (args->batch_size)
142688 +                       reset_batch_size(lruvec, args);
142689 +contended:
142690 +               rcu_read_unlock();
142691 +               preempt_enable();
142693 +               cond_resched();
142694 +       } while (err == -EAGAIN && args->next_addr &&
142695 +                !mm_is_oom_victim(mm) && !mm_has_migrated(mm, memcg));
142697 +       if (err == -EBUSY)
142698 +               return;
142700 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
142701 +               if (args->should_walk[file])
142702 +                       node_clear(nid, mm->lrugen.nodes[file]);
142703 +       }
142706 +static void page_inc_gen(struct page *page, struct lruvec *lruvec, bool front)
142708 +       int old_gen, new_gen;
142709 +       unsigned long old_flags, new_flags;
142710 +       int file = page_is_file_lru(page);
142711 +       int zone = page_zonenum(page);
142712 +       struct lrugen *lrugen = &lruvec->evictable;
142714 +       old_gen = lru_gen_from_seq(lrugen->min_seq[file]);
142716 +       do {
142717 +               old_flags = READ_ONCE(page->flags);
142718 +               new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
142719 +               VM_BUG_ON_PAGE(new_gen < 0, page);
142720 +               if (new_gen >= 0 && new_gen != old_gen)
142721 +                       goto sort;
142723 +               new_gen = (old_gen + 1) % MAX_NR_GENS;
142724 +               new_flags = (old_flags & ~(LRU_GEN_MASK | LRU_USAGE_MASK | LRU_TIER_FLAGS)) |
142725 +                           ((new_gen + 1UL) << LRU_GEN_PGOFF);
142726 +               /* mark the page for reclaim if it's pending writeback */
142727 +               if (front)
142728 +                       new_flags |= BIT(PG_reclaim);
142729 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
142731 +       lru_gen_update_size(page, lruvec, old_gen, new_gen);
142732 +sort:
142733 +       if (front)
142734 +               list_move(&page->lru, &lrugen->lists[new_gen][file][zone]);
142735 +       else
142736 +               list_move_tail(&page->lru, &lrugen->lists[new_gen][file][zone]);
142739 +static bool try_inc_min_seq(struct lruvec *lruvec, int file)
142741 +       int gen, zone;
142742 +       bool success = false;
142743 +       struct lrugen *lrugen = &lruvec->evictable;
142745 +       VM_BUG_ON(!seq_is_valid(lruvec));
142747 +       while (get_nr_gens(lruvec, file) > MIN_NR_GENS) {
142748 +               gen = lru_gen_from_seq(lrugen->min_seq[file]);
142750 +               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
142751 +                       if (!list_empty(&lrugen->lists[gen][file][zone]))
142752 +                               return success;
142753 +               }
142755 +               reset_controller_pos(lruvec, gen, file);
142756 +               WRITE_ONCE(lrugen->min_seq[file], lrugen->min_seq[file] + 1);
142758 +               success = true;
142759 +       }
142761 +       return success;
142764 +static bool inc_min_seq(struct lruvec *lruvec, int file)
142766 +       int gen, zone;
142767 +       int batch_size = 0;
142768 +       struct lrugen *lrugen = &lruvec->evictable;
142770 +       VM_BUG_ON(!seq_is_valid(lruvec));
142772 +       if (get_nr_gens(lruvec, file) != MAX_NR_GENS)
142773 +               return true;
142775 +       gen = lru_gen_from_seq(lrugen->min_seq[file]);
142777 +       for (zone = 0; zone < MAX_NR_ZONES; zone++) {
142778 +               struct list_head *head = &lrugen->lists[gen][file][zone];
142780 +               while (!list_empty(head)) {
142781 +                       struct page *page = lru_to_page(head);
142783 +                       VM_BUG_ON_PAGE(PageTail(page), page);
142784 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
142785 +                       VM_BUG_ON_PAGE(PageActive(page), page);
142786 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
142787 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
142789 +                       prefetchw_prev_lru_page(page, head, flags);
142791 +                       page_inc_gen(page, lruvec, false);
142793 +                       if (++batch_size == MAX_BATCH_SIZE)
142794 +                               return false;
142795 +               }
142797 +               VM_BUG_ON(lrugen->sizes[gen][file][zone]);
142798 +       }
142800 +       reset_controller_pos(lruvec, gen, file);
142801 +       WRITE_ONCE(lrugen->min_seq[file], lrugen->min_seq[file] + 1);
142803 +       return true;
142806 +static void inc_max_seq(struct lruvec *lruvec)
142808 +       int gen, file, zone;
142809 +       struct lrugen *lrugen = &lruvec->evictable;
142811 +       spin_lock_irq(&lruvec->lru_lock);
142813 +       VM_BUG_ON(!seq_is_valid(lruvec));
142815 +       for (file = 0; file < ANON_AND_FILE; file++) {
142816 +               if (try_inc_min_seq(lruvec, file))
142817 +                       continue;
142819 +               while (!inc_min_seq(lruvec, file)) {
142820 +                       spin_unlock_irq(&lruvec->lru_lock);
142821 +                       cond_resched();
142822 +                       spin_lock_irq(&lruvec->lru_lock);
142823 +               }
142824 +       }
142826 +       gen = lru_gen_from_seq(lrugen->max_seq - 1);
142827 +       for_each_type_zone(file, zone) {
142828 +               enum lru_list lru = LRU_FILE * file;
142829 +               long total = lrugen->sizes[gen][file][zone];
142831 +               if (!total)
142832 +                       continue;
142834 +               WARN_ON_ONCE(total != (int)total);
142836 +               update_lru_size(lruvec, lru, zone, total);
142837 +               update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -total);
142838 +       }
142840 +       gen = lru_gen_from_seq(lrugen->max_seq + 1);
142841 +       for_each_type_zone(file, zone) {
142842 +               VM_BUG_ON(lrugen->sizes[gen][file][zone]);
142843 +               VM_BUG_ON(!list_empty(&lrugen->lists[gen][file][zone]));
142844 +       }
142846 +       for (file = 0; file < ANON_AND_FILE; file++)
142847 +               reset_controller_pos(lruvec, gen, file);
142849 +       WRITE_ONCE(lrugen->timestamps[gen], jiffies);
142850 +       /* make sure all preceding modifications appear first */
142851 +       smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
142853 +       spin_unlock_irq(&lruvec->lru_lock);
142856 +/* Main function used by foreground, background and user-triggered aging. */
142857 +static bool walk_mm_list(struct lruvec *lruvec, unsigned long max_seq,
142858 +                        struct scan_control *sc, int swappiness, struct mm_walk_args *args)
142860 +       bool last;
142861 +       bool alloc = !args;
142862 +       struct mm_struct *mm = NULL;
142863 +       struct lrugen *lrugen = &lruvec->evictable;
142864 +       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
142865 +       int nid = pgdat->node_id;
142866 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
142867 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
142869 +       VM_BUG_ON(max_seq > READ_ONCE(lrugen->max_seq));
142871 +       /*
142872 +        * For each walk of the mm_struct list of a memcg, we decrement the
142873 +        * priority of its lrugen. For each walk of all memcgs in kswapd, we
142874 +        * increment the priority of every lrugen.
142875 +        *
142876 +        * So if this lrugen has a higher priority (smaller value), it means
142877 +        * other concurrent reclaimers have walked its mm list, and we skip it
142878 +        * for this priority in order to balance the pressure on all memcgs.
142879 +        */
142880 +       if (!mem_cgroup_disabled() && !cgroup_reclaim(sc) &&
142881 +           sc->priority > atomic_read(&lrugen->priority))
142882 +               return false;
142884 +       if (alloc) {
142885 +               args = kvzalloc_node(sizeof(*args), GFP_KERNEL, nid);
142886 +               if (!args)
142887 +                       return false;
142888 +       }
142890 +       args->memcg = memcg;
142891 +       args->max_seq = max_seq;
142892 +       args->start_pfn = pgdat->node_start_pfn;
142893 +       args->end_pfn = pgdat_end_pfn(pgdat);
142894 +       args->node_id = nid;
142896 +       do {
142897 +               last = get_next_mm(args, swappiness, &mm);
142898 +               if (mm)
142899 +                       walk_mm(args, swappiness, mm);
142901 +               cond_resched();
142902 +       } while (mm);
142904 +       if (alloc)
142905 +               kvfree(args);
142907 +       if (!last) {
142908 +               /* foreground aging prefers not to wait unless "necessary" */
142909 +               if (!current_is_kswapd() && sc->priority < DEF_PRIORITY - 2)
142910 +                       wait_event_killable(mm_list->nodes[nid].wait,
142911 +                                           max_seq < READ_ONCE(lrugen->max_seq));
142913 +               return max_seq < READ_ONCE(lrugen->max_seq);
142914 +       }
142916 +       VM_BUG_ON(max_seq != READ_ONCE(lrugen->max_seq));
142918 +       inc_max_seq(lruvec);
142920 +       if (!mem_cgroup_disabled())
142921 +               atomic_add_unless(&lrugen->priority, -1, 0);
142923 +       /* order against inc_max_seq() */
142924 +       smp_mb();
142925 +       /* either we see any waiters or they will see the updated max_seq */
142926 +       if (waitqueue_active(&mm_list->nodes[nid].wait))
142927 +               wake_up_all(&mm_list->nodes[nid].wait);
142929 +       wakeup_flusher_threads(WB_REASON_VMSCAN);
142931 +       return true;
142934 +void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw)
142936 +       pte_t *pte;
142937 +       unsigned long start, end;
142938 +       int old_gen, new_gen;
142939 +       unsigned long flags;
142940 +       struct lruvec *lruvec;
142941 +       struct mem_cgroup *memcg;
142942 +       struct pglist_data *pgdat = page_pgdat(pvmw->page);
142944 +       lockdep_assert_held(pvmw->ptl);
142946 +       start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
142947 +       end = pmd_addr_end(pvmw->address, pvmw->vma->vm_end);
142948 +       pte = pvmw->pte - ((pvmw->address - start) >> PAGE_SHIFT);
142950 +       memcg = lock_page_memcg(pvmw->page);
142951 +       lruvec = lock_page_lruvec_irqsave(pvmw->page, &flags);
142953 +       new_gen = lru_gen_from_seq(lruvec->evictable.max_seq);
142955 +       for (; start != end; pte++, start += PAGE_SIZE) {
142956 +               struct page *page;
142957 +               unsigned long pfn = pte_pfn(*pte);
142959 +               if (!pte_present(*pte) || !pte_young(*pte) || is_zero_pfn(pfn))
142960 +                       continue;
142962 +               if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
142963 +                       continue;
142965 +               page = compound_head(pfn_to_page(pfn));
142966 +               if (page_to_nid(page) != pgdat->node_id)
142967 +                       continue;
142969 +               if (page_memcg_rcu(page) != memcg)
142970 +                       continue;
142971 +               /*
142972 +                * We may be holding many locks. So try to finish as fast as
142973 +                * possible and leave the accessed and the dirty bits to page
142974 +                * table walks.
142975 +                */
142976 +               old_gen = page_update_gen(page, new_gen);
142977 +               if (old_gen >= 0 && old_gen != new_gen)
142978 +                       lru_gen_update_size(page, lruvec, old_gen, new_gen);
142979 +       }
142981 +       unlock_page_lruvec_irqrestore(lruvec, flags);
142982 +       unlock_page_memcg(pvmw->page);
142985 +/******************************************************************************
142986 + *                          the eviction
142987 + ******************************************************************************/
142989 +static bool sort_page(struct page *page, struct lruvec *lruvec, int tier_to_isolate)
142991 +       bool success;
142992 +       int gen = page_lru_gen(page);
142993 +       int file = page_is_file_lru(page);
142994 +       int zone = page_zonenum(page);
142995 +       int tier = lru_tier_from_usage(page_tier_usage(page));
142996 +       struct lrugen *lrugen = &lruvec->evictable;
142998 +       VM_BUG_ON_PAGE(gen == -1, page);
142999 +       VM_BUG_ON_PAGE(tier_to_isolate < 0, page);
143001 +       /* a lazy-free page that has been written into? */
143002 +       if (file && PageDirty(page) && PageAnon(page)) {
143003 +               success = lru_gen_deletion(page, lruvec);
143004 +               VM_BUG_ON_PAGE(!success, page);
143005 +               SetPageSwapBacked(page);
143006 +               add_page_to_lru_list_tail(page, lruvec);
143007 +               return true;
143008 +       }
143010 +       /* page_update_gen() has updated the page? */
143011 +       if (gen != lru_gen_from_seq(lrugen->min_seq[file])) {
143012 +               list_move(&page->lru, &lrugen->lists[gen][file][zone]);
143013 +               return true;
143014 +       }
143016 +       /* activate the page if its tier has a higher refault rate */
143017 +       if (tier_to_isolate < tier) {
143018 +               int sid = sid_from_seq_or_gen(gen);
143020 +               page_inc_gen(page, lruvec, false);
143021 +               WRITE_ONCE(lrugen->activated[sid][file][tier - 1],
143022 +                          lrugen->activated[sid][file][tier - 1] + thp_nr_pages(page));
143023 +               inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);
143024 +               return true;
143025 +       }
143027 +       /*
143028 +        * A page can't be immediately evicted, and page_inc_gen() will mark it
143029 +        * for reclaim and hopefully writeback will write it soon if it's dirty.
143030 +        */
143031 +       if (PageLocked(page) || PageWriteback(page) || (file && PageDirty(page))) {
143032 +               page_inc_gen(page, lruvec, true);
143033 +               return true;
143034 +       }
143036 +       return false;
143039 +static bool should_skip_page(struct page *page, struct scan_control *sc)
143041 +       if (!sc->may_unmap && page_mapped(page))
143042 +               return true;
143044 +       if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) &&
143045 +           (PageDirty(page) || (PageAnon(page) && !PageSwapCache(page))))
143046 +               return true;
143048 +       if (!get_page_unless_zero(page))
143049 +               return true;
143051 +       if (!TestClearPageLRU(page)) {
143052 +               put_page(page);
143053 +               return true;
143054 +       }
143056 +       return false;
143059 +static void isolate_page(struct page *page, struct lruvec *lruvec)
143061 +       bool success;
143063 +       success = lru_gen_deletion(page, lruvec);
143064 +       VM_BUG_ON_PAGE(!success, page);
143066 +       if (PageActive(page)) {
143067 +               ClearPageActive(page);
143068 +               /* make sure shrink_page_list() rejects this page */
143069 +               SetPageReferenced(page);
143070 +               return;
143071 +       }
143073 +       /* make sure shrink_page_list() doesn't try to write this page */
143074 +       ClearPageReclaim(page);
143075 +       /* make sure shrink_page_list() doesn't reject this page */
143076 +       ClearPageReferenced(page);
143079 +static int scan_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
143080 +                             long *nr_to_scan, int file, int tier,
143081 +                             struct list_head *list)
143083 +       bool success;
143084 +       int gen, zone;
143085 +       enum vm_event_item item;
143086 +       int sorted = 0;
143087 +       int scanned = 0;
143088 +       int isolated = 0;
143089 +       int batch_size = 0;
143090 +       struct lrugen *lrugen = &lruvec->evictable;
143092 +       VM_BUG_ON(!list_empty(list));
143094 +       if (get_nr_gens(lruvec, file) == MIN_NR_GENS)
143095 +               return -ENOENT;
143097 +       gen = lru_gen_from_seq(lrugen->min_seq[file]);
143099 +       for (zone = sc->reclaim_idx; zone >= 0; zone--) {
143100 +               LIST_HEAD(moved);
143101 +               int skipped = 0;
143102 +               struct list_head *head = &lrugen->lists[gen][file][zone];
143104 +               while (!list_empty(head)) {
143105 +                       struct page *page = lru_to_page(head);
143106 +                       int delta = thp_nr_pages(page);
143108 +                       VM_BUG_ON_PAGE(PageTail(page), page);
143109 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
143110 +                       VM_BUG_ON_PAGE(PageActive(page), page);
143111 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
143112 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
143114 +                       prefetchw_prev_lru_page(page, head, flags);
143116 +                       scanned += delta;
143118 +                       if (sort_page(page, lruvec, tier))
143119 +                               sorted += delta;
143120 +                       else if (should_skip_page(page, sc)) {
143121 +                               list_move(&page->lru, &moved);
143122 +                               skipped += delta;
143123 +                       } else {
143124 +                               isolate_page(page, lruvec);
143125 +                               list_add(&page->lru, list);
143126 +                               isolated += delta;
143127 +                       }
143129 +                       if (scanned >= *nr_to_scan || isolated >= SWAP_CLUSTER_MAX ||
143130 +                           ++batch_size == MAX_BATCH_SIZE)
143131 +                               break;
143132 +               }
143134 +               list_splice(&moved, head);
143135 +               __count_zid_vm_events(PGSCAN_SKIP, zone, skipped);
143137 +               if (scanned >= *nr_to_scan || isolated >= SWAP_CLUSTER_MAX ||
143138 +                   batch_size == MAX_BATCH_SIZE)
143139 +                       break;
143140 +       }
143142 +       success = try_inc_min_seq(lruvec, file);
143144 +       item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
143145 +       if (!cgroup_reclaim(sc))
143146 +               __count_vm_events(item, scanned);
143147 +       __count_memcg_events(lruvec_memcg(lruvec), item, scanned);
143148 +       __count_vm_events(PGSCAN_ANON + file, scanned);
143150 +       *nr_to_scan -= scanned;
143152 +       if (*nr_to_scan <= 0 || success || isolated)
143153 +               return isolated;
143154 +       /*
143155 +        * We may have trouble finding eligible pages due to reclaim_idx,
143156 +        * may_unmap and may_writepage. The following check makes sure we won't
143157 +        * be stuck if we aren't making enough progress.
143158 +        */
143159 +       return batch_size == MAX_BATCH_SIZE && sorted >= SWAP_CLUSTER_MAX ? 0 : -ENOENT;
143162 +static int get_tier_to_isolate(struct lruvec *lruvec, int file)
143164 +       int tier;
143165 +       struct controller_pos sp, pv;
143167 +       /*
143168 +        * Ideally we don't want to evict upper tiers that have higher refault
143169 +        * rates. However, we need to leave some margin for the fluctuation in
143170 +        * refault rates. So we use a larger gain factor to make sure upper
143171 +        * tiers are indeed more active. We choose 2 because the lowest upper
143172 +        * tier would have twice of the refault rate of the base tier, according
143173 +        * to their numbers of accesses.
143174 +        */
143175 +       read_controller_pos(&sp, lruvec, file, 0, 1);
143176 +       for (tier = 1; tier < MAX_NR_TIERS; tier++) {
143177 +               read_controller_pos(&pv, lruvec, file, tier, 2);
143178 +               if (!positive_ctrl_err(&sp, &pv))
143179 +                       break;
143180 +       }
143182 +       return tier - 1;
143185 +static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_to_isolate)
143187 +       int file, tier;
143188 +       struct controller_pos sp, pv;
143189 +       int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness };
143191 +       /*
143192 +        * Compare the refault rates between the base tiers of anon and file to
143193 +        * determine which type to evict. Also need to compare the refault rates
143194 +        * of the upper tiers of the selected type with that of the base tier to
143195 +        * determine which tier of the selected type to evict.
143196 +        */
143197 +       read_controller_pos(&sp, lruvec, 0, 0, gain[0]);
143198 +       read_controller_pos(&pv, lruvec, 1, 0, gain[1]);
143199 +       file = positive_ctrl_err(&sp, &pv);
143201 +       read_controller_pos(&sp, lruvec, !file, 0, gain[!file]);
143202 +       for (tier = 1; tier < MAX_NR_TIERS; tier++) {
143203 +               read_controller_pos(&pv, lruvec, file, tier, gain[file]);
143204 +               if (!positive_ctrl_err(&sp, &pv))
143205 +                       break;
143206 +       }
143208 +       *tier_to_isolate = tier - 1;
143210 +       return file;
143213 +static int isolate_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
143214 +                                int swappiness, long *nr_to_scan, int *type_to_scan,
143215 +                                struct list_head *list)
143217 +       int i;
143218 +       int file;
143219 +       int isolated;
143220 +       int tier = -1;
143221 +       DEFINE_MAX_SEQ();
143222 +       DEFINE_MIN_SEQ();
143224 +       VM_BUG_ON(!seq_is_valid(lruvec));
143226 +       if (max_nr_gens(max_seq, min_seq, swappiness) == MIN_NR_GENS)
143227 +               return 0;
143228 +       /*
143229 +        * Try to select a type based on generations and swappiness, and if that
143230 +        * fails, fall back to get_type_to_scan(). When anon and file are both
143231 +        * available from the same generation, swappiness 200 is interpreted as
143232 +        * anon first and swappiness 1 is interpreted as file first.
143233 +        */
143234 +       file = !swappiness || min_seq[0] > min_seq[1] ||
143235 +              (min_seq[0] == min_seq[1] && swappiness != 200 &&
143236 +               (swappiness == 1 || get_type_to_scan(lruvec, swappiness, &tier)));
143238 +       if (tier == -1)
143239 +               tier = get_tier_to_isolate(lruvec, file);
143241 +       for (i = !swappiness; i < ANON_AND_FILE; i++) {
143242 +               isolated = scan_lru_gen_pages(lruvec, sc, nr_to_scan, file, tier, list);
143243 +               if (isolated >= 0)
143244 +                       break;
143246 +               file = !file;
143247 +               tier = get_tier_to_isolate(lruvec, file);
143248 +       }
143250 +       if (isolated < 0)
143251 +               isolated = *nr_to_scan = 0;
143253 +       *type_to_scan = file;
143255 +       return isolated;
143258 +/* Main function used by foreground, background and user-triggered eviction. */
143259 +static bool evict_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
143260 +                               int swappiness, long *nr_to_scan)
143262 +       int file;
143263 +       int isolated;
143264 +       int reclaimed;
143265 +       LIST_HEAD(list);
143266 +       struct page *page;
143267 +       enum vm_event_item item;
143268 +       struct reclaim_stat stat;
143269 +       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
143271 +       spin_lock_irq(&lruvec->lru_lock);
143273 +       isolated = isolate_lru_gen_pages(lruvec, sc, swappiness, nr_to_scan, &file, &list);
143274 +       VM_BUG_ON(list_empty(&list) == !!isolated);
143276 +       if (isolated)
143277 +               __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, isolated);
143279 +       spin_unlock_irq(&lruvec->lru_lock);
143281 +       if (!isolated)
143282 +               goto done;
143284 +       reclaimed = shrink_page_list(&list, pgdat, sc, &stat, false);
143285 +       /*
143286 +        * We need to prevent rejected pages from being added back to the same
143287 +        * lists they were isolated from. Otherwise we may risk looping on them
143288 +        * forever. We use PageActive() or !PageReferenced() && PageWorkingset()
143289 +        * to tell lru_gen_addition() not to add them to the oldest generation.
143290 +        */
143291 +       list_for_each_entry(page, &list, lru) {
143292 +               if (PageMlocked(page))
143293 +                       continue;
143295 +               if (PageReferenced(page)) {
143296 +                       SetPageActive(page);
143297 +                       ClearPageReferenced(page);
143298 +               } else {
143299 +                       ClearPageActive(page);
143300 +                       SetPageWorkingset(page);
143301 +               }
143302 +       }
143304 +       spin_lock_irq(&lruvec->lru_lock);
143306 +       move_pages_to_lru(lruvec, &list);
143308 +       __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -isolated);
143310 +       item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
143311 +       if (!cgroup_reclaim(sc))
143312 +               __count_vm_events(item, reclaimed);
143313 +       __count_memcg_events(lruvec_memcg(lruvec), item, reclaimed);
143314 +       __count_vm_events(PGSTEAL_ANON + file, reclaimed);
143316 +       spin_unlock_irq(&lruvec->lru_lock);
143318 +       mem_cgroup_uncharge_list(&list);
143319 +       free_unref_page_list(&list);
143321 +       sc->nr_reclaimed += reclaimed;
143322 +done:
143323 +       return *nr_to_scan > 0 && sc->nr_reclaimed < sc->nr_to_reclaim;
143326 +/******************************************************************************
143327 + *                          page reclaim
143328 + ******************************************************************************/
143330 +static int get_swappiness(struct lruvec *lruvec)
143332 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
143333 +       int swappiness = mem_cgroup_get_nr_swap_pages(memcg) >= (long)SWAP_CLUSTER_MAX ?
143334 +                        mem_cgroup_swappiness(memcg) : 0;
143336 +       VM_BUG_ON(swappiness > 200U);
143338 +       return swappiness;
143341 +static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
143342 +                                   int swappiness)
143344 +       int gen, file, zone;
143345 +       long nr_to_scan = 0;
143346 +       struct lrugen *lrugen = &lruvec->evictable;
143347 +       DEFINE_MAX_SEQ();
143348 +       DEFINE_MIN_SEQ();
143350 +       lru_add_drain();
143352 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
143353 +               unsigned long seq;
143355 +               for (seq = min_seq[file]; seq <= max_seq; seq++) {
143356 +                       gen = lru_gen_from_seq(seq);
143358 +                       for (zone = 0; zone <= sc->reclaim_idx; zone++)
143359 +                               nr_to_scan += READ_ONCE(lrugen->sizes[gen][file][zone]);
143360 +               }
143361 +       }
143363 +       nr_to_scan = max(nr_to_scan, 0L);
143364 +       nr_to_scan = round_up(nr_to_scan >> sc->priority, SWAP_CLUSTER_MAX);
143366 +       if (max_nr_gens(max_seq, min_seq, swappiness) > MIN_NR_GENS)
143367 +               return nr_to_scan;
143369 +       /* kswapd uses age_lru_gens() */
143370 +       if (current_is_kswapd())
143371 +               return 0;
143373 +       return walk_mm_list(lruvec, max_seq, sc, swappiness, NULL) ? nr_to_scan : 0;
143376 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc)
143378 +       struct blk_plug plug;
143379 +       unsigned long scanned = 0;
143380 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
143382 +       blk_start_plug(&plug);
143384 +       while (true) {
143385 +               long nr_to_scan;
143386 +               int swappiness = sc->may_swap ? get_swappiness(lruvec) : 0;
143388 +               nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness) - scanned;
143389 +               if (nr_to_scan < (long)SWAP_CLUSTER_MAX)
143390 +                       break;
143392 +               scanned += nr_to_scan;
143394 +               if (!evict_lru_gen_pages(lruvec, sc, swappiness, &nr_to_scan))
143395 +                       break;
143397 +               scanned -= nr_to_scan;
143399 +               if (mem_cgroup_below_min(memcg) ||
143400 +                   (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
143401 +                       break;
143403 +               cond_resched();
143404 +       }
143406 +       blk_finish_plug(&plug);
143409 +/******************************************************************************
143410 + *                          the background aging
143411 + ******************************************************************************/
143413 +static int lru_gen_spread = MIN_NR_GENS;
143415 +static void try_walk_mm_list(struct lruvec *lruvec, struct scan_control *sc)
143417 +       int gen, file, zone;
143418 +       long old_and_young[2] = {};
143419 +       struct mm_walk_args args = {};
143420 +       int spread = READ_ONCE(lru_gen_spread);
143421 +       int swappiness = get_swappiness(lruvec);
143422 +       struct lrugen *lrugen = &lruvec->evictable;
143423 +       DEFINE_MAX_SEQ();
143424 +       DEFINE_MIN_SEQ();
143426 +       lru_add_drain();
143428 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
143429 +               unsigned long seq;
143431 +               for (seq = min_seq[file]; seq <= max_seq; seq++) {
143432 +                       gen = lru_gen_from_seq(seq);
143434 +                       for (zone = 0; zone < MAX_NR_ZONES; zone++)
143435 +                               old_and_young[seq == max_seq] +=
143436 +                                       READ_ONCE(lrugen->sizes[gen][file][zone]);
143437 +               }
143438 +       }
143440 +       old_and_young[0] = max(old_and_young[0], 0L);
143441 +       old_and_young[1] = max(old_and_young[1], 0L);
143443 +       if (old_and_young[0] + old_and_young[1] < SWAP_CLUSTER_MAX)
143444 +               return;
143446 +       /* try to spread pages out across spread+1 generations */
143447 +       if (old_and_young[0] >= old_and_young[1] * spread &&
143448 +           min_nr_gens(max_seq, min_seq, swappiness) > max(spread, MIN_NR_GENS))
143449 +               return;
143451 +       walk_mm_list(lruvec, max_seq, sc, swappiness, &args);
143454 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc)
143456 +       struct mem_cgroup *memcg;
143458 +       VM_BUG_ON(!current_is_kswapd());
143460 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
143461 +       do {
143462 +               struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
143463 +               struct lrugen *lrugen = &lruvec->evictable;
143465 +               if (!mem_cgroup_below_min(memcg) &&
143466 +                   (!mem_cgroup_below_low(memcg) || sc->memcg_low_reclaim))
143467 +                       try_walk_mm_list(lruvec, sc);
143469 +               if (!mem_cgroup_disabled())
143470 +                       atomic_add_unless(&lrugen->priority, 1, DEF_PRIORITY);
143472 +               cond_resched();
143473 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
143476 +/******************************************************************************
143477 + *                          state change
143478 + ******************************************************************************/
143480 +#ifdef CONFIG_LRU_GEN_ENABLED
143481 +DEFINE_STATIC_KEY_TRUE(lru_gen_static_key);
143482 +#else
143483 +DEFINE_STATIC_KEY_FALSE(lru_gen_static_key);
143484 +#endif
143486 +static DEFINE_MUTEX(lru_gen_state_mutex);
143487 +static int lru_gen_nr_swapfiles __read_mostly;
143489 +static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
143491 +       int gen, file, zone;
143492 +       enum lru_list lru;
143493 +       struct lrugen *lrugen = &lruvec->evictable;
143495 +       for_each_evictable_lru(lru) {
143496 +               file = is_file_lru(lru);
143498 +               if (lrugen->enabled[file] && !list_empty(&lruvec->lists[lru]))
143499 +                       return false;
143500 +       }
143502 +       for_each_gen_type_zone(gen, file, zone) {
143503 +               if (!lrugen->enabled[file] && !list_empty(&lrugen->lists[gen][file][zone]))
143504 +                       return false;
143506 +               VM_WARN_ONCE(!lrugen->enabled[file] && lrugen->sizes[gen][file][zone],
143507 +                            "lru_gen: possible unbalanced number of pages");
143508 +       }
143510 +       return true;
143513 +static bool fill_lru_gen_lists(struct lruvec *lruvec)
143515 +       enum lru_list lru;
143516 +       int batch_size = 0;
143518 +       for_each_evictable_lru(lru) {
143519 +               int file = is_file_lru(lru);
143520 +               bool active = is_active_lru(lru);
143521 +               struct list_head *head = &lruvec->lists[lru];
143523 +               if (!lruvec->evictable.enabled[file])
143524 +                       continue;
143526 +               while (!list_empty(head)) {
143527 +                       bool success;
143528 +                       struct page *page = lru_to_page(head);
143530 +                       VM_BUG_ON_PAGE(PageTail(page), page);
143531 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
143532 +                       VM_BUG_ON_PAGE(PageActive(page) != active, page);
143533 +                       VM_BUG_ON_PAGE(page_lru_gen(page) != -1, page);
143534 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
143536 +                       prefetchw_prev_lru_page(page, head, flags);
143538 +                       del_page_from_lru_list(page, lruvec);
143539 +                       success = lru_gen_addition(page, lruvec, true);
143540 +                       VM_BUG_ON(!success);
143542 +                       if (++batch_size == MAX_BATCH_SIZE)
143543 +                               return false;
143544 +               }
143545 +       }
143547 +       return true;
143550 +static bool drain_lru_gen_lists(struct lruvec *lruvec)
143552 +       int gen, file, zone;
143553 +       int batch_size = 0;
143555 +       for_each_gen_type_zone(gen, file, zone) {
143556 +               struct list_head *head = &lruvec->evictable.lists[gen][file][zone];
143558 +               if (lruvec->evictable.enabled[file])
143559 +                       continue;
143561 +               while (!list_empty(head)) {
143562 +                       bool success;
143563 +                       struct page *page = lru_to_page(head);
143565 +                       VM_BUG_ON_PAGE(PageTail(page), page);
143566 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
143567 +                       VM_BUG_ON_PAGE(PageActive(page), page);
143568 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
143569 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
143571 +                       prefetchw_prev_lru_page(page, head, flags);
143573 +                       success = lru_gen_deletion(page, lruvec);
143574 +                       VM_BUG_ON(!success);
143575 +                       add_page_to_lru_list(page, lruvec);
143577 +                       if (++batch_size == MAX_BATCH_SIZE)
143578 +                               return false;
143579 +               }
143580 +       }
143582 +       return true;
143586 + * For file page tracking, we enable/disable it according to the main switch.
143587 + * For anon page tracking, we only enabled it when the main switch is on and
143588 + * there is at least one swapfile; we disable it when there are no swapfiles
143589 + * regardless of the value of the main switch. Otherwise, we will eventually
143590 + * reach the max size of the sliding window and have to call inc_min_seq(),
143591 + * which brings an unnecessary overhead.
143592 + */
143593 +void lru_gen_set_state(bool enable, bool main, bool swap)
143595 +       struct mem_cgroup *memcg;
143597 +       mem_hotplug_begin();
143598 +       mutex_lock(&lru_gen_state_mutex);
143599 +       cgroup_lock();
143601 +       main = main && enable != lru_gen_enabled();
143602 +       swap = swap && !(enable ? lru_gen_nr_swapfiles++ : --lru_gen_nr_swapfiles);
143603 +       swap = swap && lru_gen_enabled();
143604 +       if (!main && !swap)
143605 +               goto unlock;
143607 +       if (main) {
143608 +               if (enable)
143609 +                       static_branch_enable(&lru_gen_static_key);
143610 +               else
143611 +                       static_branch_disable(&lru_gen_static_key);
143612 +       }
143614 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
143615 +       do {
143616 +               int nid;
143618 +               for_each_node_state(nid, N_MEMORY) {
143619 +                       struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
143620 +                       struct lrugen *lrugen = &lruvec->evictable;
143622 +                       spin_lock_irq(&lruvec->lru_lock);
143624 +                       VM_BUG_ON(!seq_is_valid(lruvec));
143625 +                       VM_BUG_ON(!state_is_valid(lruvec));
143627 +                       WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
143628 +                       WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
143630 +                       while (!(enable ? fill_lru_gen_lists(lruvec) :
143631 +                                         drain_lru_gen_lists(lruvec))) {
143632 +                               spin_unlock_irq(&lruvec->lru_lock);
143633 +                               cond_resched();
143634 +                               spin_lock_irq(&lruvec->lru_lock);
143635 +                       }
143637 +                       spin_unlock_irq(&lruvec->lru_lock);
143638 +               }
143640 +               cond_resched();
143641 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
143642 +unlock:
143643 +       cgroup_unlock();
143644 +       mutex_unlock(&lru_gen_state_mutex);
143645 +       mem_hotplug_done();
143648 +static int __meminit __maybe_unused lru_gen_online_mem(struct notifier_block *self,
143649 +                                                      unsigned long action, void *arg)
143651 +       struct mem_cgroup *memcg;
143652 +       struct memory_notify *mnb = arg;
143653 +       int nid = mnb->status_change_nid;
143655 +       if (action != MEM_GOING_ONLINE || nid == NUMA_NO_NODE)
143656 +               return NOTIFY_DONE;
143658 +       mutex_lock(&lru_gen_state_mutex);
143659 +       cgroup_lock();
143661 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
143662 +       do {
143663 +               struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
143664 +               struct lrugen *lrugen = &lruvec->evictable;
143666 +               VM_BUG_ON(!seq_is_valid(lruvec));
143667 +               VM_BUG_ON(!state_is_valid(lruvec));
143669 +               WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
143670 +               WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
143671 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
143673 +       cgroup_unlock();
143674 +       mutex_unlock(&lru_gen_state_mutex);
143676 +       return NOTIFY_DONE;
143679 +/******************************************************************************
143680 + *                          sysfs interface
143681 + ******************************************************************************/
143683 +static ssize_t show_lru_gen_spread(struct kobject *kobj, struct kobj_attribute *attr,
143684 +                                  char *buf)
143686 +       return sprintf(buf, "%d\n", READ_ONCE(lru_gen_spread));
143689 +static ssize_t store_lru_gen_spread(struct kobject *kobj, struct kobj_attribute *attr,
143690 +                                   const char *buf, size_t len)
143692 +       int spread;
143694 +       if (kstrtoint(buf, 10, &spread) || spread >= MAX_NR_GENS)
143695 +               return -EINVAL;
143697 +       WRITE_ONCE(lru_gen_spread, spread);
143699 +       return len;
143702 +static struct kobj_attribute lru_gen_spread_attr = __ATTR(
143703 +       spread, 0644, show_lru_gen_spread, store_lru_gen_spread
143706 +static ssize_t show_lru_gen_enabled(struct kobject *kobj, struct kobj_attribute *attr,
143707 +                                   char *buf)
143709 +       return snprintf(buf, PAGE_SIZE, "%ld\n", lru_gen_enabled());
143712 +static ssize_t store_lru_gen_enabled(struct kobject *kobj, struct kobj_attribute *attr,
143713 +                                    const char *buf, size_t len)
143715 +       int enable;
143717 +       if (kstrtoint(buf, 10, &enable))
143718 +               return -EINVAL;
143720 +       lru_gen_set_state(enable, true, false);
143722 +       return len;
143725 +static struct kobj_attribute lru_gen_enabled_attr = __ATTR(
143726 +       enabled, 0644, show_lru_gen_enabled, store_lru_gen_enabled
143729 +static struct attribute *lru_gen_attrs[] = {
143730 +       &lru_gen_spread_attr.attr,
143731 +       &lru_gen_enabled_attr.attr,
143732 +       NULL
143735 +static struct attribute_group lru_gen_attr_group = {
143736 +       .name = "lru_gen",
143737 +       .attrs = lru_gen_attrs,
143740 +/******************************************************************************
143741 + *                          debugfs interface
143742 + ******************************************************************************/
143744 +static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
143746 +       struct mem_cgroup *memcg;
143747 +       loff_t nr_to_skip = *pos;
143749 +       m->private = kzalloc(PATH_MAX, GFP_KERNEL);
143750 +       if (!m->private)
143751 +               return ERR_PTR(-ENOMEM);
143753 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
143754 +       do {
143755 +               int nid;
143757 +               for_each_node_state(nid, N_MEMORY) {
143758 +                       if (!nr_to_skip--)
143759 +                               return mem_cgroup_lruvec(memcg, NODE_DATA(nid));
143760 +               }
143761 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
143763 +       return NULL;
143766 +static void lru_gen_seq_stop(struct seq_file *m, void *v)
143768 +       if (!IS_ERR_OR_NULL(v))
143769 +               mem_cgroup_iter_break(NULL, lruvec_memcg(v));
143771 +       kfree(m->private);
143772 +       m->private = NULL;
143775 +static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
143777 +       int nid = lruvec_pgdat(v)->node_id;
143778 +       struct mem_cgroup *memcg = lruvec_memcg(v);
143780 +       ++*pos;
143782 +       nid = next_memory_node(nid);
143783 +       if (nid == MAX_NUMNODES) {
143784 +               memcg = mem_cgroup_iter(NULL, memcg, NULL);
143785 +               if (!memcg)
143786 +                       return NULL;
143788 +               nid = first_memory_node;
143789 +       }
143791 +       return mem_cgroup_lruvec(memcg, NODE_DATA(nid));
143794 +static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
143795 +                                 unsigned long max_seq, unsigned long *min_seq,
143796 +                                 unsigned long seq)
143798 +       int i;
143799 +       int file, tier;
143800 +       int sid = sid_from_seq_or_gen(seq);
143801 +       struct lrugen *lrugen = &lruvec->evictable;
143802 +       int nid = lruvec_pgdat(lruvec)->node_id;
143803 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
143804 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
143806 +       for (tier = 0; tier < MAX_NR_TIERS; tier++) {
143807 +               seq_printf(m, "            %10d", tier);
143808 +               for (file = 0; file < ANON_AND_FILE; file++) {
143809 +                       unsigned long n[3] = {};
143811 +                       if (seq == max_seq) {
143812 +                               n[0] = READ_ONCE(lrugen->avg_refaulted[file][tier]);
143813 +                               n[1] = READ_ONCE(lrugen->avg_total[file][tier]);
143815 +                               seq_printf(m, " %10luR %10luT %10lu ", n[0], n[1], n[2]);
143816 +                       } else if (seq == min_seq[file] || NR_STAT_GENS > 1) {
143817 +                               n[0] = atomic_long_read(&lrugen->refaulted[sid][file][tier]);
143818 +                               n[1] = atomic_long_read(&lrugen->evicted[sid][file][tier]);
143819 +                               if (tier)
143820 +                                       n[2] = READ_ONCE(lrugen->activated[sid][file][tier - 1]);
143822 +                               seq_printf(m, " %10lur %10lue %10lua", n[0], n[1], n[2]);
143823 +                       } else
143824 +                               seq_puts(m, "          0           0           0 ");
143825 +               }
143826 +               seq_putc(m, '\n');
143827 +       }
143829 +       seq_puts(m, "                      ");
143830 +       for (i = 0; i < NR_MM_STATS; i++) {
143831 +               if (seq == max_seq && NR_STAT_GENS == 1)
143832 +                       seq_printf(m, " %10lu%c", READ_ONCE(mm_list->nodes[nid].stats[sid][i]),
143833 +                                  toupper(MM_STAT_CODES[i]));
143834 +               else if (seq != max_seq && NR_STAT_GENS > 1)
143835 +                       seq_printf(m, " %10lu%c", READ_ONCE(mm_list->nodes[nid].stats[sid][i]),
143836 +                                  MM_STAT_CODES[i]);
143837 +               else
143838 +                       seq_puts(m, "          0 ");
143839 +       }
143840 +       seq_putc(m, '\n');
143843 +static int lru_gen_seq_show(struct seq_file *m, void *v)
143845 +       unsigned long seq;
143846 +       bool full = !debugfs_real_fops(m->file)->write;
143847 +       struct lruvec *lruvec = v;
143848 +       struct lrugen *lrugen = &lruvec->evictable;
143849 +       int nid = lruvec_pgdat(lruvec)->node_id;
143850 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
143851 +       DEFINE_MAX_SEQ();
143852 +       DEFINE_MIN_SEQ();
143854 +       if (nid == first_memory_node) {
143855 +#ifdef CONFIG_MEMCG
143856 +               if (memcg)
143857 +                       cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
143858 +#endif
143859 +               seq_printf(m, "memcg %5hu %s\n",
143860 +                          mem_cgroup_id(memcg), (char *)m->private);
143861 +       }
143863 +       seq_printf(m, " node %5d %10d\n", nid, atomic_read(&lrugen->priority));
143865 +       seq = full ? (max_seq < MAX_NR_GENS ? 0 : max_seq - MAX_NR_GENS + 1) :
143866 +                    min(min_seq[0], min_seq[1]);
143868 +       for (; seq <= max_seq; seq++) {
143869 +               int gen, file, zone;
143870 +               unsigned int msecs;
143872 +               gen = lru_gen_from_seq(seq);
143873 +               msecs = jiffies_to_msecs(jiffies - READ_ONCE(lrugen->timestamps[gen]));
143875 +               seq_printf(m, " %10lu %10u", seq, msecs);
143877 +               for (file = 0; file < ANON_AND_FILE; file++) {
143878 +                       long size = 0;
143880 +                       if (seq < min_seq[file]) {
143881 +                               seq_puts(m, "         -0 ");
143882 +                               continue;
143883 +                       }
143885 +                       for (zone = 0; zone < MAX_NR_ZONES; zone++)
143886 +                               size += READ_ONCE(lrugen->sizes[gen][file][zone]);
143888 +                       seq_printf(m, " %10lu ", max(size, 0L));
143889 +               }
143891 +               seq_putc(m, '\n');
143893 +               if (full)
143894 +                       lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
143895 +       }
143897 +       return 0;
143900 +static const struct seq_operations lru_gen_seq_ops = {
143901 +       .start = lru_gen_seq_start,
143902 +       .stop = lru_gen_seq_stop,
143903 +       .next = lru_gen_seq_next,
143904 +       .show = lru_gen_seq_show,
143907 +static int advance_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness)
143909 +       struct mm_walk_args args = {};
143910 +       struct scan_control sc = {
143911 +               .target_mem_cgroup = lruvec_memcg(lruvec),
143912 +       };
143913 +       DEFINE_MAX_SEQ();
143915 +       if (seq == max_seq)
143916 +               walk_mm_list(lruvec, max_seq, &sc, swappiness, &args);
143918 +       return seq > max_seq ? -EINVAL : 0;
143921 +static int advance_min_seq(struct lruvec *lruvec, unsigned long seq, int swappiness,
143922 +                          unsigned long nr_to_reclaim)
143924 +       struct blk_plug plug;
143925 +       int err = -EINTR;
143926 +       long nr_to_scan = LONG_MAX;
143927 +       struct scan_control sc = {
143928 +               .nr_to_reclaim = nr_to_reclaim,
143929 +               .target_mem_cgroup = lruvec_memcg(lruvec),
143930 +               .may_writepage = 1,
143931 +               .may_unmap = 1,
143932 +               .may_swap = 1,
143933 +               .reclaim_idx = MAX_NR_ZONES - 1,
143934 +               .gfp_mask = GFP_KERNEL,
143935 +       };
143936 +       DEFINE_MAX_SEQ();
143938 +       if (seq >= max_seq - 1)
143939 +               return -EINVAL;
143941 +       blk_start_plug(&plug);
143943 +       while (!signal_pending(current)) {
143944 +               DEFINE_MIN_SEQ();
143946 +               if (seq < min(min_seq[!swappiness], min_seq[swappiness < 200]) ||
143947 +                   !evict_lru_gen_pages(lruvec, &sc, swappiness, &nr_to_scan)) {
143948 +                       err = 0;
143949 +                       break;
143950 +               }
143952 +               cond_resched();
143953 +       }
143955 +       blk_finish_plug(&plug);
143957 +       return err;
143960 +static int advance_seq(char cmd, int memcg_id, int nid, unsigned long seq,
143961 +                      int swappiness, unsigned long nr_to_reclaim)
143963 +       struct lruvec *lruvec;
143964 +       int err = -EINVAL;
143965 +       struct mem_cgroup *memcg = NULL;
143967 +       if (!mem_cgroup_disabled()) {
143968 +               rcu_read_lock();
143969 +               memcg = mem_cgroup_from_id(memcg_id);
143970 +#ifdef CONFIG_MEMCG
143971 +               if (memcg && !css_tryget(&memcg->css))
143972 +                       memcg = NULL;
143973 +#endif
143974 +               rcu_read_unlock();
143976 +               if (!memcg)
143977 +                       goto done;
143978 +       }
143979 +       if (memcg_id != mem_cgroup_id(memcg))
143980 +               goto done;
143982 +       if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
143983 +               goto done;
143985 +       lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
143987 +       if (swappiness == -1)
143988 +               swappiness = get_swappiness(lruvec);
143989 +       else if (swappiness > 200U)
143990 +               goto done;
143992 +       switch (cmd) {
143993 +       case '+':
143994 +               err = advance_max_seq(lruvec, seq, swappiness);
143995 +               break;
143996 +       case '-':
143997 +               err = advance_min_seq(lruvec, seq, swappiness, nr_to_reclaim);
143998 +               break;
143999 +       }
144000 +done:
144001 +       mem_cgroup_put(memcg);
144003 +       return err;
144006 +static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
144007 +                                size_t len, loff_t *pos)
144009 +       void *buf;
144010 +       char *cur, *next;
144011 +       int err = 0;
144013 +       buf = kvmalloc(len + 1, GFP_USER);
144014 +       if (!buf)
144015 +               return -ENOMEM;
144017 +       if (copy_from_user(buf, src, len)) {
144018 +               kvfree(buf);
144019 +               return -EFAULT;
144020 +       }
144022 +       next = buf;
144023 +       next[len] = '\0';
144025 +       while ((cur = strsep(&next, ",;\n"))) {
144026 +               int n;
144027 +               int end;
144028 +               char cmd;
144029 +               int memcg_id;
144030 +               int nid;
144031 +               unsigned long seq;
144032 +               int swappiness = -1;
144033 +               unsigned long nr_to_reclaim = -1;
144035 +               cur = skip_spaces(cur);
144036 +               if (!*cur)
144037 +                       continue;
144039 +               n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
144040 +                          &seq, &end, &swappiness, &end, &nr_to_reclaim, &end);
144041 +               if (n < 4 || cur[end]) {
144042 +                       err = -EINVAL;
144043 +                       break;
144044 +               }
144046 +               err = advance_seq(cmd, memcg_id, nid, seq, swappiness, nr_to_reclaim);
144047 +               if (err)
144048 +                       break;
144049 +       }
144051 +       kvfree(buf);
144053 +       return err ? : len;
144056 +static int lru_gen_seq_open(struct inode *inode, struct file *file)
144058 +       return seq_open(file, &lru_gen_seq_ops);
144061 +static const struct file_operations lru_gen_rw_fops = {
144062 +       .open = lru_gen_seq_open,
144063 +       .read = seq_read,
144064 +       .write = lru_gen_seq_write,
144065 +       .llseek = seq_lseek,
144066 +       .release = seq_release,
144069 +static const struct file_operations lru_gen_ro_fops = {
144070 +       .open = lru_gen_seq_open,
144071 +       .read = seq_read,
144072 +       .llseek = seq_lseek,
144073 +       .release = seq_release,
144076 +/******************************************************************************
144077 + *                          initialization
144078 + ******************************************************************************/
144080 +void lru_gen_init_lruvec(struct lruvec *lruvec)
144082 +       int i;
144083 +       int gen, file, zone;
144084 +       struct lrugen *lrugen = &lruvec->evictable;
144086 +       atomic_set(&lrugen->priority, DEF_PRIORITY);
144088 +       lrugen->max_seq = MIN_NR_GENS + 1;
144089 +       lrugen->enabled[0] = lru_gen_enabled() && lru_gen_nr_swapfiles;
144090 +       lrugen->enabled[1] = lru_gen_enabled();
144092 +       for (i = 0; i <= MIN_NR_GENS + 1; i++)
144093 +               lrugen->timestamps[i] = jiffies;
144095 +       for_each_gen_type_zone(gen, file, zone)
144096 +               INIT_LIST_HEAD(&lrugen->lists[gen][file][zone]);
144099 +static int __init init_lru_gen(void)
144101 +       BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
144102 +       BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
144103 +       BUILD_BUG_ON(sizeof(MM_STAT_CODES) != NR_MM_STATS + 1);
144104 +       BUILD_BUG_ON(PMD_SIZE / PAGE_SIZE != PTRS_PER_PTE);
144105 +       BUILD_BUG_ON(PUD_SIZE / PMD_SIZE != PTRS_PER_PMD);
144106 +       BUILD_BUG_ON(P4D_SIZE / PUD_SIZE != PTRS_PER_PUD);
144108 +       if (mem_cgroup_disabled()) {
144109 +               global_mm_list = alloc_mm_list();
144110 +               if (!global_mm_list) {
144111 +                       pr_err("lru_gen: failed to allocate global mm_struct list\n");
144112 +                       return -ENOMEM;
144113 +               }
144114 +       }
144116 +       if (hotplug_memory_notifier(lru_gen_online_mem, 0))
144117 +               pr_err("lru_gen: failed to subscribe hotplug notifications\n");
144119 +       if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
144120 +               pr_err("lru_gen: failed to create sysfs group\n");
144122 +       debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
144123 +       debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
144125 +       return 0;
144128 + * We want to run as early as possible because some debug code, e.g.,
144129 + * dma_resv_lockdep(), calls mm_alloc() and mmput(). We only depend on mm_kobj,
144130 + * which is initialized one stage earlier.
144131 + */
144132 +arch_initcall(init_lru_gen);
144134 +#endif /* CONFIG_LRU_GEN */
144135 diff --git a/mm/workingset.c b/mm/workingset.c
144136 index cd39902c1062..df363f9419fc 100644
144137 --- a/mm/workingset.c
144138 +++ b/mm/workingset.c
144139 @@ -168,9 +168,9 @@
144140   * refault distance will immediately activate the refaulting page.
144141   */
144143 -#define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) +  \
144144 -                        1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
144145 -#define EVICTION_MASK  (~0UL >> EVICTION_SHIFT)
144146 +#define EVICTION_SHIFT         (BITS_PER_XA_VALUE - MEM_CGROUP_ID_SHIFT - NODES_SHIFT)
144147 +#define EVICTION_MASK          (BIT(EVICTION_SHIFT) - 1)
144148 +#define WORKINGSET_WIDTH       1
144151   * Eviction timestamps need to be able to cover the full range of
144152 @@ -182,38 +182,139 @@
144153   */
144154  static unsigned int bucket_order __read_mostly;
144156 -static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
144157 -                        bool workingset)
144158 +static void *pack_shadow(int memcg_id, struct pglist_data *pgdat, unsigned long val)
144160 -       eviction >>= bucket_order;
144161 -       eviction &= EVICTION_MASK;
144162 -       eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
144163 -       eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
144164 -       eviction = (eviction << 1) | workingset;
144165 +       val = (val << MEM_CGROUP_ID_SHIFT) | memcg_id;
144166 +       val = (val << NODES_SHIFT) | pgdat->node_id;
144168 -       return xa_mk_value(eviction);
144169 +       return xa_mk_value(val);
144172 -static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
144173 -                         unsigned long *evictionp, bool *workingsetp)
144174 +static unsigned long unpack_shadow(void *shadow, int *memcg_id, struct pglist_data **pgdat)
144176 -       unsigned long entry = xa_to_value(shadow);
144177 -       int memcgid, nid;
144178 -       bool workingset;
144179 +       unsigned long val = xa_to_value(shadow);
144181 +       *pgdat = NODE_DATA(val & (BIT(NODES_SHIFT) - 1));
144182 +       val >>= NODES_SHIFT;
144183 +       *memcg_id = val & (BIT(MEM_CGROUP_ID_SHIFT) - 1);
144185 +       return val >> MEM_CGROUP_ID_SHIFT;
144188 +#ifdef CONFIG_LRU_GEN
144190 +#if LRU_GEN_SHIFT + LRU_USAGE_SHIFT >= EVICTION_SHIFT
144191 +#error "Please try smaller NODES_SHIFT, NR_LRU_GENS and TIERS_PER_GEN configurations"
144192 +#endif
144194 +static void page_set_usage(struct page *page, int usage)
144196 +       unsigned long old_flags, new_flags;
144198 +       VM_BUG_ON(usage > BIT(LRU_USAGE_WIDTH));
144200 +       if (!usage)
144201 +               return;
144203 +       do {
144204 +               old_flags = READ_ONCE(page->flags);
144205 +               new_flags = (old_flags & ~LRU_USAGE_MASK) | LRU_TIER_FLAGS |
144206 +                           ((usage - 1UL) << LRU_USAGE_PGOFF);
144207 +               if (old_flags == new_flags)
144208 +                       break;
144209 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
144212 +/* Return a token to be stored in the shadow entry of a page being evicted. */
144213 +static void *lru_gen_eviction(struct page *page)
144215 +       int sid, tier;
144216 +       unsigned long token;
144217 +       unsigned long min_seq;
144218 +       struct lruvec *lruvec;
144219 +       struct lrugen *lrugen;
144220 +       int file = page_is_file_lru(page);
144221 +       int usage = page_tier_usage(page);
144222 +       struct mem_cgroup *memcg = page_memcg(page);
144223 +       struct pglist_data *pgdat = page_pgdat(page);
144225 +       if (!lru_gen_enabled())
144226 +               return NULL;
144228 +       lruvec = mem_cgroup_lruvec(memcg, pgdat);
144229 +       lrugen = &lruvec->evictable;
144230 +       min_seq = READ_ONCE(lrugen->min_seq[file]);
144231 +       token = (min_seq << LRU_USAGE_SHIFT) | usage;
144233 +       sid = sid_from_seq_or_gen(min_seq);
144234 +       tier = lru_tier_from_usage(usage);
144235 +       atomic_long_add(thp_nr_pages(page), &lrugen->evicted[sid][file][tier]);
144237 +       return pack_shadow(mem_cgroup_id(memcg), pgdat, token);
144240 +/* Account a refaulted page based on the token stored in its shadow entry. */
144241 +static bool lru_gen_refault(struct page *page, void *shadow)
144243 +       int sid, tier, usage;
144244 +       int memcg_id;
144245 +       unsigned long token;
144246 +       unsigned long min_seq;
144247 +       struct lruvec *lruvec;
144248 +       struct lrugen *lrugen;
144249 +       struct pglist_data *pgdat;
144250 +       struct mem_cgroup *memcg;
144251 +       int file = page_is_file_lru(page);
144253 +       if (!lru_gen_enabled())
144254 +               return false;
144256 +       token = unpack_shadow(shadow, &memcg_id, &pgdat);
144257 +       if (page_pgdat(page) != pgdat)
144258 +               return true;
144260 +       rcu_read_lock();
144261 +       memcg = page_memcg_rcu(page);
144262 +       if (mem_cgroup_id(memcg) != memcg_id)
144263 +               goto unlock;
144265 +       usage = token & (BIT(LRU_USAGE_SHIFT) - 1);
144266 +       token >>= LRU_USAGE_SHIFT;
144268 +       lruvec = mem_cgroup_lruvec(memcg, pgdat);
144269 +       lrugen = &lruvec->evictable;
144270 +       min_seq = READ_ONCE(lrugen->min_seq[file]);
144271 +       if (token != (min_seq & (EVICTION_MASK >> LRU_USAGE_SHIFT)))
144272 +               goto unlock;
144274 -       workingset = entry & 1;
144275 -       entry >>= 1;
144276 -       nid = entry & ((1UL << NODES_SHIFT) - 1);
144277 -       entry >>= NODES_SHIFT;
144278 -       memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
144279 -       entry >>= MEM_CGROUP_ID_SHIFT;
144281 -       *memcgidp = memcgid;
144282 -       *pgdat = NODE_DATA(nid);
144283 -       *evictionp = entry << bucket_order;
144284 -       *workingsetp = workingset;
144285 +       page_set_usage(page, usage);
144287 +       sid = sid_from_seq_or_gen(min_seq);
144288 +       tier = lru_tier_from_usage(usage);
144289 +       atomic_long_add(thp_nr_pages(page), &lrugen->refaulted[sid][file][tier]);
144290 +       inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
144291 +       if (tier)
144292 +               inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
144293 +unlock:
144294 +       rcu_read_unlock();
144296 +       return true;
144299 +#else /* CONFIG_LRU_GEN */
144301 +static void *lru_gen_eviction(struct page *page)
144303 +       return NULL;
144306 +static bool lru_gen_refault(struct page *page, void *shadow)
144308 +       return false;
144311 +#endif /* CONFIG_LRU_GEN */
144313  /**
144314   * workingset_age_nonresident - age non-resident entries as LRU ages
144315   * @lruvec: the lruvec that was aged
144316 @@ -256,18 +357,25 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
144317         unsigned long eviction;
144318         struct lruvec *lruvec;
144319         int memcgid;
144320 +       void *shadow;
144322         /* Page is fully exclusive and pins page's memory cgroup pointer */
144323         VM_BUG_ON_PAGE(PageLRU(page), page);
144324         VM_BUG_ON_PAGE(page_count(page), page);
144325         VM_BUG_ON_PAGE(!PageLocked(page), page);
144327 +       shadow = lru_gen_eviction(page);
144328 +       if (shadow)
144329 +               return shadow;
144331         lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
144332         /* XXX: target_memcg can be NULL, go through lruvec */
144333         memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
144334         eviction = atomic_long_read(&lruvec->nonresident_age);
144335 +       eviction >>= bucket_order;
144336 +       eviction = (eviction << WORKINGSET_WIDTH) | PageWorkingset(page);
144337         workingset_age_nonresident(lruvec, thp_nr_pages(page));
144338 -       return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
144339 +       return pack_shadow(memcgid, pgdat, eviction);
144342  /**
144343 @@ -294,7 +402,10 @@ void workingset_refault(struct page *page, void *shadow)
144344         bool workingset;
144345         int memcgid;
144347 -       unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
144348 +       if (lru_gen_refault(page, shadow))
144349 +               return;
144351 +       eviction = unpack_shadow(shadow, &memcgid, &pgdat);
144353         rcu_read_lock();
144354         /*
144355 @@ -318,6 +429,8 @@ void workingset_refault(struct page *page, void *shadow)
144356                 goto out;
144357         eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
144358         refault = atomic_long_read(&eviction_lruvec->nonresident_age);
144359 +       workingset = eviction & (BIT(WORKINGSET_WIDTH) - 1);
144360 +       eviction = (eviction >> WORKINGSET_WIDTH) << bucket_order;
144362         /*
144363          * Calculate the refault distance
144364 @@ -335,7 +448,7 @@ void workingset_refault(struct page *page, void *shadow)
144365          * longest time, so the occasional inappropriate activation
144366          * leading to pressure on the active list is not a problem.
144367          */
144368 -       refault_distance = (refault - eviction) & EVICTION_MASK;
144369 +       refault_distance = (refault - eviction) & (EVICTION_MASK >> WORKINGSET_WIDTH);
144371         /*
144372          * The activation decision for this page is made at the level
144373 @@ -594,7 +707,7 @@ static int __init workingset_init(void)
144374         unsigned int max_order;
144375         int ret;
144377 -       BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
144378 +       BUILD_BUG_ON(EVICTION_SHIFT < WORKINGSET_WIDTH);
144379         /*
144380          * Calculate the eviction bucket size to cover the longest
144381          * actionable refault distance, which is currently half of
144382 @@ -602,7 +715,7 @@ static int __init workingset_init(void)
144383          * some more pages at runtime, so keep working with up to
144384          * double the initial memory by using totalram_pages as-is.
144385          */
144386 -       timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
144387 +       timestamp_bits = EVICTION_SHIFT - WORKINGSET_WIDTH;
144388         max_order = fls_long(totalram_pages() - 1);
144389         if (max_order > timestamp_bits)
144390                 bucket_order = max_order - timestamp_bits;
144391 diff --git a/net/bluetooth/ecdh_helper.h b/net/bluetooth/ecdh_helper.h
144392 index a6f8d03d4aaf..830723971cf8 100644
144393 --- a/net/bluetooth/ecdh_helper.h
144394 +++ b/net/bluetooth/ecdh_helper.h
144395 @@ -25,6 +25,6 @@
144397  int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 pair_public_key[64],
144398                         u8 secret[32]);
144399 -int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 *private_key);
144400 +int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]);
144401  int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]);
144402  int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]);
144403 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
144404 index 6ffa89e3ba0a..f72646690539 100644
144405 --- a/net/bluetooth/hci_conn.c
144406 +++ b/net/bluetooth/hci_conn.c
144407 @@ -1830,8 +1830,6 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
144409         u32 phys = 0;
144411 -       hci_dev_lock(conn->hdev);
144413         /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
144414          * Table 6.2: Packets defined for synchronous, asynchronous, and
144415          * CSB logical transport types.
144416 @@ -1928,7 +1926,5 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
144417                 break;
144418         }
144420 -       hci_dev_unlock(conn->hdev);
144422         return phys;
144424 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
144425 index 67668be3461e..82f4973a011d 100644
144426 --- a/net/bluetooth/hci_event.c
144427 +++ b/net/bluetooth/hci_event.c
144428 @@ -5005,6 +5005,7 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
144429                 return;
144431         hchan->handle = le16_to_cpu(ev->handle);
144432 +       hchan->amp = true;
144434         BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
144436 @@ -5037,7 +5038,7 @@ static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
144437         hci_dev_lock(hdev);
144439         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
144440 -       if (!hchan)
144441 +       if (!hchan || !hchan->amp)
144442                 goto unlock;
144444         amp_destroy_logical_link(hchan, ev->reason);
144445 @@ -5911,7 +5912,7 @@ static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
144447         BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
144449 -       if (!ev->status)
144450 +       if (ev->status)
144451                 return;
144453         hci_dev_lock(hdev);
144454 diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
144455 index e55976db4403..805ce546b813 100644
144456 --- a/net/bluetooth/hci_request.c
144457 +++ b/net/bluetooth/hci_request.c
144458 @@ -272,12 +272,16 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
144460         int ret;
144462 -       if (!test_bit(HCI_UP, &hdev->flags))
144463 -               return -ENETDOWN;
144465         /* Serialize all requests */
144466         hci_req_sync_lock(hdev);
144467 -       ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
144468 +       /* check the state after obtaing the lock to protect the HCI_UP
144469 +        * against any races from hci_dev_do_close when the controller
144470 +        * gets removed.
144471 +        */
144472 +       if (test_bit(HCI_UP, &hdev->flags))
144473 +               ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
144474 +       else
144475 +               ret = -ENETDOWN;
144476         hci_req_sync_unlock(hdev);
144478         return ret;
144479 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
144480 index 72c2f5226d67..53ddbee459b9 100644
144481 --- a/net/bluetooth/l2cap_core.c
144482 +++ b/net/bluetooth/l2cap_core.c
144483 @@ -451,6 +451,8 @@ struct l2cap_chan *l2cap_chan_create(void)
144484         if (!chan)
144485                 return NULL;
144487 +       skb_queue_head_init(&chan->tx_q);
144488 +       skb_queue_head_init(&chan->srej_q);
144489         mutex_init(&chan->lock);
144491         /* Set default lock nesting level */
144492 @@ -516,7 +518,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
144493         chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
144494         chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
144495         chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
144497         chan->conf_state = 0;
144498 +       set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
144500         set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
144502 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
144503 index f1b1edd0b697..c99d65ef13b1 100644
144504 --- a/net/bluetooth/l2cap_sock.c
144505 +++ b/net/bluetooth/l2cap_sock.c
144506 @@ -179,9 +179,17 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
144507         struct l2cap_chan *chan = l2cap_pi(sk)->chan;
144508         struct sockaddr_l2 la;
144509         int len, err = 0;
144510 +       bool zapped;
144512         BT_DBG("sk %p", sk);
144514 +       lock_sock(sk);
144515 +       zapped = sock_flag(sk, SOCK_ZAPPED);
144516 +       release_sock(sk);
144518 +       if (zapped)
144519 +               return -EINVAL;
144521         if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
144522             addr->sa_family != AF_BLUETOOTH)
144523                 return -EINVAL;
144524 diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
144525 index 74971b4bd457..939c6f77fecc 100644
144526 --- a/net/bluetooth/mgmt.c
144527 +++ b/net/bluetooth/mgmt.c
144528 @@ -7976,7 +7976,6 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
144529                 goto unlock;
144530         }
144532 -       hdev->cur_adv_instance = cp->instance;
144533         /* Submit request for advertising params if ext adv available */
144534         if (ext_adv_capable(hdev)) {
144535                 hci_req_init(&req, hdev);
144536 diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
144537 index dfec65eca8a6..3db1def4437b 100644
144538 --- a/net/bridge/br_arp_nd_proxy.c
144539 +++ b/net/bridge/br_arp_nd_proxy.c
144540 @@ -160,7 +160,9 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
144541         if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
144542                 if (p && (p->flags & BR_NEIGH_SUPPRESS))
144543                         return;
144544 -               if (ipv4_is_zeronet(sip) || sip == tip) {
144545 +               if (parp->ar_op != htons(ARPOP_RREQUEST) &&
144546 +                   parp->ar_op != htons(ARPOP_RREPLY) &&
144547 +                   (ipv4_is_zeronet(sip) || sip == tip)) {
144548                         /* prevent flooding to neigh suppress ports */
144549                         BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
144550                         return;
144551 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
144552 index 9d265447d654..226bb05c3b42 100644
144553 --- a/net/bridge/br_multicast.c
144554 +++ b/net/bridge/br_multicast.c
144555 @@ -1593,7 +1593,8 @@ static void br_multicast_port_group_rexmit(struct timer_list *t)
144556         spin_unlock(&br->multicast_lock);
144559 -static void br_mc_disabled_update(struct net_device *dev, bool value)
144560 +static int br_mc_disabled_update(struct net_device *dev, bool value,
144561 +                                struct netlink_ext_ack *extack)
144563         struct switchdev_attr attr = {
144564                 .orig_dev = dev,
144565 @@ -1602,11 +1603,13 @@ static void br_mc_disabled_update(struct net_device *dev, bool value)
144566                 .u.mc_disabled = !value,
144567         };
144569 -       switchdev_port_attr_set(dev, &attr, NULL);
144570 +       return switchdev_port_attr_set(dev, &attr, extack);
144573  int br_multicast_add_port(struct net_bridge_port *port)
144575 +       int err;
144577         port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
144578         port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
144580 @@ -1618,8 +1621,12 @@ int br_multicast_add_port(struct net_bridge_port *port)
144581         timer_setup(&port->ip6_own_query.timer,
144582                     br_ip6_multicast_port_query_expired, 0);
144583  #endif
144584 -       br_mc_disabled_update(port->dev,
144585 -                             br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
144586 +       err = br_mc_disabled_update(port->dev,
144587 +                                   br_opt_get(port->br,
144588 +                                              BROPT_MULTICAST_ENABLED),
144589 +                                   NULL);
144590 +       if (err && err != -EOPNOTSUPP)
144591 +               return err;
144593         port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
144594         if (!port->mcast_stats)
144595 @@ -3152,25 +3159,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
144598  #if IS_ENABLED(CONFIG_IPV6)
144599 -static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
144600 -                                   struct net_bridge_port *port,
144601 -                                   struct sk_buff *skb)
144602 +static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
144603 +                                    struct net_bridge_port *port,
144604 +                                    struct sk_buff *skb)
144606 -       int ret;
144608 -       if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
144609 -               return -ENOMSG;
144611 -       ret = ipv6_mc_check_icmpv6(skb);
144612 -       if (ret < 0)
144613 -               return ret;
144615         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
144616 -               return -ENOMSG;
144617 +               return;
144619         br_multicast_mark_router(br, port);
144621 -       return 0;
144624  static int br_multicast_ipv6_rcv(struct net_bridge *br,
144625 @@ -3184,18 +3180,12 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
144627         err = ipv6_mc_check_mld(skb);
144629 -       if (err == -ENOMSG) {
144630 +       if (err == -ENOMSG || err == -ENODATA) {
144631                 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
144632                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
144634 -               if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
144635 -                       err = br_ip6_multicast_mrd_rcv(br, port, skb);
144637 -                       if (err < 0 && err != -ENOMSG) {
144638 -                               br_multicast_err_count(br, port, skb->protocol);
144639 -                               return err;
144640 -                       }
144641 -               }
144642 +               if (err == -ENODATA &&
144643 +                   ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
144644 +                       br_ip6_multicast_mrd_rcv(br, port, skb);
144646                 return 0;
144647         } else if (err < 0) {
144648 @@ -3560,16 +3550,23 @@ static void br_multicast_start_querier(struct net_bridge *br,
144649         rcu_read_unlock();
144652 -int br_multicast_toggle(struct net_bridge *br, unsigned long val)
144653 +int br_multicast_toggle(struct net_bridge *br, unsigned long val,
144654 +                       struct netlink_ext_ack *extack)
144656         struct net_bridge_port *port;
144657         bool change_snoopers = false;
144658 +       int err = 0;
144660         spin_lock_bh(&br->multicast_lock);
144661         if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
144662                 goto unlock;
144664 -       br_mc_disabled_update(br->dev, val);
144665 +       err = br_mc_disabled_update(br->dev, val, extack);
144666 +       if (err == -EOPNOTSUPP)
144667 +               err = 0;
144668 +       if (err)
144669 +               goto unlock;
144671         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
144672         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
144673                 change_snoopers = true;
144674 @@ -3607,7 +3604,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
144675                         br_multicast_leave_snoopers(br);
144676         }
144678 -       return 0;
144679 +       return err;
144682  bool br_multicast_enabled(const struct net_device *dev)
144683 diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
144684 index f2b1343f8332..0456593aceec 100644
144685 --- a/net/bridge/br_netlink.c
144686 +++ b/net/bridge/br_netlink.c
144687 @@ -1293,7 +1293,9 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
144688         if (data[IFLA_BR_MCAST_SNOOPING]) {
144689                 u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
144691 -               br_multicast_toggle(br, mcast_snooping);
144692 +               err = br_multicast_toggle(br, mcast_snooping, extack);
144693 +               if (err)
144694 +                       return err;
144695         }
144697         if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
144698 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
144699 index d7d167e10b70..af3430c2d6ea 100644
144700 --- a/net/bridge/br_private.h
144701 +++ b/net/bridge/br_private.h
144702 @@ -810,7 +810,8 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
144703                         struct sk_buff *skb, bool local_rcv, bool local_orig);
144704  int br_multicast_set_router(struct net_bridge *br, unsigned long val);
144705  int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
144706 -int br_multicast_toggle(struct net_bridge *br, unsigned long val);
144707 +int br_multicast_toggle(struct net_bridge *br, unsigned long val,
144708 +                       struct netlink_ext_ack *extack);
144709  int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
144710  int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
144711  int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val);
144712 diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
144713 index 072e29840082..381467b691d5 100644
144714 --- a/net/bridge/br_sysfs_br.c
144715 +++ b/net/bridge/br_sysfs_br.c
144716 @@ -409,17 +409,11 @@ static ssize_t multicast_snooping_show(struct device *d,
144717         return sprintf(buf, "%d\n", br_opt_get(br, BROPT_MULTICAST_ENABLED));
144720 -static int toggle_multicast(struct net_bridge *br, unsigned long val,
144721 -                           struct netlink_ext_ack *extack)
144723 -       return br_multicast_toggle(br, val);
144726  static ssize_t multicast_snooping_store(struct device *d,
144727                                         struct device_attribute *attr,
144728                                         const char *buf, size_t len)
144730 -       return store_bridge_parm(d, buf, len, toggle_multicast);
144731 +       return store_bridge_parm(d, buf, len, br_multicast_toggle);
144733  static DEVICE_ATTR_RW(multicast_snooping);
144735 diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
144736 index ca44c327bace..79641c4afee9 100644
144737 --- a/net/ceph/auth_x.c
144738 +++ b/net/ceph/auth_x.c
144739 @@ -526,7 +526,7 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
144740                 if (ret < 0)
144741                         return ret;
144743 -               auth->struct_v = 2;  /* nautilus+ */
144744 +               auth->struct_v = 3;  /* nautilus+ */
144745                 auth->key = 0;
144746                 for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++)
144747                         auth->key ^= *(__le64 *)u;
144748 diff --git a/net/ceph/decode.c b/net/ceph/decode.c
144749 index b44f7651be04..bc109a1a4616 100644
144750 --- a/net/ceph/decode.c
144751 +++ b/net/ceph/decode.c
144752 @@ -4,6 +4,7 @@
144753  #include <linux/inet.h>
144755  #include <linux/ceph/decode.h>
144756 +#include <linux/ceph/messenger.h>  /* for ceph_pr_addr() */
144758  static int
144759  ceph_decode_entity_addr_versioned(void **p, void *end,
144760 @@ -110,6 +111,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
144761         }
144763         ceph_decode_32_safe(p, end, addr_cnt, e_inval);
144764 +       dout("%s addr_cnt %d\n", __func__, addr_cnt);
144766         found = false;
144767         for (i = 0; i < addr_cnt; i++) {
144768 @@ -117,6 +119,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
144769                 if (ret)
144770                         return ret;
144772 +               dout("%s i %d addr %s\n", __func__, i, ceph_pr_addr(&tmp_addr));
144773                 if (tmp_addr.type == my_type) {
144774                         if (found) {
144775                                 pr_err("another match of type %d in addrvec\n",
144776 @@ -128,13 +131,18 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
144777                         found = true;
144778                 }
144779         }
144780 -       if (!found && addr_cnt != 0) {
144781 -               pr_err("no match of type %d in addrvec\n",
144782 -                      le32_to_cpu(my_type));
144783 -               return -ENOENT;
144784 -       }
144786 -       return 0;
144787 +       if (found)
144788 +               return 0;
144790 +       if (!addr_cnt)
144791 +               return 0;  /* normal -- e.g. unused OSD id/slot */
144793 +       if (addr_cnt == 1 && !memchr_inv(&tmp_addr, 0, sizeof(tmp_addr)))
144794 +               return 0;  /* weird but effectively the same as !addr_cnt */
144796 +       pr_err("no match of type %d in addrvec\n", le32_to_cpu(my_type));
144797 +       return -ENOENT;
144799  e_inval:
144800         return -EINVAL;
144801 diff --git a/net/core/dev.c b/net/core/dev.c
144802 index 1f79b9aa9a3f..70829c568645 100644
144803 --- a/net/core/dev.c
144804 +++ b/net/core/dev.c
144805 @@ -4672,10 +4672,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
144806         void *orig_data, *orig_data_end, *hard_start;
144807         struct netdev_rx_queue *rxqueue;
144808         u32 metalen, act = XDP_DROP;
144809 +       bool orig_bcast, orig_host;
144810         u32 mac_len, frame_sz;
144811         __be16 orig_eth_type;
144812         struct ethhdr *eth;
144813 -       bool orig_bcast;
144814         int off;
144816         /* Reinjected packets coming from act_mirred or similar should
144817 @@ -4722,6 +4722,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
144818         orig_data_end = xdp->data_end;
144819         orig_data = xdp->data;
144820         eth = (struct ethhdr *)xdp->data;
144821 +       orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
144822         orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
144823         orig_eth_type = eth->h_proto;
144825 @@ -4749,8 +4750,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
144826         /* check if XDP changed eth hdr such SKB needs update */
144827         eth = (struct ethhdr *)xdp->data;
144828         if ((orig_eth_type != eth->h_proto) ||
144829 +           (orig_host != ether_addr_equal_64bits(eth->h_dest,
144830 +                                                 skb->dev->dev_addr)) ||
144831             (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
144832                 __skb_push(skb, ETH_HLEN);
144833 +               skb->pkt_type = PACKET_HOST;
144834                 skb->protocol = eth_type_trans(skb, skb->dev);
144835         }
144837 @@ -5914,7 +5918,7 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
144838         return head;
144841 -static void skb_gro_reset_offset(struct sk_buff *skb)
144842 +static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
144844         const struct skb_shared_info *pinfo = skb_shinfo(skb);
144845         const skb_frag_t *frag0 = &pinfo->frags[0];
144846 @@ -5925,7 +5929,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
144848         if (!skb_headlen(skb) && pinfo->nr_frags &&
144849             !PageHighMem(skb_frag_page(frag0)) &&
144850 -           (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
144851 +           (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
144852                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
144853                 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
144854                                                     skb_frag_size(frag0),
144855 @@ -6143,7 +6147,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
144856         skb_mark_napi_id(skb, napi);
144857         trace_napi_gro_receive_entry(skb);
144859 -       skb_gro_reset_offset(skb);
144860 +       skb_gro_reset_offset(skb, 0);
144862         ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
144863         trace_napi_gro_receive_exit(ret);
144864 @@ -6232,7 +6236,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
144865         napi->skb = NULL;
144867         skb_reset_mac_header(skb);
144868 -       skb_gro_reset_offset(skb);
144869 +       skb_gro_reset_offset(skb, hlen);
144871         if (unlikely(skb_gro_header_hard(skb, hlen))) {
144872                 eth = skb_gro_header_slow(skb, hlen, 0);
144873 diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
144874 index a96a4f5de0ce..3f36b04d86a0 100644
144875 --- a/net/core/flow_dissector.c
144876 +++ b/net/core/flow_dissector.c
144877 @@ -828,8 +828,10 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
144878                 key_addrs = skb_flow_dissector_target(flow_dissector,
144879                                                       FLOW_DISSECTOR_KEY_IPV6_ADDRS,
144880                                                       target_container);
144881 -               memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src,
144882 -                      sizeof(key_addrs->v6addrs));
144883 +               memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
144884 +                      sizeof(key_addrs->v6addrs.src));
144885 +               memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
144886 +                      sizeof(key_addrs->v6addrs.dst));
144887                 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
144888         }
144890 diff --git a/net/core/page_pool.c b/net/core/page_pool.c
144891 index ad8b0707af04..f014fd8c19a6 100644
144892 --- a/net/core/page_pool.c
144893 +++ b/net/core/page_pool.c
144894 @@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
144895                                           struct page *page,
144896                                           unsigned int dma_sync_size)
144898 +       dma_addr_t dma_addr = page_pool_get_dma_addr(page);
144900         dma_sync_size = min(dma_sync_size, pool->p.max_len);
144901 -       dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
144902 +       dma_sync_single_range_for_device(pool->p.dev, dma_addr,
144903                                          pool->p.offset, dma_sync_size,
144904                                          pool->p.dma_dir);
144906 @@ -226,7 +228,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
144907                 put_page(page);
144908                 return NULL;
144909         }
144910 -       page->dma_addr = dma;
144911 +       page_pool_set_dma_addr(page, dma);
144913         if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
144914                 page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
144915 @@ -294,13 +296,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
144916                  */
144917                 goto skip_dma_unmap;
144919 -       dma = page->dma_addr;
144920 +       dma = page_pool_get_dma_addr(page);
144922 -       /* When page is unmapped, it cannot be returned our pool */
144923 +       /* When page is unmapped, it cannot be returned to our pool */
144924         dma_unmap_page_attrs(pool->p.dev, dma,
144925                              PAGE_SIZE << pool->p.order, pool->p.dma_dir,
144926                              DMA_ATTR_SKIP_CPU_SYNC);
144927 -       page->dma_addr = 0;
144928 +       page_pool_set_dma_addr(page, 0);
144929  skip_dma_unmap:
144930         /* This may be the last page returned, releasing the pool, so
144931          * it is not safe to reference pool afterwards.
144932 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
144933 index 3fba429f1f57..9a3a9a6eb837 100644
144934 --- a/net/core/pktgen.c
144935 +++ b/net/core/pktgen.c
144936 @@ -1894,7 +1894,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
144937                 mutex_unlock(&pktgen_thread_lock);
144938                 pr_debug("%s: waiting for %s to disappear....\n",
144939                          __func__, ifname);
144940 -               schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
144941 +               schedule_msec_hrtimeout_interruptible((msec_per_try));
144942                 mutex_lock(&pktgen_thread_lock);
144944                 if (++i >= max_tries) {
144945 diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
144946 index 771688e1b0da..2603966da904 100644
144947 --- a/net/ethtool/ioctl.c
144948 +++ b/net/ethtool/ioctl.c
144949 @@ -489,7 +489,7 @@ store_link_ksettings_for_user(void __user *to,
144951         struct ethtool_link_usettings link_usettings;
144953 -       memcpy(&link_usettings.base, &from->base, sizeof(link_usettings));
144954 +       memcpy(&link_usettings, from, sizeof(link_usettings));
144955         bitmap_to_arr32(link_usettings.link_modes.supported,
144956                         from->link_modes.supported,
144957                         __ETHTOOL_LINK_MODE_MASK_NBITS);
144958 diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
144959 index 50d3c8896f91..25a55086d2b6 100644
144960 --- a/net/ethtool/netlink.c
144961 +++ b/net/ethtool/netlink.c
144962 @@ -384,7 +384,8 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
144963         int ret;
144965         ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
144966 -                          &ethtool_genl_family, 0, ctx->ops->reply_cmd);
144967 +                          &ethtool_genl_family, NLM_F_MULTI,
144968 +                          ctx->ops->reply_cmd);
144969         if (!ehdr)
144970                 return -EMSGSIZE;
144972 diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
144973 index 87983e70f03f..a833a7a67ce7 100644
144974 --- a/net/ipv4/Kconfig
144975 +++ b/net/ipv4/Kconfig
144976 @@ -669,6 +669,24 @@ config TCP_CONG_BBR
144977           AQM schemes that do not provide a delay signal. It requires the fq
144978           ("Fair Queue") pacing packet scheduler.
144980 +config TCP_CONG_BBR2
144981 +       tristate "BBR2 TCP"
144982 +       default n
144983 +       help
144985 +       BBR2 TCP congestion control is a model-based congestion control
144986 +       algorithm that aims to maximize network utilization, keep queues and
144987 +       retransmit rates low, and to be able to coexist with Reno/CUBIC in
144988 +       common scenarios. It builds an explicit model of the network path.  It
144989 +       tolerates a targeted degree of random packet loss and delay that are
144990 +       unrelated to congestion. It can operate over LAN, WAN, cellular, wifi,
144991 +       or cable modem links, and can use DCTCP-L4S-style ECN signals.  It can
144992 +       coexist with flows that use loss-based congestion control, and can
144993 +       operate with shallow buffers, deep buffers, bufferbloat, policers, or
144994 +       AQM schemes that do not provide a delay signal. It requires pacing,
144995 +       using either TCP internal pacing or the fq ("Fair Queue") pacing packet
144996 +       scheduler.
144998  choice
144999         prompt "Default TCP congestion control"
145000         default DEFAULT_CUBIC
145001 @@ -706,6 +724,9 @@ choice
145002         config DEFAULT_BBR
145003                 bool "BBR" if TCP_CONG_BBR=y
145005 +       config DEFAULT_BBR2
145006 +               bool "BBR2" if TCP_CONG_BBR2=y
145008         config DEFAULT_RENO
145009                 bool "Reno"
145010  endchoice
145011 @@ -730,6 +751,7 @@ config DEFAULT_TCP_CONG
145012         default "dctcp" if DEFAULT_DCTCP
145013         default "cdg" if DEFAULT_CDG
145014         default "bbr" if DEFAULT_BBR
145015 +       default "bbr2" if DEFAULT_BBR2
145016         default "cubic"
145018  config TCP_MD5SIG
145019 diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
145020 index 5b77a46885b9..8c5779dba462 100644
145021 --- a/net/ipv4/Makefile
145022 +++ b/net/ipv4/Makefile
145023 @@ -46,6 +46,7 @@ obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
145024  obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
145025  obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o
145026  obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o
145027 +obj-$(CONFIG_TCP_CONG_BBR2) += tcp_bbr2.o
145028  obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
145029  obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
145030  obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
145031 diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
145032 index d520e61649c8..22129c1c56a2 100644
145033 --- a/net/ipv4/bpf_tcp_ca.c
145034 +++ b/net/ipv4/bpf_tcp_ca.c
145035 @@ -16,7 +16,7 @@ static u32 optional_ops[] = {
145036         offsetof(struct tcp_congestion_ops, cwnd_event),
145037         offsetof(struct tcp_congestion_ops, in_ack_event),
145038         offsetof(struct tcp_congestion_ops, pkts_acked),
145039 -       offsetof(struct tcp_congestion_ops, min_tso_segs),
145040 +       offsetof(struct tcp_congestion_ops, tso_segs),
145041         offsetof(struct tcp_congestion_ops, sndbuf_expand),
145042         offsetof(struct tcp_congestion_ops, cong_control),
145044 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
145045 index bba150fdd265..d635b4f32d34 100644
145046 --- a/net/ipv4/route.c
145047 +++ b/net/ipv4/route.c
145048 @@ -66,6 +66,7 @@
145049  #include <linux/types.h>
145050  #include <linux/kernel.h>
145051  #include <linux/mm.h>
145052 +#include <linux/memblock.h>
145053  #include <linux/string.h>
145054  #include <linux/socket.h>
145055  #include <linux/sockios.h>
145056 @@ -478,8 +479,10 @@ static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
145057         __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
145060 -#define IP_IDENTS_SZ 2048u
145062 +/* Hash tables of size 2048..262144 depending on RAM size.
145063 + * Each bucket uses 8 bytes.
145064 + */
145065 +static u32 ip_idents_mask __read_mostly;
145066  static atomic_t *ip_idents __read_mostly;
145067  static u32 *ip_tstamps __read_mostly;
145069 @@ -489,12 +492,16 @@ static u32 *ip_tstamps __read_mostly;
145070   */
145071  u32 ip_idents_reserve(u32 hash, int segs)
145073 -       u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
145074 -       atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
145075 -       u32 old = READ_ONCE(*p_tstamp);
145076 -       u32 now = (u32)jiffies;
145077 +       u32 bucket, old, now = (u32)jiffies;
145078 +       atomic_t *p_id;
145079 +       u32 *p_tstamp;
145080         u32 delta = 0;
145082 +       bucket = hash & ip_idents_mask;
145083 +       p_tstamp = ip_tstamps + bucket;
145084 +       p_id = ip_idents + bucket;
145085 +       old = READ_ONCE(*p_tstamp);
145087         if (old != now && cmpxchg(p_tstamp, old, now) == old)
145088                 delta = prandom_u32_max(now - old);
145090 @@ -3553,18 +3560,25 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
145092  int __init ip_rt_init(void)
145094 +       void *idents_hash;
145095         int cpu;
145097 -       ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
145098 -                                 GFP_KERNEL);
145099 -       if (!ip_idents)
145100 -               panic("IP: failed to allocate ip_idents\n");
145101 +       /* For modern hosts, this will use 2 MB of memory */
145102 +       idents_hash = alloc_large_system_hash("IP idents",
145103 +                                             sizeof(*ip_idents) + sizeof(*ip_tstamps),
145104 +                                             0,
145105 +                                             16, /* one bucket per 64 KB */
145106 +                                             HASH_ZERO,
145107 +                                             NULL,
145108 +                                             &ip_idents_mask,
145109 +                                             2048,
145110 +                                             256*1024);
145112 +       ip_idents = idents_hash;
145114 -       prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
145115 +       prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
145117 -       ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
145118 -       if (!ip_tstamps)
145119 -               panic("IP: failed to allocate ip_tstamps\n");
145120 +       ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
145122         for_each_possible_cpu(cpu) {
145123                 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
145124 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
145125 index de7cc8445ac0..521f310f2ac1 100644
145126 --- a/net/ipv4/tcp.c
145127 +++ b/net/ipv4/tcp.c
145128 @@ -3033,6 +3033,7 @@ int tcp_disconnect(struct sock *sk, int flags)
145129         tp->rx_opt.dsack = 0;
145130         tp->rx_opt.num_sacks = 0;
145131         tp->rcv_ooopack = 0;
145132 +       tp->fast_ack_mode = 0;
145135         /* Clean up fastopen related fields */
145136 diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
145137 index 6ea3dc2e4219..8ef512fefe25 100644
145138 --- a/net/ipv4/tcp_bbr.c
145139 +++ b/net/ipv4/tcp_bbr.c
145140 @@ -292,26 +292,40 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
145141                 sk->sk_pacing_rate = rate;
145144 -/* override sysctl_tcp_min_tso_segs */
145145  static u32 bbr_min_tso_segs(struct sock *sk)
145147         return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
145150 +/* Return the number of segments BBR would like in a TSO/GSO skb, given
145151 + * a particular max gso size as a constraint.
145152 + */
145153 +static u32 bbr_tso_segs_generic(struct sock *sk, unsigned int mss_now,
145154 +                               u32 gso_max_size)
145156 +       u32 segs;
145157 +       u64 bytes;
145159 +       /* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
145160 +       bytes = sk->sk_pacing_rate >> sk->sk_pacing_shift;
145162 +       bytes = min_t(u32, bytes, gso_max_size - 1 - MAX_TCP_HEADER);
145163 +       segs = max_t(u32, bytes / mss_now, bbr_min_tso_segs(sk));
145164 +       return segs;
145167 +/* Custom tcp_tso_autosize() for BBR, used at transmit time to cap skb size. */
145168 +static u32  bbr_tso_segs(struct sock *sk, unsigned int mss_now)
145170 +       return bbr_tso_segs_generic(sk, mss_now, sk->sk_gso_max_size);
145173 +/* Like bbr_tso_segs(), using mss_cache, ignoring driver's sk_gso_max_size. */
145174  static u32 bbr_tso_segs_goal(struct sock *sk)
145176         struct tcp_sock *tp = tcp_sk(sk);
145177 -       u32 segs, bytes;
145179 -       /* Sort of tcp_tso_autosize() but ignoring
145180 -        * driver provided sk_gso_max_size.
145181 -        */
145182 -       bytes = min_t(unsigned long,
145183 -                     sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
145184 -                     GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
145185 -       segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
145187 -       return min(segs, 0x7FU);
145188 +       return  bbr_tso_segs_generic(sk, tp->mss_cache, GSO_MAX_SIZE);
145191  /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
145192 @@ -1147,7 +1161,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
145193         .undo_cwnd      = bbr_undo_cwnd,
145194         .cwnd_event     = bbr_cwnd_event,
145195         .ssthresh       = bbr_ssthresh,
145196 -       .min_tso_segs   = bbr_min_tso_segs,
145197 +       .tso_segs       = bbr_tso_segs,
145198         .get_info       = bbr_get_info,
145199         .set_state      = bbr_set_state,
145201 diff --git a/net/ipv4/tcp_bbr2.c b/net/ipv4/tcp_bbr2.c
145202 new file mode 100644
145203 index 000000000000..5510adc92bbb
145204 --- /dev/null
145205 +++ b/net/ipv4/tcp_bbr2.c
145206 @@ -0,0 +1,2671 @@
145207 +/* BBR (Bottleneck Bandwidth and RTT) congestion control, v2
145209 + * BBRv2 is a model-based congestion control algorithm that aims for low
145210 + * queues, low loss, and (bounded) Reno/CUBIC coexistence. To maintain a model
145211 + * of the network path, it uses measurements of bandwidth and RTT, as well as
145212 + * (if they occur) packet loss and/or DCTCP/L4S-style ECN signals.  Note that
145213 + * although it can use ECN or loss signals explicitly, it does not require
145214 + * either; it can bound its in-flight data based on its estimate of the BDP.
145216 + * The model has both higher and lower bounds for the operating range:
145217 + *   lo: bw_lo, inflight_lo: conservative short-term lower bound
145218 + *   hi: bw_hi, inflight_hi: robust long-term upper bound
145219 + * The bandwidth-probing time scale is (a) extended dynamically based on
145220 + * estimated BDP to improve coexistence with Reno/CUBIC; (b) bounded by
145221 + * an interactive wall-clock time-scale to be more scalable and responsive
145222 + * than Reno and CUBIC.
145224 + * Here is a state transition diagram for BBR:
145226 + *             |
145227 + *             V
145228 + *    +---> STARTUP  ----+
145229 + *    |        |         |
145230 + *    |        V         |
145231 + *    |      DRAIN   ----+
145232 + *    |        |         |
145233 + *    |        V         |
145234 + *    +---> PROBE_BW ----+
145235 + *    |      ^    |      |
145236 + *    |      |    |      |
145237 + *    |      +----+      |
145238 + *    |                  |
145239 + *    +---- PROBE_RTT <--+
145241 + * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
145242 + * When it estimates the pipe is full, it enters DRAIN to drain the queue.
145243 + * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
145244 + * A long-lived BBR flow spends the vast majority of its time remaining
145245 + * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
145246 + * in a fair manner, with a small, bounded queue. *If* a flow has been
145247 + * continuously sending for the entire min_rtt window, and hasn't seen an RTT
145248 + * sample that matches or decreases its min_rtt estimate for 10 seconds, then
145249 + * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
145250 + * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
145251 + * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
145252 + * otherwise we enter STARTUP to try to fill the pipe.
145254 + * BBR is described in detail in:
145255 + *   "BBR: Congestion-Based Congestion Control",
145256 + *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
145257 + *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
145259 + * There is a public e-mail list for discussing BBR development and testing:
145260 + *   https://groups.google.com/forum/#!forum/bbr-dev
145262 + * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled,
145263 + * otherwise TCP stack falls back to an internal pacing using one high
145264 + * resolution timer per TCP socket and may use more resources.
145265 + */
145266 +#include <linux/module.h>
145267 +#include <net/tcp.h>
145268 +#include <linux/inet_diag.h>
145269 +#include <linux/inet.h>
145270 +#include <linux/random.h>
145272 +#include "tcp_dctcp.h"
145274 +/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
145275 + * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
145276 + * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
145277 + * Since the minimum window is >=4 packets, the lower bound isn't
145278 + * an issue. The upper bound isn't an issue with existing technologies.
145279 + */
145280 +#define BW_SCALE 24
145281 +#define BW_UNIT (1 << BW_SCALE)
145283 +#define BBR_SCALE 8    /* scaling factor for fractions in BBR (e.g. gains) */
145284 +#define BBR_UNIT (1 << BBR_SCALE)
145286 +#define FLAG_DEBUG_VERBOSE     0x1     /* Verbose debugging messages */
145287 +#define FLAG_DEBUG_LOOPBACK    0x2     /* Do NOT skip loopback addr */
145289 +#define CYCLE_LEN              8       /* number of phases in a pacing gain cycle */
145291 +/* BBR has the following modes for deciding how fast to send: */
145292 +enum bbr_mode {
145293 +       BBR_STARTUP,    /* ramp up sending rate rapidly to fill pipe */
145294 +       BBR_DRAIN,      /* drain any queue created during startup */
145295 +       BBR_PROBE_BW,   /* discover, share bw: pace around estimated bw */
145296 +       BBR_PROBE_RTT,  /* cut inflight to min to probe min_rtt */
145299 +/* How does the incoming ACK stream relate to our bandwidth probing? */
145300 +enum bbr_ack_phase {
145301 +       BBR_ACKS_INIT,            /* not probing; not getting probe feedback */
145302 +       BBR_ACKS_REFILLING,       /* sending at est. bw to fill pipe */
145303 +       BBR_ACKS_PROBE_STARTING,  /* inflight rising to probe bw */
145304 +       BBR_ACKS_PROBE_FEEDBACK,  /* getting feedback from bw probing */
145305 +       BBR_ACKS_PROBE_STOPPING,  /* stopped probing; still getting feedback */
145308 +/* BBR congestion control block */
145309 +struct bbr {
145310 +       u32     min_rtt_us;             /* min RTT in min_rtt_win_sec window */
145311 +       u32     min_rtt_stamp;          /* timestamp of min_rtt_us */
145312 +       u32     probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
145313 +       u32     probe_rtt_min_us;       /* min RTT in bbr_probe_rtt_win_ms window */
145314 +       u32     probe_rtt_min_stamp;    /* timestamp of probe_rtt_min_us*/
145315 +       u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
145316 +       u32     prior_rcv_nxt;  /* tp->rcv_nxt when CE state last changed */
145317 +       u64     cycle_mstamp;        /* time of this cycle phase start */
145318 +       u32     mode:3,              /* current bbr_mode in state machine */
145319 +               prev_ca_state:3,     /* CA state on previous ACK */
145320 +               packet_conservation:1,  /* use packet conservation? */
145321 +               round_start:1,       /* start of packet-timed tx->ack round? */
145322 +               ce_state:1,          /* If most recent data has CE bit set */
145323 +               bw_probe_up_rounds:5,   /* cwnd-limited rounds in PROBE_UP */
145324 +               try_fast_path:1,        /* can we take fast path? */
145325 +               unused2:11,
145326 +               idle_restart:1,      /* restarting after idle? */
145327 +               probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
145328 +               cycle_idx:3,    /* current index in pacing_gain cycle array */
145329 +               has_seen_rtt:1;      /* have we seen an RTT sample yet? */
145330 +       u32     pacing_gain:11, /* current gain for setting pacing rate */
145331 +               cwnd_gain:11,   /* current gain for setting cwnd */
145332 +               full_bw_reached:1,   /* reached full bw in Startup? */
145333 +               full_bw_cnt:2,  /* number of rounds without large bw gains */
145334 +               init_cwnd:7;    /* initial cwnd */
145335 +       u32     prior_cwnd;     /* prior cwnd upon entering loss recovery */
145336 +       u32     full_bw;        /* recent bw, to estimate if pipe is full */
145338 +       /* For tracking ACK aggregation: */
145339 +       u64     ack_epoch_mstamp;       /* start of ACK sampling epoch */
145340 +       u16     extra_acked[2];         /* max excess data ACKed in epoch */
145341 +       u32     ack_epoch_acked:20,     /* packets (S)ACKed in sampling epoch */
145342 +               extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
145343 +               extra_acked_win_idx:1,  /* current index in extra_acked array */
145344 +       /* BBR v2 state: */
145345 +               unused1:2,
145346 +               startup_ecn_rounds:2,   /* consecutive hi ECN STARTUP rounds */
145347 +               loss_in_cycle:1,        /* packet loss in this cycle? */
145348 +               ecn_in_cycle:1;         /* ECN in this cycle? */
145349 +       u32     loss_round_delivered; /* scb->tx.delivered ending loss round */
145350 +       u32     undo_bw_lo;          /* bw_lo before latest losses */
145351 +       u32     undo_inflight_lo;    /* inflight_lo before latest losses */
145352 +       u32     undo_inflight_hi;    /* inflight_hi before latest losses */
145353 +       u32     bw_latest;       /* max delivered bw in last round trip */
145354 +       u32     bw_lo;           /* lower bound on sending bandwidth */
145355 +       u32     bw_hi[2];        /* upper bound of sending bandwidth range*/
145356 +       u32     inflight_latest; /* max delivered data in last round trip */
145357 +       u32     inflight_lo;     /* lower bound of inflight data range */
145358 +       u32     inflight_hi;     /* upper bound of inflight data range */
145359 +       u32     bw_probe_up_cnt; /* packets delivered per inflight_hi incr */
145360 +       u32     bw_probe_up_acks;  /* packets (S)ACKed since inflight_hi incr */
145361 +       u32     probe_wait_us;   /* PROBE_DOWN until next clock-driven probe */
145362 +       u32     ecn_eligible:1, /* sender can use ECN (RTT, handshake)? */
145363 +               ecn_alpha:9,    /* EWMA delivered_ce/delivered; 0..256 */
145364 +               bw_probe_samples:1,    /* rate samples reflect bw probing? */
145365 +               prev_probe_too_high:1, /* did last PROBE_UP go too high? */
145366 +               stopped_risky_probe:1, /* last PROBE_UP stopped due to risk? */
145367 +               rounds_since_probe:8,  /* packet-timed rounds since probed bw */
145368 +               loss_round_start:1,    /* loss_round_delivered round trip? */
145369 +               loss_in_round:1,       /* loss marked in this round trip? */
145370 +               ecn_in_round:1,        /* ECN marked in this round trip? */
145371 +               ack_phase:3,           /* bbr_ack_phase: meaning of ACKs */
145372 +               loss_events_in_round:4,/* losses in STARTUP round */
145373 +               initialized:1;         /* has bbr_init() been called? */
145374 +       u32     alpha_last_delivered;    /* tp->delivered    at alpha update */
145375 +       u32     alpha_last_delivered_ce; /* tp->delivered_ce at alpha update */
145377 +       /* Params configurable using setsockopt. Refer to correspoding
145378 +        * module param for detailed description of params.
145379 +        */
145380 +       struct bbr_params {
145381 +               u32     high_gain:11,           /* max allowed value: 2047 */
145382 +                       drain_gain:10,          /* max allowed value: 1023 */
145383 +                       cwnd_gain:11;           /* max allowed value: 2047 */
145384 +               u32     cwnd_min_target:4,      /* max allowed value: 15 */
145385 +                       min_rtt_win_sec:5,      /* max allowed value: 31 */
145386 +                       probe_rtt_mode_ms:9,    /* max allowed value: 511 */
145387 +                       full_bw_cnt:3,          /* max allowed value: 7 */
145388 +                       cwnd_tso_budget:1,      /* allowed values: {0, 1} */
145389 +                       unused3:6,
145390 +                       drain_to_target:1,      /* boolean */
145391 +                       precise_ece_ack:1,      /* boolean */
145392 +                       extra_acked_in_startup:1, /* allowed values: {0, 1} */
145393 +                       fast_path:1;            /* boolean */
145394 +               u32     full_bw_thresh:10,      /* max allowed value: 1023 */
145395 +                       startup_cwnd_gain:11,   /* max allowed value: 2047 */
145396 +                       bw_probe_pif_gain:9,    /* max allowed value: 511 */
145397 +                       usage_based_cwnd:1,     /* boolean */
145398 +                       unused2:1;
145399 +               u16     probe_rtt_win_ms:14,    /* max allowed value: 16383 */
145400 +                       refill_add_inc:2;       /* max allowed value: 3 */
145401 +               u16     extra_acked_gain:11,    /* max allowed value: 2047 */
145402 +                       extra_acked_win_rtts:5; /* max allowed value: 31*/
145403 +               u16     pacing_gain[CYCLE_LEN]; /* max allowed value: 1023 */
145404 +               /* Mostly BBR v2 parameters below here: */
145405 +               u32     ecn_alpha_gain:8,       /* max allowed value: 255 */
145406 +                       ecn_factor:8,           /* max allowed value: 255 */
145407 +                       ecn_thresh:8,           /* max allowed value: 255 */
145408 +                       beta:8;                 /* max allowed value: 255 */
145409 +               u32     ecn_max_rtt_us:19,      /* max allowed value: 524287 */
145410 +                       bw_probe_reno_gain:9,   /* max allowed value: 511 */
145411 +                       full_loss_cnt:4;        /* max allowed value: 15 */
145412 +               u32     probe_rtt_cwnd_gain:8,  /* max allowed value: 255 */
145413 +                       inflight_headroom:8,    /* max allowed value: 255 */
145414 +                       loss_thresh:8,          /* max allowed value: 255 */
145415 +                       bw_probe_max_rounds:8;  /* max allowed value: 255 */
145416 +               u32     bw_probe_rand_rounds:4, /* max allowed value: 15 */
145417 +                       bw_probe_base_us:26,    /* usecs: 0..2^26-1 (67 secs) */
145418 +                       full_ecn_cnt:2;         /* max allowed value: 3 */
145419 +               u32     bw_probe_rand_us:26,    /* usecs: 0..2^26-1 (67 secs) */
145420 +                       undo:1,                 /* boolean */
145421 +                       tso_rtt_shift:4,        /* max allowed value: 15 */
145422 +                       unused5:1;
145423 +               u32     ecn_reprobe_gain:9,     /* max allowed value: 511 */
145424 +                       unused1:14,
145425 +                       ecn_alpha_init:9;       /* max allowed value: 256 */
145426 +       } params;
145428 +       struct {
145429 +               u32     snd_isn; /* Initial sequence number */
145430 +               u32     rs_bw;   /* last valid rate sample bw */
145431 +               u32     target_cwnd; /* target cwnd, based on BDP */
145432 +               u8      undo:1,  /* Undo even happened but not yet logged */
145433 +                       unused:7;
145434 +               char    event;   /* single-letter event debug codes */
145435 +               u16     unused2;
145436 +       } debug;
145439 +struct bbr_context {
145440 +       u32 sample_bw;
145441 +       u32 target_cwnd;
145442 +       u32 log:1;
145445 +/* Window length of min_rtt filter (in sec). Max allowed value is 31 (0x1F) */
145446 +static u32 bbr_min_rtt_win_sec = 10;
145447 +/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode.
145448 + * Max allowed value is 511 (0x1FF).
145449 + */
145450 +static u32 bbr_probe_rtt_mode_ms = 200;
145451 +/* Window length of probe_rtt_min_us filter (in ms), and consequently the
145452 + * typical interval between PROBE_RTT mode entries.
145453 + * Note that bbr_probe_rtt_win_ms must be <= bbr_min_rtt_win_sec * MSEC_PER_SEC
145454 + */
145455 +static u32 bbr_probe_rtt_win_ms = 5000;
145456 +/* Skip TSO below the following bandwidth (bits/sec): */
145457 +static int bbr_min_tso_rate = 1200000;
145459 +/* Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
145460 + * in bigger TSO bursts. By default we cut the RTT-based allowance in half
145461 + * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
145462 + * is below 1500 bytes after 6 * ~500 usec = 3ms.
145463 + */
145464 +static u32 bbr_tso_rtt_shift = 9;  /* halve allowance per 2^9 usecs, 512us */
145466 +/* Select cwnd TSO budget approach:
145467 + *  0: padding
145468 + *  1: flooring
145469 + */
145470 +static uint bbr_cwnd_tso_budget = 1;
145472 +/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
145473 + * In order to help drive the network toward lower queues and low latency while
145474 + * maintaining high utilization, the average pacing rate aims to be slightly
145475 + * lower than the estimated bandwidth. This is an important aspect of the
145476 + * design.
145477 + */
145478 +static const int bbr_pacing_margin_percent = 1;
145480 +/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
145481 + * that will allow a smoothly increasing pacing rate that will double each RTT
145482 + * and send the same number of packets per RTT that an un-paced, slow-starting
145483 + * Reno or CUBIC flow would. Max allowed value is 2047 (0x7FF).
145484 + */
145485 +static int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
145486 +/* The gain for deriving startup cwnd. Max allowed value is 2047 (0x7FF). */
145487 +static int bbr_startup_cwnd_gain  = BBR_UNIT * 2885 / 1000 + 1;
145488 +/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
145489 + * the queue created in BBR_STARTUP in a single round. Max allowed value
145490 + * is 1023 (0x3FF).
145491 + */
145492 +static int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
145493 +/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs.
145494 + * Max allowed value is 2047 (0x7FF).
145495 + */
145496 +static int bbr_cwnd_gain  = BBR_UNIT * 2;
145497 +/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw.
145498 + * Max allowed value for each element is 1023 (0x3FF).
145499 + */
145500 +enum bbr_pacing_gain_phase {
145501 +       BBR_BW_PROBE_UP         = 0,  /* push up inflight to probe for bw/vol */
145502 +       BBR_BW_PROBE_DOWN       = 1,  /* drain excess inflight from the queue */
145503 +       BBR_BW_PROBE_CRUISE     = 2,  /* use pipe, w/ headroom in queue/pipe */
145504 +       BBR_BW_PROBE_REFILL     = 3,  /* v2: refill the pipe again to 100% */
145506 +static int bbr_pacing_gain[] = {
145507 +       BBR_UNIT * 5 / 4,       /* probe for more available bw */
145508 +       BBR_UNIT * 3 / 4,       /* drain queue and/or yield bw to other flows */
145509 +       BBR_UNIT, BBR_UNIT, BBR_UNIT,   /* cruise at 1.0*bw to utilize pipe, */
145510 +       BBR_UNIT, BBR_UNIT, BBR_UNIT    /* without creating excess queue... */
145513 +/* Try to keep at least this many packets in flight, if things go smoothly. For
145514 + * smooth functioning, a sliding window protocol ACKing every other packet
145515 + * needs at least 4 packets in flight. Max allowed value is 15 (0xF).
145516 + */
145517 +static u32 bbr_cwnd_min_target = 4;
145519 +/* Cwnd to BDP proportion in PROBE_RTT mode scaled by BBR_UNIT. Default: 50%.
145520 + * Use 0 to disable. Max allowed value is 255.
145521 + */
145522 +static u32 bbr_probe_rtt_cwnd_gain = BBR_UNIT * 1 / 2;
145524 +/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
145525 +/* If bw has increased significantly (1.25x), there may be more bw available.
145526 + * Max allowed value is 1023 (0x3FF).
145527 + */
145528 +static u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
145529 +/* But after 3 rounds w/o significant bw growth, estimate pipe is full.
145530 + * Max allowed value is 7 (0x7).
145531 + */
145532 +static u32 bbr_full_bw_cnt = 3;
145534 +static u32 bbr_flags;          /* Debugging related stuff */
145536 +/* Whether to debug using printk.
145537 + */
145538 +static bool bbr_debug_with_printk;
145540 +/* Whether to debug using ftrace event tcp:tcp_bbr_event.
145541 + * Ignored when bbr_debug_with_printk is set.
145542 + */
145543 +static bool bbr_debug_ftrace;
145545 +/* Experiment: each cycle, try to hold sub-unity gain until inflight <= BDP. */
145546 +static bool bbr_drain_to_target = true;                /* default: enabled */
145548 +/* Experiment: Flags to control BBR with ECN behavior.
145549 + */
145550 +static bool bbr_precise_ece_ack = true;                /* default: enabled */
145552 +/* The max rwin scaling shift factor is 14 (RFC 1323), so the max sane rwin is
145553 + * (2^(16+14) B)/(1024 B/packet) = 1M packets.
145554 + */
145555 +static u32 bbr_cwnd_warn_val   = 1U << 20;
145557 +static u16 bbr_debug_port_mask;
145559 +/* BBR module parameters. These are module parameters only in Google prod.
145560 + * Upstream these are intentionally not module parameters.
145561 + */
145562 +static int bbr_pacing_gain_size = CYCLE_LEN;
145564 +/* Gain factor for adding extra_acked to target cwnd: */
145565 +static int bbr_extra_acked_gain = 256;
145567 +/* Window length of extra_acked window. Max allowed val is 31. */
145568 +static u32 bbr_extra_acked_win_rtts = 5;
145570 +/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
145571 +static u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
145573 +/* Time period for clamping cwnd increment due to ack aggregation */
145574 +static u32 bbr_extra_acked_max_us = 100 * 1000;
145576 +/* Use extra acked in startup ?
145577 + * 0: disabled
145578 + * 1: use latest extra_acked value from 1-2 rtt in startup
145579 + */
145580 +static int bbr_extra_acked_in_startup = 1;             /* default: enabled */
145582 +/* Experiment: don't grow cwnd beyond twice of what we just probed. */
145583 +static bool bbr_usage_based_cwnd;              /* default: disabled */
145585 +/* For lab testing, researchers can enable BBRv2 ECN support with this flag,
145586 + * when they know that any ECN marks that the connections experience will be
145587 + * DCTCP/L4S-style ECN marks, rather than RFC3168 ECN marks.
145588 + * TODO(ncardwell): Production use of the BBRv2 ECN functionality depends on
145589 + * negotiation or configuration that is outside the scope of the BBRv2
145590 + * alpha release.
145591 + */
145592 +static bool bbr_ecn_enable = false;
145594 +module_param_named(min_tso_rate,      bbr_min_tso_rate,      int,    0644);
145595 +module_param_named(tso_rtt_shift,     bbr_tso_rtt_shift,     int,    0644);
145596 +module_param_named(high_gain,         bbr_high_gain,         int,    0644);
145597 +module_param_named(drain_gain,        bbr_drain_gain,        int,    0644);
145598 +module_param_named(startup_cwnd_gain, bbr_startup_cwnd_gain, int,    0644);
145599 +module_param_named(cwnd_gain,         bbr_cwnd_gain,         int,    0644);
145600 +module_param_array_named(pacing_gain, bbr_pacing_gain,       int,
145601 +                        &bbr_pacing_gain_size, 0644);
145602 +module_param_named(cwnd_min_target,   bbr_cwnd_min_target,   uint,   0644);
145603 +module_param_named(probe_rtt_cwnd_gain,
145604 +                  bbr_probe_rtt_cwnd_gain,                  uint,   0664);
145605 +module_param_named(cwnd_warn_val,     bbr_cwnd_warn_val,     uint,   0664);
145606 +module_param_named(debug_port_mask,   bbr_debug_port_mask,   ushort, 0644);
145607 +module_param_named(flags,             bbr_flags,             uint,   0644);
145608 +module_param_named(debug_ftrace,      bbr_debug_ftrace, bool,   0644);
145609 +module_param_named(debug_with_printk, bbr_debug_with_printk, bool,   0644);
145610 +module_param_named(min_rtt_win_sec,   bbr_min_rtt_win_sec,   uint,   0644);
145611 +module_param_named(probe_rtt_mode_ms, bbr_probe_rtt_mode_ms, uint,   0644);
145612 +module_param_named(probe_rtt_win_ms,  bbr_probe_rtt_win_ms,  uint,   0644);
145613 +module_param_named(full_bw_thresh,    bbr_full_bw_thresh,    uint,   0644);
145614 +module_param_named(full_bw_cnt,       bbr_full_bw_cnt,       uint,   0644);
145615 +module_param_named(cwnd_tso_bduget,   bbr_cwnd_tso_budget,   uint,   0664);
145616 +module_param_named(extra_acked_gain,  bbr_extra_acked_gain,  int,    0664);
145617 +module_param_named(extra_acked_win_rtts,
145618 +                  bbr_extra_acked_win_rtts, uint,   0664);
145619 +module_param_named(extra_acked_max_us,
145620 +                  bbr_extra_acked_max_us, uint,   0664);
145621 +module_param_named(ack_epoch_acked_reset_thresh,
145622 +                  bbr_ack_epoch_acked_reset_thresh, uint,   0664);
145623 +module_param_named(drain_to_target,   bbr_drain_to_target,   bool,   0664);
145624 +module_param_named(precise_ece_ack,   bbr_precise_ece_ack,   bool,   0664);
145625 +module_param_named(extra_acked_in_startup,
145626 +                  bbr_extra_acked_in_startup, int, 0664);
145627 +module_param_named(usage_based_cwnd, bbr_usage_based_cwnd, bool,   0664);
145628 +module_param_named(ecn_enable,       bbr_ecn_enable,         bool,   0664);
145630 +static void bbr2_exit_probe_rtt(struct sock *sk);
145631 +static void bbr2_reset_congestion_signals(struct sock *sk);
145633 +static void bbr_check_probe_rtt_done(struct sock *sk);
145635 +/* Do we estimate that STARTUP filled the pipe? */
145636 +static bool bbr_full_bw_reached(const struct sock *sk)
145638 +       const struct bbr *bbr = inet_csk_ca(sk);
145640 +       return bbr->full_bw_reached;
145643 +/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
145644 +static u32 bbr_max_bw(const struct sock *sk)
145646 +       struct bbr *bbr = inet_csk_ca(sk);
145648 +       return max(bbr->bw_hi[0], bbr->bw_hi[1]);
145651 +/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
145652 +static u32 bbr_bw(const struct sock *sk)
145654 +       struct bbr *bbr = inet_csk_ca(sk);
145656 +       return min(bbr_max_bw(sk), bbr->bw_lo);
145659 +/* Return maximum extra acked in past k-2k round trips,
145660 + * where k = bbr_extra_acked_win_rtts.
145661 + */
145662 +static u16 bbr_extra_acked(const struct sock *sk)
145664 +       struct bbr *bbr = inet_csk_ca(sk);
145666 +       return max(bbr->extra_acked[0], bbr->extra_acked[1]);
145669 +/* Return rate in bytes per second, optionally with a gain.
145670 + * The order here is chosen carefully to avoid overflow of u64. This should
145671 + * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
145672 + */
145673 +static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain,
145674 +                                 int margin)
145676 +       unsigned int mss = tcp_sk(sk)->mss_cache;
145678 +       rate *= mss;
145679 +       rate *= gain;
145680 +       rate >>= BBR_SCALE;
145681 +       rate *= USEC_PER_SEC / 100 * (100 - margin);
145682 +       rate >>= BW_SCALE;
145683 +       rate = max(rate, 1ULL);
145684 +       return rate;
145687 +static u64 bbr_bw_bytes_per_sec(struct sock *sk, u64 rate)
145689 +       return bbr_rate_bytes_per_sec(sk, rate, BBR_UNIT, 0);
145692 +static u64 bbr_rate_kbps(struct sock *sk, u64 rate)
145694 +       rate = bbr_bw_bytes_per_sec(sk, rate);
145695 +       rate *= 8;
145696 +       do_div(rate, 1000);
145697 +       return rate;
145700 +static u32 bbr_tso_segs_goal(struct sock *sk);
145701 +static void bbr_debug(struct sock *sk, u32 acked,
145702 +                     const struct rate_sample *rs, struct bbr_context *ctx)
145704 +       static const char ca_states[] = {
145705 +               [TCP_CA_Open]           = 'O',
145706 +               [TCP_CA_Disorder]       = 'D',
145707 +               [TCP_CA_CWR]            = 'C',
145708 +               [TCP_CA_Recovery]       = 'R',
145709 +               [TCP_CA_Loss]           = 'L',
145710 +       };
145711 +       static const char mode[] = {
145712 +               'G',  /* Growing   - BBR_STARTUP */
145713 +               'D',  /* Drain     - BBR_DRAIN */
145714 +               'W',  /* Window    - BBR_PROBE_BW */
145715 +               'M',  /* Min RTT   - BBR_PROBE_RTT */
145716 +       };
145717 +       static const char ack_phase[] = { /* bbr_ack_phase strings */
145718 +               'I',    /* BBR_ACKS_INIT           - 'Init' */
145719 +               'R',    /* BBR_ACKS_REFILLING      - 'Refilling' */
145720 +               'B',    /* BBR_ACKS_PROBE_STARTING - 'Before' */
145721 +               'F',    /* BBR_ACKS_PROBE_FEEDBACK - 'Feedback' */
145722 +               'A',    /* BBR_ACKS_PROBE_STOPPING - 'After' */
145723 +       };
145724 +       struct tcp_sock *tp = tcp_sk(sk);
145725 +       struct bbr *bbr = inet_csk_ca(sk);
145726 +       const u32 una = tp->snd_una - bbr->debug.snd_isn;
145727 +       const u32 fack = tcp_highest_sack_seq(tp);
145728 +       const u16 dport = ntohs(inet_sk(sk)->inet_dport);
145729 +       bool is_port_match = (bbr_debug_port_mask &&
145730 +                             ((dport & bbr_debug_port_mask) == 0));
145731 +       char debugmsg[320];
145733 +       if (sk->sk_state == TCP_SYN_SENT)
145734 +               return;  /* no bbr_init() yet if SYN retransmit -> CA_Loss */
145736 +       if (!tp->snd_cwnd || tp->snd_cwnd > bbr_cwnd_warn_val) {
145737 +               char addr[INET6_ADDRSTRLEN + 10] = { 0 };
145739 +               if (sk->sk_family == AF_INET)
145740 +                       snprintf(addr, sizeof(addr), "%pI4:%u",
145741 +                                &inet_sk(sk)->inet_daddr, dport);
145742 +               else if (sk->sk_family == AF_INET6)
145743 +                       snprintf(addr, sizeof(addr), "%pI6:%u",
145744 +                                &sk->sk_v6_daddr, dport);
145746 +               WARN_ONCE(1,
145747 +                       "BBR %s cwnd alert: %u "
145748 +                       "snd_una: %u ca: %d pacing_gain: %u cwnd_gain: %u "
145749 +                       "bw: %u rtt: %u min_rtt: %u "
145750 +                       "acked: %u tso_segs: %u "
145751 +                       "bw: %d %ld %d pif: %u\n",
145752 +                       addr, tp->snd_cwnd,
145753 +                       una, inet_csk(sk)->icsk_ca_state,
145754 +                       bbr->pacing_gain, bbr->cwnd_gain,
145755 +                       bbr_max_bw(sk), (tp->srtt_us >> 3), bbr->min_rtt_us,
145756 +                       acked, bbr_tso_segs_goal(sk),
145757 +                       rs->delivered, rs->interval_us, rs->is_retrans,
145758 +                       tcp_packets_in_flight(tp));
145759 +       }
145761 +       if (likely(!bbr_debug_with_printk && !bbr_debug_ftrace))
145762 +               return;
145764 +       if (!sock_flag(sk, SOCK_DBG) && !is_port_match)
145765 +               return;
145767 +       if (!ctx->log && !tp->app_limited && !(bbr_flags & FLAG_DEBUG_VERBOSE))
145768 +               return;
145770 +       if (ipv4_is_loopback(inet_sk(sk)->inet_daddr) &&
145771 +           !(bbr_flags & FLAG_DEBUG_LOOPBACK))
145772 +               return;
145774 +       snprintf(debugmsg, sizeof(debugmsg) - 1,
145775 +                "BBR %pI4:%-5u %5u,%03u:%-7u %c "
145776 +                "%c %2u br %2u cr %2d rtt %5ld d %2d i %5ld mrtt %d %cbw %llu "
145777 +                "bw %llu lb %llu ib %llu qb %llu "
145778 +                "a %u if %2u %c %c dl %u l %u al %u # %u t %u %c %c "
145779 +                "lr %d er %d ea %d bwl %lld il %d ih %d c %d "
145780 +                "v %d %c %u %c %s\n",
145781 +                &inet_sk(sk)->inet_daddr, dport,
145782 +                una / 1000, una % 1000, fack - tp->snd_una,
145783 +                ca_states[inet_csk(sk)->icsk_ca_state],
145784 +                bbr->debug.undo ? '@' : mode[bbr->mode],
145785 +                tp->snd_cwnd,
145786 +                bbr_extra_acked(sk),   /* br (legacy): extra_acked */
145787 +                rs->tx_in_flight,      /* cr (legacy): tx_inflight */
145788 +                rs->rtt_us,
145789 +                rs->delivered,
145790 +                rs->interval_us,
145791 +                bbr->min_rtt_us,
145792 +                rs->is_app_limited ? '_' : 'l',
145793 +                bbr_rate_kbps(sk, ctx->sample_bw), /* lbw: latest sample bw */
145794 +                bbr_rate_kbps(sk, bbr_max_bw(sk)), /* bw: max bw */
145795 +                0ULL,                              /* lb: [obsolete] */
145796 +                0ULL,                              /* ib: [obsolete] */
145797 +                (u64)sk->sk_pacing_rate * 8 / 1000,
145798 +                acked,
145799 +                tcp_packets_in_flight(tp),
145800 +                rs->is_ack_delayed ? 'd' : '.',
145801 +                bbr->round_start ? '*' : '.',
145802 +                tp->delivered, tp->lost,
145803 +                tp->app_limited,
145804 +                0,                                 /* #: [obsolete] */
145805 +                ctx->target_cwnd,
145806 +                tp->reord_seen ? 'r' : '.',  /* r: reordering seen? */
145807 +                ca_states[bbr->prev_ca_state],
145808 +                (rs->lost + rs->delivered) > 0 ?
145809 +                (1000 * rs->lost /
145810 +                 (rs->lost + rs->delivered)) : 0,    /* lr: loss rate x1000 */
145811 +                (rs->delivered) > 0 ?
145812 +                (1000 * rs->delivered_ce /
145813 +                 (rs->delivered)) : 0,               /* er: ECN rate x1000 */
145814 +                1000 * bbr->ecn_alpha >> BBR_SCALE,  /* ea: ECN alpha x1000 */
145815 +                bbr->bw_lo == ~0U ?
145816 +                  -1 : (s64)bbr_rate_kbps(sk, bbr->bw_lo), /* bwl */
145817 +                bbr->inflight_lo,      /* il */
145818 +                bbr->inflight_hi,      /* ih */
145819 +                bbr->bw_probe_up_cnt,  /* c */
145820 +                2,                     /* v: version */
145821 +                bbr->debug.event,
145822 +                bbr->cycle_idx,
145823 +                ack_phase[bbr->ack_phase],
145824 +                bbr->bw_probe_samples ? "Y" : "N");
145825 +       debugmsg[sizeof(debugmsg) - 1] = 0;
145827 +       /* printk takes a higher precedence. */
145828 +       if (bbr_debug_with_printk)
145829 +               printk(KERN_DEBUG "%s", debugmsg);
145831 +       if (unlikely(bbr->debug.undo))
145832 +               bbr->debug.undo = 0;
145835 +/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
145836 +static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
145838 +       u64 rate = bw;
145840 +       rate = bbr_rate_bytes_per_sec(sk, rate, gain,
145841 +                                     bbr_pacing_margin_percent);
145842 +       rate = min_t(u64, rate, sk->sk_max_pacing_rate);
145843 +       return rate;
145846 +/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
145847 +static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
145849 +       struct tcp_sock *tp = tcp_sk(sk);
145850 +       struct bbr *bbr = inet_csk_ca(sk);
145851 +       u64 bw;
145852 +       u32 rtt_us;
145854 +       if (tp->srtt_us) {              /* any RTT sample yet? */
145855 +               rtt_us = max(tp->srtt_us >> 3, 1U);
145856 +               bbr->has_seen_rtt = 1;
145857 +       } else {                         /* no RTT sample yet */
145858 +               rtt_us = USEC_PER_MSEC;  /* use nominal default RTT */
145859 +       }
145860 +       bw = (u64)tp->snd_cwnd * BW_UNIT;
145861 +       do_div(bw, rtt_us);
145862 +       sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr->params.high_gain);
145865 +/* Pace using current bw estimate and a gain factor. */
145866 +static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
145868 +       struct tcp_sock *tp = tcp_sk(sk);
145869 +       struct bbr *bbr = inet_csk_ca(sk);
145870 +       unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);
145872 +       if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
145873 +               bbr_init_pacing_rate_from_rtt(sk);
145874 +       if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
145875 +               sk->sk_pacing_rate = rate;
145878 +static u32 bbr_min_tso_segs(struct sock *sk)
145880 +       return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
145883 +/* Return the number of segments BBR would like in a TSO/GSO skb, given
145884 + * a particular max gso size as a constraint.
145885 + */
145886 +static u32 bbr_tso_segs_generic(struct sock *sk, unsigned int mss_now,
145887 +                               u32 gso_max_size)
145889 +       struct bbr *bbr = inet_csk_ca(sk);
145890 +       u32 segs, r;
145891 +       u64 bytes;
145893 +       /* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
145894 +       bytes = sk->sk_pacing_rate >> sk->sk_pacing_shift;
145896 +       /* Budget a TSO/GSO burst size allowance based on min_rtt. For every
145897 +        * K = 2^tso_rtt_shift microseconds of min_rtt, halve the burst.
145898 +        * The min_rtt-based burst allowance is: 64 KBytes / 2^(min_rtt/K)
145899 +        */
145900 +       if (bbr->params.tso_rtt_shift) {
145901 +               r = bbr->min_rtt_us >> bbr->params.tso_rtt_shift;
145902 +               if (r < BITS_PER_TYPE(u32))   /* prevent undefined behavior */
145903 +                       bytes += GSO_MAX_SIZE >> r;
145904 +       }
145906 +       bytes = min_t(u32, bytes, gso_max_size - 1 - MAX_TCP_HEADER);
145907 +       segs = max_t(u32, bytes / mss_now, bbr_min_tso_segs(sk));
145908 +       return segs;
145911 +/* Custom tcp_tso_autosize() for BBR, used at transmit time to cap skb size. */
145912 +static u32  bbr_tso_segs(struct sock *sk, unsigned int mss_now)
145914 +       return bbr_tso_segs_generic(sk, mss_now, sk->sk_gso_max_size);
145917 +/* Like bbr_tso_segs(), using mss_cache, ignoring driver's sk_gso_max_size. */
145918 +static u32 bbr_tso_segs_goal(struct sock *sk)
145920 +       struct tcp_sock *tp = tcp_sk(sk);
145922 +       return  bbr_tso_segs_generic(sk, tp->mss_cache, GSO_MAX_SIZE);
145925 +/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
145926 +static void bbr_save_cwnd(struct sock *sk)
145928 +       struct tcp_sock *tp = tcp_sk(sk);
145929 +       struct bbr *bbr = inet_csk_ca(sk);
145931 +       if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
145932 +               bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
145933 +       else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
145934 +               bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
145937 +static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
145939 +       struct tcp_sock *tp = tcp_sk(sk);
145940 +       struct bbr *bbr = inet_csk_ca(sk);
145942 +       if (event == CA_EVENT_TX_START && tp->app_limited) {
145943 +               bbr->idle_restart = 1;
145944 +               bbr->ack_epoch_mstamp = tp->tcp_mstamp;
145945 +               bbr->ack_epoch_acked = 0;
145946 +               /* Avoid pointless buffer overflows: pace at est. bw if we don't
145947 +                * need more speed (we're restarting from idle and app-limited).
145948 +                */
145949 +               if (bbr->mode == BBR_PROBE_BW)
145950 +                       bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
145951 +               else if (bbr->mode == BBR_PROBE_RTT)
145952 +                       bbr_check_probe_rtt_done(sk);
145953 +       } else if ((event == CA_EVENT_ECN_IS_CE ||
145954 +                   event == CA_EVENT_ECN_NO_CE) &&
145955 +                   bbr_ecn_enable &&
145956 +                   bbr->params.precise_ece_ack) {
145957 +               u32 state = bbr->ce_state;
145958 +               dctcp_ece_ack_update(sk, event, &bbr->prior_rcv_nxt, &state);
145959 +               bbr->ce_state = state;
145960 +               if (tp->fast_ack_mode == 2 && event == CA_EVENT_ECN_IS_CE)
145961 +                       tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
145962 +       }
145965 +/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
145967 + * bdp = ceil(bw * min_rtt * gain)
145969 + * The key factor, gain, controls the amount of queue. While a small gain
145970 + * builds a smaller queue, it becomes more vulnerable to noise in RTT
145971 + * measurements (e.g., delayed ACKs or other ACK compression effects). This
145972 + * noise may cause BBR to under-estimate the rate.
145973 + */
145974 +static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
145976 +       struct bbr *bbr = inet_csk_ca(sk);
145977 +       u32 bdp;
145978 +       u64 w;
145980 +       /* If we've never had a valid RTT sample, cap cwnd at the initial
145981 +        * default. This should only happen when the connection is not using TCP
145982 +        * timestamps and has retransmitted all of the SYN/SYNACK/data packets
145983 +        * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
145984 +        * case we need to slow-start up toward something safe: initial cwnd.
145985 +        */
145986 +       if (unlikely(bbr->min_rtt_us == ~0U))    /* no valid RTT samples yet? */
145987 +               return bbr->init_cwnd;  /* be safe: cap at initial cwnd */
145989 +       w = (u64)bw * bbr->min_rtt_us;
145991 +       /* Apply a gain to the given value, remove the BW_SCALE shift, and
145992 +        * round the value up to avoid a negative feedback loop.
145993 +        */
145994 +       bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
145996 +       return bdp;
145999 +/* To achieve full performance in high-speed paths, we budget enough cwnd to
146000 + * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
146001 + *   - one skb in sending host Qdisc,
146002 + *   - one skb in sending host TSO/GSO engine
146003 + *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
146004 + * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
146005 + * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
146006 + * which allows 2 outstanding 2-packet sequences, to try to keep pipe
146007 + * full even with ACK-every-other-packet delayed ACKs.
146008 + */
146009 +static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
146011 +       struct bbr *bbr = inet_csk_ca(sk);
146012 +       u32 tso_segs_goal;
146014 +       tso_segs_goal = 3 * bbr_tso_segs_goal(sk);
146016 +       /* Allow enough full-sized skbs in flight to utilize end systems. */
146017 +       if (bbr->params.cwnd_tso_budget == 1) {
146018 +               cwnd = max_t(u32, cwnd, tso_segs_goal);
146019 +               cwnd = max_t(u32, cwnd, bbr->params.cwnd_min_target);
146020 +       } else {
146021 +               cwnd += tso_segs_goal;
146022 +               cwnd = (cwnd + 1) & ~1U;
146023 +       }
146024 +       /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
146025 +       if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == BBR_BW_PROBE_UP)
146026 +               cwnd += 2;
146028 +       return cwnd;
146031 +/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
146032 +static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
146034 +       u32 inflight;
146036 +       inflight = bbr_bdp(sk, bw, gain);
146037 +       inflight = bbr_quantization_budget(sk, inflight);
146039 +       return inflight;
146042 +/* With pacing at lower layers, there's often less data "in the network" than
146043 + * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
146044 + * we often have several skbs queued in the pacing layer with a pre-scheduled
146045 + * earliest departure time (EDT). BBR adapts its pacing rate based on the
146046 + * inflight level that it estimates has already been "baked in" by previous
146047 + * departure time decisions. We calculate a rough estimate of the number of our
146048 + * packets that might be in the network at the earliest departure time for the
146049 + * next skb scheduled:
146050 + *   in_network_at_edt = inflight_at_edt - (EDT - now) * bw
146051 + * If we're increasing inflight, then we want to know if the transmit of the
146052 + * EDT skb will push inflight above the target, so inflight_at_edt includes
146053 + * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
146054 + * then estimate if inflight will sink too low just before the EDT transmit.
146055 + */
146056 +static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
146058 +       struct tcp_sock *tp = tcp_sk(sk);
146059 +       struct bbr *bbr = inet_csk_ca(sk);
146060 +       u64 now_ns, edt_ns, interval_us;
146061 +       u32 interval_delivered, inflight_at_edt;
146063 +       now_ns = tp->tcp_clock_cache;
146064 +       edt_ns = max(tp->tcp_wstamp_ns, now_ns);
146065 +       interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
146066 +       interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
146067 +       inflight_at_edt = inflight_now;
146068 +       if (bbr->pacing_gain > BBR_UNIT)              /* increasing inflight */
146069 +               inflight_at_edt += bbr_tso_segs_goal(sk);  /* include EDT skb */
146070 +       if (interval_delivered >= inflight_at_edt)
146071 +               return 0;
146072 +       return inflight_at_edt - interval_delivered;
146075 +/* Find the cwnd increment based on estimate of ack aggregation */
146076 +static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
146078 +       struct bbr *bbr = inet_csk_ca(sk);
146079 +       u32 max_aggr_cwnd, aggr_cwnd = 0;
146081 +       if (bbr->params.extra_acked_gain &&
146082 +           (bbr_full_bw_reached(sk) || bbr->params.extra_acked_in_startup)) {
146083 +               max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
146084 +                               / BW_UNIT;
146085 +               aggr_cwnd = (bbr->params.extra_acked_gain * bbr_extra_acked(sk))
146086 +                            >> BBR_SCALE;
146087 +               aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
146088 +       }
146090 +       return aggr_cwnd;
146093 +/* Returns the cwnd for PROBE_RTT mode. */
146094 +static u32 bbr_probe_rtt_cwnd(struct sock *sk)
146096 +       struct bbr *bbr = inet_csk_ca(sk);
146098 +       if (bbr->params.probe_rtt_cwnd_gain == 0)
146099 +               return bbr->params.cwnd_min_target;
146100 +       return max_t(u32, bbr->params.cwnd_min_target,
146101 +                    bbr_bdp(sk, bbr_bw(sk), bbr->params.probe_rtt_cwnd_gain));
146104 +/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
146105 + * has drawn us down below target), or snap down to target if we're above it.
146106 + */
146107 +static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
146108 +                        u32 acked, u32 bw, int gain, u32 cwnd,
146109 +                        struct bbr_context *ctx)
146111 +       struct tcp_sock *tp = tcp_sk(sk);
146112 +       struct bbr *bbr = inet_csk_ca(sk);
146113 +       u32 target_cwnd = 0, prev_cwnd = tp->snd_cwnd, max_probe;
146115 +       if (!acked)
146116 +               goto done;  /* no packet fully ACKed; just apply caps */
146118 +       target_cwnd = bbr_bdp(sk, bw, gain);
146120 +       /* Increment the cwnd to account for excess ACKed data that seems
146121 +        * due to aggregation (of data and/or ACKs) visible in the ACK stream.
146122 +        */
146123 +       target_cwnd += bbr_ack_aggregation_cwnd(sk);
146124 +       target_cwnd = bbr_quantization_budget(sk, target_cwnd);
146126 +       /* If we're below target cwnd, slow start cwnd toward target cwnd. */
146127 +       bbr->debug.target_cwnd = target_cwnd;
146129 +       /* Update cwnd and enable fast path if cwnd reaches target_cwnd. */
146130 +       bbr->try_fast_path = 0;
146131 +       if (bbr_full_bw_reached(sk)) { /* only cut cwnd if we filled the pipe */
146132 +               cwnd += acked;
146133 +               if (cwnd >= target_cwnd) {
146134 +                       cwnd = target_cwnd;
146135 +                       bbr->try_fast_path = 1;
146136 +               }
146137 +       } else if (cwnd < target_cwnd || cwnd  < 2 * bbr->init_cwnd) {
146138 +               cwnd += acked;
146139 +       } else {
146140 +               bbr->try_fast_path = 1;
146141 +       }
146143 +       /* When growing cwnd, don't grow beyond twice what we just probed. */
146144 +       if (bbr->params.usage_based_cwnd) {
146145 +               max_probe = max(2 * tp->max_packets_out, tp->snd_cwnd);
146146 +               cwnd = min(cwnd, max_probe);
146147 +       }
146149 +       cwnd = max_t(u32, cwnd, bbr->params.cwnd_min_target);
146150 +done:
146151 +       tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);   /* apply global cap */
146152 +       if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
146153 +               tp->snd_cwnd = min_t(u32, tp->snd_cwnd, bbr_probe_rtt_cwnd(sk));
146155 +       ctx->target_cwnd = target_cwnd;
146156 +       ctx->log = (tp->snd_cwnd != prev_cwnd);
146159 +/* See if we have reached next round trip */
146160 +static void bbr_update_round_start(struct sock *sk,
146161 +               const struct rate_sample *rs, struct bbr_context *ctx)
146163 +       struct tcp_sock *tp = tcp_sk(sk);
146164 +       struct bbr *bbr = inet_csk_ca(sk);
146166 +       bbr->round_start = 0;
146168 +       /* See if we've reached the next RTT */
146169 +       if (rs->interval_us > 0 &&
146170 +           !before(rs->prior_delivered, bbr->next_rtt_delivered)) {
146171 +               bbr->next_rtt_delivered = tp->delivered;
146172 +               bbr->round_start = 1;
146173 +       }
146176 +/* Calculate the bandwidth based on how fast packets are delivered */
146177 +static void bbr_calculate_bw_sample(struct sock *sk,
146178 +                       const struct rate_sample *rs, struct bbr_context *ctx)
146180 +       struct bbr *bbr = inet_csk_ca(sk);
146181 +       u64 bw = 0;
146183 +       /* Divide delivered by the interval to find a (lower bound) bottleneck
146184 +        * bandwidth sample. Delivered is in packets and interval_us in uS and
146185 +        * ratio will be <<1 for most connections. So delivered is first scaled.
146186 +        * Round up to allow growth at low rates, even with integer division.
146187 +        */
146188 +       if (rs->interval_us > 0) {
146189 +               if (WARN_ONCE(rs->delivered < 0,
146190 +                             "negative delivered: %d interval_us: %ld\n",
146191 +                             rs->delivered, rs->interval_us))
146192 +                       return;
146194 +               bw = DIV_ROUND_UP_ULL((u64)rs->delivered * BW_UNIT, rs->interval_us);
146195 +       }
146197 +       ctx->sample_bw = bw;
146198 +       bbr->debug.rs_bw = bw;
146201 +/* Estimates the windowed max degree of ack aggregation.
146202 + * This is used to provision extra in-flight data to keep sending during
146203 + * inter-ACK silences.
146205 + * Degree of ack aggregation is estimated as extra data acked beyond expected.
146207 + * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
146208 + * cwnd += max_extra_acked
146210 + * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
146211 + * Max filter is an approximate sliding window of 5-10 (packet timed) round
146212 + * trips for non-startup phase, and 1-2 round trips for startup.
146213 + */
146214 +static void bbr_update_ack_aggregation(struct sock *sk,
146215 +                                      const struct rate_sample *rs)
146217 +       u32 epoch_us, expected_acked, extra_acked;
146218 +       struct bbr *bbr = inet_csk_ca(sk);
146219 +       struct tcp_sock *tp = tcp_sk(sk);
146220 +       u32 extra_acked_win_rtts_thresh = bbr->params.extra_acked_win_rtts;
146222 +       if (!bbr->params.extra_acked_gain || rs->acked_sacked <= 0 ||
146223 +           rs->delivered < 0 || rs->interval_us <= 0)
146224 +               return;
146226 +       if (bbr->round_start) {
146227 +               bbr->extra_acked_win_rtts = min(0x1F,
146228 +                                               bbr->extra_acked_win_rtts + 1);
146229 +               if (bbr->params.extra_acked_in_startup &&
146230 +                   !bbr_full_bw_reached(sk))
146231 +                       extra_acked_win_rtts_thresh = 1;
146232 +               if (bbr->extra_acked_win_rtts >=
146233 +                   extra_acked_win_rtts_thresh) {
146234 +                       bbr->extra_acked_win_rtts = 0;
146235 +                       bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
146236 +                                                  0 : 1;
146237 +                       bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
146238 +               }
146239 +       }
146241 +       /* Compute how many packets we expected to be delivered over epoch. */
146242 +       epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
146243 +                                     bbr->ack_epoch_mstamp);
146244 +       expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
146246 +       /* Reset the aggregation epoch if ACK rate is below expected rate or
146247 +        * significantly large no. of ack received since epoch (potentially
146248 +        * quite old epoch).
146249 +        */
146250 +       if (bbr->ack_epoch_acked <= expected_acked ||
146251 +           (bbr->ack_epoch_acked + rs->acked_sacked >=
146252 +            bbr_ack_epoch_acked_reset_thresh)) {
146253 +               bbr->ack_epoch_acked = 0;
146254 +               bbr->ack_epoch_mstamp = tp->delivered_mstamp;
146255 +               expected_acked = 0;
146256 +       }
146258 +       /* Compute excess data delivered, beyond what was expected. */
146259 +       bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
146260 +                                  bbr->ack_epoch_acked + rs->acked_sacked);
146261 +       extra_acked = bbr->ack_epoch_acked - expected_acked;
146262 +       extra_acked = min(extra_acked, tp->snd_cwnd);
146263 +       if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
146264 +               bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
146267 +/* Estimate when the pipe is full, using the change in delivery rate: BBR
146268 + * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
146269 + * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
146270 + * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
146271 + * higher rwin, 3: we get higher delivery rate samples. Or transient
146272 + * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
146273 + * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
146274 + */
146275 +static void bbr_check_full_bw_reached(struct sock *sk,
146276 +                                     const struct rate_sample *rs)
146278 +       struct bbr *bbr = inet_csk_ca(sk);
146279 +       u32 bw_thresh;
146281 +       if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
146282 +               return;
146284 +       bw_thresh = (u64)bbr->full_bw * bbr->params.full_bw_thresh >> BBR_SCALE;
146285 +       if (bbr_max_bw(sk) >= bw_thresh) {
146286 +               bbr->full_bw = bbr_max_bw(sk);
146287 +               bbr->full_bw_cnt = 0;
146288 +               return;
146289 +       }
146290 +       ++bbr->full_bw_cnt;
146291 +       bbr->full_bw_reached = bbr->full_bw_cnt >= bbr->params.full_bw_cnt;
146294 +/* If pipe is probably full, drain the queue and then enter steady-state. */
146295 +static bool bbr_check_drain(struct sock *sk, const struct rate_sample *rs,
146296 +                           struct bbr_context *ctx)
146298 +       struct bbr *bbr = inet_csk_ca(sk);
146300 +       if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
146301 +               bbr->mode = BBR_DRAIN;  /* drain queue we created */
146302 +               tcp_sk(sk)->snd_ssthresh =
146303 +                               bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
146304 +               bbr2_reset_congestion_signals(sk);
146305 +       }       /* fall through to check if in-flight is already small: */
146306 +       if (bbr->mode == BBR_DRAIN &&
146307 +           bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
146308 +           bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
146309 +               return true;  /* exiting DRAIN now */
146310 +       return false;
146313 +static void bbr_check_probe_rtt_done(struct sock *sk)
146315 +       struct tcp_sock *tp = tcp_sk(sk);
146316 +       struct bbr *bbr = inet_csk_ca(sk);
146318 +       if (!(bbr->probe_rtt_done_stamp &&
146319 +             after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
146320 +               return;
146322 +       bbr->probe_rtt_min_stamp = tcp_jiffies32; /* schedule next PROBE_RTT */
146323 +       tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
146324 +       bbr2_exit_probe_rtt(sk);
146327 +/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
146328 + * periodically drain the bottleneck queue, to converge to measure the true
146329 + * min_rtt (unloaded propagation delay). This allows the flows to keep queues
146330 + * small (reducing queuing delay and packet loss) and achieve fairness among
146331 + * BBR flows.
146333 + * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
146334 + * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
146335 + * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
146336 + * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
146337 + * re-enter the previous mode. BBR uses 200ms to approximately bound the
146338 + * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
146340 + * Note that flows need only pay 2% if they are busy sending over the last 10
146341 + * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
146342 + * natural silences or low-rate periods within 10 seconds where the rate is low
146343 + * enough for long enough to drain its queue in the bottleneck. We pick up
146344 + * these min RTT measurements opportunistically with our min_rtt filter. :-)
146345 + */
146346 +static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
146348 +       struct tcp_sock *tp = tcp_sk(sk);
146349 +       struct bbr *bbr = inet_csk_ca(sk);
146350 +       bool probe_rtt_expired, min_rtt_expired;
146351 +       u32 expire;
146353 +       /* Track min RTT in probe_rtt_win_ms to time next PROBE_RTT state. */
146354 +       expire = bbr->probe_rtt_min_stamp +
146355 +                msecs_to_jiffies(bbr->params.probe_rtt_win_ms);
146356 +       probe_rtt_expired = after(tcp_jiffies32, expire);
146357 +       if (rs->rtt_us >= 0 &&
146358 +           (rs->rtt_us <= bbr->probe_rtt_min_us ||
146359 +            (probe_rtt_expired && !rs->is_ack_delayed))) {
146360 +               bbr->probe_rtt_min_us = rs->rtt_us;
146361 +               bbr->probe_rtt_min_stamp = tcp_jiffies32;
146362 +       }
146363 +       /* Track min RTT seen in the min_rtt_win_sec filter window: */
146364 +       expire = bbr->min_rtt_stamp + bbr->params.min_rtt_win_sec * HZ;
146365 +       min_rtt_expired = after(tcp_jiffies32, expire);
146366 +       if (bbr->probe_rtt_min_us <= bbr->min_rtt_us ||
146367 +           min_rtt_expired) {
146368 +               bbr->min_rtt_us = bbr->probe_rtt_min_us;
146369 +               bbr->min_rtt_stamp = bbr->probe_rtt_min_stamp;
146370 +       }
146372 +       if (bbr->params.probe_rtt_mode_ms > 0 && probe_rtt_expired &&
146373 +           !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
146374 +               bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
146375 +               bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
146376 +               bbr->probe_rtt_done_stamp = 0;
146377 +               bbr->ack_phase = BBR_ACKS_PROBE_STOPPING;
146378 +               bbr->next_rtt_delivered = tp->delivered;
146379 +       }
146381 +       if (bbr->mode == BBR_PROBE_RTT) {
146382 +               /* Ignore low rate samples during this mode. */
146383 +               tp->app_limited =
146384 +                       (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
146385 +               /* Maintain min packets in flight for max(200 ms, 1 round). */
146386 +               if (!bbr->probe_rtt_done_stamp &&
146387 +                   tcp_packets_in_flight(tp) <= bbr_probe_rtt_cwnd(sk)) {
146388 +                       bbr->probe_rtt_done_stamp = tcp_jiffies32 +
146389 +                               msecs_to_jiffies(bbr->params.probe_rtt_mode_ms);
146390 +                       bbr->probe_rtt_round_done = 0;
146391 +                       bbr->next_rtt_delivered = tp->delivered;
146392 +               } else if (bbr->probe_rtt_done_stamp) {
146393 +                       if (bbr->round_start)
146394 +                               bbr->probe_rtt_round_done = 1;
146395 +                       if (bbr->probe_rtt_round_done)
146396 +                               bbr_check_probe_rtt_done(sk);
146397 +               }
146398 +       }
146399 +       /* Restart after idle ends only once we process a new S/ACK for data */
146400 +       if (rs->delivered > 0)
146401 +               bbr->idle_restart = 0;
146404 +static void bbr_update_gains(struct sock *sk)
146406 +       struct bbr *bbr = inet_csk_ca(sk);
146408 +       switch (bbr->mode) {
146409 +       case BBR_STARTUP:
146410 +               bbr->pacing_gain = bbr->params.high_gain;
146411 +               bbr->cwnd_gain   = bbr->params.startup_cwnd_gain;
146412 +               break;
146413 +       case BBR_DRAIN:
146414 +               bbr->pacing_gain = bbr->params.drain_gain;  /* slow, to drain */
146415 +               bbr->cwnd_gain = bbr->params.startup_cwnd_gain;  /* keep cwnd */
146416 +               break;
146417 +       case BBR_PROBE_BW:
146418 +               bbr->pacing_gain = bbr->params.pacing_gain[bbr->cycle_idx];
146419 +               bbr->cwnd_gain = bbr->params.cwnd_gain;
146420 +               break;
146421 +       case BBR_PROBE_RTT:
146422 +               bbr->pacing_gain = BBR_UNIT;
146423 +               bbr->cwnd_gain = BBR_UNIT;
146424 +               break;
146425 +       default:
146426 +               WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
146427 +               break;
146428 +       }
146431 +static void bbr_init(struct sock *sk)
146433 +       struct tcp_sock *tp = tcp_sk(sk);
146434 +       struct bbr *bbr = inet_csk_ca(sk);
146435 +       int i;
146437 +       WARN_ON_ONCE(tp->snd_cwnd >= bbr_cwnd_warn_val);
146439 +       bbr->initialized = 1;
146440 +       bbr->params.high_gain = min(0x7FF, bbr_high_gain);
146441 +       bbr->params.drain_gain = min(0x3FF, bbr_drain_gain);
146442 +       bbr->params.startup_cwnd_gain = min(0x7FF, bbr_startup_cwnd_gain);
146443 +       bbr->params.cwnd_gain = min(0x7FF, bbr_cwnd_gain);
146444 +       bbr->params.cwnd_tso_budget = min(0x1U, bbr_cwnd_tso_budget);
146445 +       bbr->params.cwnd_min_target = min(0xFU, bbr_cwnd_min_target);
146446 +       bbr->params.min_rtt_win_sec = min(0x1FU, bbr_min_rtt_win_sec);
146447 +       bbr->params.probe_rtt_mode_ms = min(0x1FFU, bbr_probe_rtt_mode_ms);
146448 +       bbr->params.full_bw_cnt = min(0x7U, bbr_full_bw_cnt);
146449 +       bbr->params.full_bw_thresh = min(0x3FFU, bbr_full_bw_thresh);
146450 +       bbr->params.extra_acked_gain = min(0x7FF, bbr_extra_acked_gain);
146451 +       bbr->params.extra_acked_win_rtts = min(0x1FU, bbr_extra_acked_win_rtts);
146452 +       bbr->params.drain_to_target = bbr_drain_to_target ? 1 : 0;
146453 +       bbr->params.precise_ece_ack = bbr_precise_ece_ack ? 1 : 0;
146454 +       bbr->params.extra_acked_in_startup = bbr_extra_acked_in_startup ? 1 : 0;
146455 +       bbr->params.probe_rtt_cwnd_gain = min(0xFFU, bbr_probe_rtt_cwnd_gain);
146456 +       bbr->params.probe_rtt_win_ms =
146457 +               min(0x3FFFU,
146458 +                   min_t(u32, bbr_probe_rtt_win_ms,
146459 +                         bbr->params.min_rtt_win_sec * MSEC_PER_SEC));
146460 +       for (i = 0; i < CYCLE_LEN; i++)
146461 +               bbr->params.pacing_gain[i] = min(0x3FF, bbr_pacing_gain[i]);
146462 +       bbr->params.usage_based_cwnd = bbr_usage_based_cwnd ? 1 : 0;
146463 +       bbr->params.tso_rtt_shift =  min(0xFU, bbr_tso_rtt_shift);
146465 +       bbr->debug.snd_isn = tp->snd_una;
146466 +       bbr->debug.target_cwnd = 0;
146467 +       bbr->debug.undo = 0;
146469 +       bbr->init_cwnd = min(0x7FU, tp->snd_cwnd);
146470 +       bbr->prior_cwnd = tp->prior_cwnd;
146471 +       tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
146472 +       bbr->next_rtt_delivered = 0;
146473 +       bbr->prev_ca_state = TCP_CA_Open;
146474 +       bbr->packet_conservation = 0;
146476 +       bbr->probe_rtt_done_stamp = 0;
146477 +       bbr->probe_rtt_round_done = 0;
146478 +       bbr->probe_rtt_min_us = tcp_min_rtt(tp);
146479 +       bbr->probe_rtt_min_stamp = tcp_jiffies32;
146480 +       bbr->min_rtt_us = tcp_min_rtt(tp);
146481 +       bbr->min_rtt_stamp = tcp_jiffies32;
146483 +       bbr->has_seen_rtt = 0;
146484 +       bbr_init_pacing_rate_from_rtt(sk);
146486 +       bbr->round_start = 0;
146487 +       bbr->idle_restart = 0;
146488 +       bbr->full_bw_reached = 0;
146489 +       bbr->full_bw = 0;
146490 +       bbr->full_bw_cnt = 0;
146491 +       bbr->cycle_mstamp = 0;
146492 +       bbr->cycle_idx = 0;
146493 +       bbr->mode = BBR_STARTUP;
146494 +       bbr->debug.rs_bw = 0;
146496 +       bbr->ack_epoch_mstamp = tp->tcp_mstamp;
146497 +       bbr->ack_epoch_acked = 0;
146498 +       bbr->extra_acked_win_rtts = 0;
146499 +       bbr->extra_acked_win_idx = 0;
146500 +       bbr->extra_acked[0] = 0;
146501 +       bbr->extra_acked[1] = 0;
146503 +       bbr->ce_state = 0;
146504 +       bbr->prior_rcv_nxt = tp->rcv_nxt;
146505 +       bbr->try_fast_path = 0;
146507 +       cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
146510 +static u32 bbr_sndbuf_expand(struct sock *sk)
146512 +       /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
146513 +       return 3;
146516 +/* __________________________________________________________________________
146518 + * Functions new to BBR v2 ("bbr") congestion control are below here.
146519 + * __________________________________________________________________________
146520 + */
146522 +/* Incorporate a new bw sample into the current window of our max filter. */
146523 +static void bbr2_take_bw_hi_sample(struct sock *sk, u32 bw)
146525 +       struct bbr *bbr = inet_csk_ca(sk);
146527 +       bbr->bw_hi[1] = max(bw, bbr->bw_hi[1]);
146530 +/* Keep max of last 1-2 cycles. Each PROBE_BW cycle, flip filter window. */
146531 +static void bbr2_advance_bw_hi_filter(struct sock *sk)
146533 +       struct bbr *bbr = inet_csk_ca(sk);
146535 +       if (!bbr->bw_hi[1])
146536 +               return;  /* no samples in this window; remember old window */
146537 +       bbr->bw_hi[0] = bbr->bw_hi[1];
146538 +       bbr->bw_hi[1] = 0;
146541 +/* How much do we want in flight? Our BDP, unless congestion cut cwnd. */
146542 +static u32 bbr2_target_inflight(struct sock *sk)
146544 +       u32 bdp = bbr_inflight(sk, bbr_bw(sk), BBR_UNIT);
146546 +       return min(bdp, tcp_sk(sk)->snd_cwnd);
146549 +static bool bbr2_is_probing_bandwidth(struct sock *sk)
146551 +       struct bbr *bbr = inet_csk_ca(sk);
146553 +       return (bbr->mode == BBR_STARTUP) ||
146554 +               (bbr->mode == BBR_PROBE_BW &&
146555 +                (bbr->cycle_idx == BBR_BW_PROBE_REFILL ||
146556 +                 bbr->cycle_idx == BBR_BW_PROBE_UP));
146559 +/* Has the given amount of time elapsed since we marked the phase start? */
146560 +static bool bbr2_has_elapsed_in_phase(const struct sock *sk, u32 interval_us)
146562 +       const struct tcp_sock *tp = tcp_sk(sk);
146563 +       const struct bbr *bbr = inet_csk_ca(sk);
146565 +       return tcp_stamp_us_delta(tp->tcp_mstamp,
146566 +                                 bbr->cycle_mstamp + interval_us) > 0;
146569 +static void bbr2_handle_queue_too_high_in_startup(struct sock *sk)
146571 +       struct bbr *bbr = inet_csk_ca(sk);
146573 +       bbr->full_bw_reached = 1;
146574 +       bbr->inflight_hi = bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
146577 +/* Exit STARTUP upon N consecutive rounds with ECN mark rate > ecn_thresh. */
146578 +static void bbr2_check_ecn_too_high_in_startup(struct sock *sk, u32 ce_ratio)
146580 +       struct bbr *bbr = inet_csk_ca(sk);
146582 +       if (bbr_full_bw_reached(sk) || !bbr->ecn_eligible ||
146583 +           !bbr->params.full_ecn_cnt || !bbr->params.ecn_thresh)
146584 +               return;
146586 +       if (ce_ratio >= bbr->params.ecn_thresh)
146587 +               bbr->startup_ecn_rounds++;
146588 +       else
146589 +               bbr->startup_ecn_rounds = 0;
146591 +       if (bbr->startup_ecn_rounds >= bbr->params.full_ecn_cnt) {
146592 +               bbr->debug.event = 'E';  /* ECN caused STARTUP exit */
146593 +               bbr2_handle_queue_too_high_in_startup(sk);
146594 +               return;
146595 +       }
146598 +static void bbr2_update_ecn_alpha(struct sock *sk)
146600 +       struct tcp_sock *tp = tcp_sk(sk);
146601 +       struct bbr *bbr = inet_csk_ca(sk);
146602 +       s32 delivered, delivered_ce;
146603 +       u64 alpha, ce_ratio;
146604 +       u32 gain;
146606 +       if (bbr->params.ecn_factor == 0)
146607 +               return;
146609 +       delivered = tp->delivered - bbr->alpha_last_delivered;
146610 +       delivered_ce = tp->delivered_ce - bbr->alpha_last_delivered_ce;
146612 +       if (delivered == 0 ||           /* avoid divide by zero */
146613 +           WARN_ON_ONCE(delivered < 0 || delivered_ce < 0))  /* backwards? */
146614 +               return;
146616 +       /* See if we should use ECN sender logic for this connection. */
146617 +       if (!bbr->ecn_eligible && bbr_ecn_enable &&
146618 +           (bbr->min_rtt_us <= bbr->params.ecn_max_rtt_us ||
146619 +            !bbr->params.ecn_max_rtt_us))
146620 +               bbr->ecn_eligible = 1;
146622 +       ce_ratio = (u64)delivered_ce << BBR_SCALE;
146623 +       do_div(ce_ratio, delivered);
146624 +       gain = bbr->params.ecn_alpha_gain;
146625 +       alpha = ((BBR_UNIT - gain) * bbr->ecn_alpha) >> BBR_SCALE;
146626 +       alpha += (gain * ce_ratio) >> BBR_SCALE;
146627 +       bbr->ecn_alpha = min_t(u32, alpha, BBR_UNIT);
146629 +       bbr->alpha_last_delivered = tp->delivered;
146630 +       bbr->alpha_last_delivered_ce = tp->delivered_ce;
146632 +       bbr2_check_ecn_too_high_in_startup(sk, ce_ratio);
146635 +/* Each round trip of BBR_BW_PROBE_UP, double volume of probing data. */
146636 +static void bbr2_raise_inflight_hi_slope(struct sock *sk)
146638 +       struct tcp_sock *tp = tcp_sk(sk);
146639 +       struct bbr *bbr = inet_csk_ca(sk);
146640 +       u32 growth_this_round, cnt;
146642 +       /* Calculate "slope": packets S/Acked per inflight_hi increment. */
146643 +       growth_this_round = 1 << bbr->bw_probe_up_rounds;
146644 +       bbr->bw_probe_up_rounds = min(bbr->bw_probe_up_rounds + 1, 30);
146645 +       cnt = tp->snd_cwnd / growth_this_round;
146646 +       cnt = max(cnt, 1U);
146647 +       bbr->bw_probe_up_cnt = cnt;
146648 +       bbr->debug.event = 'G';  /* Grow inflight_hi slope */
146651 +/* In BBR_BW_PROBE_UP, not seeing high loss/ECN/queue, so raise inflight_hi. */
146652 +static void bbr2_probe_inflight_hi_upward(struct sock *sk,
146653 +                                         const struct rate_sample *rs)
146655 +       struct tcp_sock *tp = tcp_sk(sk);
146656 +       struct bbr *bbr = inet_csk_ca(sk);
146657 +       u32 delta;
146659 +       if (!tp->is_cwnd_limited || tp->snd_cwnd < bbr->inflight_hi) {
146660 +               bbr->bw_probe_up_acks = 0;  /* don't accmulate unused credits */
146661 +               return;  /* not fully using inflight_hi, so don't grow it */
146662 +       }
146664 +       /* For each bw_probe_up_cnt packets ACKed, increase inflight_hi by 1. */
146665 +       bbr->bw_probe_up_acks += rs->acked_sacked;
146666 +       if (bbr->bw_probe_up_acks >=  bbr->bw_probe_up_cnt) {
146667 +               delta = bbr->bw_probe_up_acks / bbr->bw_probe_up_cnt;
146668 +               bbr->bw_probe_up_acks -= delta * bbr->bw_probe_up_cnt;
146669 +               bbr->inflight_hi += delta;
146670 +               bbr->debug.event = 'I';  /* Increment inflight_hi */
146671 +       }
146673 +       if (bbr->round_start)
146674 +               bbr2_raise_inflight_hi_slope(sk);
146677 +/* Does loss/ECN rate for this sample say inflight is "too high"?
146678 + * This is used by both the bbr_check_loss_too_high_in_startup() function,
146679 + * which can be used in either v1 or v2, and the PROBE_UP phase of v2, which
146680 + * uses it to notice when loss/ECN rates suggest inflight is too high.
146681 + */
146682 +static bool bbr2_is_inflight_too_high(const struct sock *sk,
146683 +                                    const struct rate_sample *rs)
146685 +       const struct bbr *bbr = inet_csk_ca(sk);
146686 +       u32 loss_thresh, ecn_thresh;
146688 +       if (rs->lost > 0 && rs->tx_in_flight) {
146689 +               loss_thresh = (u64)rs->tx_in_flight * bbr->params.loss_thresh >>
146690 +                               BBR_SCALE;
146691 +               if (rs->lost > loss_thresh)
146692 +                       return true;
146693 +       }
146695 +       if (rs->delivered_ce > 0 && rs->delivered > 0 &&
146696 +           bbr->ecn_eligible && bbr->params.ecn_thresh) {
146697 +               ecn_thresh = (u64)rs->delivered * bbr->params.ecn_thresh >>
146698 +                               BBR_SCALE;
146699 +               if (rs->delivered_ce >= ecn_thresh)
146700 +                       return true;
146701 +       }
146703 +       return false;
146706 +/* Calculate the tx_in_flight level that corresponded to excessive loss.
146707 + * We find "lost_prefix" segs of the skb where loss rate went too high,
146708 + * by solving for "lost_prefix" in the following equation:
146709 + *   lost                     /  inflight                     >= loss_thresh
146710 + *  (lost_prev + lost_prefix) / (inflight_prev + lost_prefix) >= loss_thresh
146711 + * Then we take that equation, convert it to fixed point, and
146712 + * round up to the nearest packet.
146713 + */
146714 +static u32 bbr2_inflight_hi_from_lost_skb(const struct sock *sk,
146715 +                                         const struct rate_sample *rs,
146716 +                                         const struct sk_buff *skb)
146718 +       const struct bbr *bbr = inet_csk_ca(sk);
146719 +       u32 loss_thresh  = bbr->params.loss_thresh;
146720 +       u32 pcount, divisor, inflight_hi;
146721 +       s32 inflight_prev, lost_prev;
146722 +       u64 loss_budget, lost_prefix;
146724 +       pcount = tcp_skb_pcount(skb);
146726 +       /* How much data was in flight before this skb? */
146727 +       inflight_prev = rs->tx_in_flight - pcount;
146728 +       if (WARN_ONCE(inflight_prev < 0,
146729 +                     "tx_in_flight: %u pcount: %u reneg: %u",
146730 +                     rs->tx_in_flight, pcount, tcp_sk(sk)->is_sack_reneg))
146731 +               return ~0U;
146733 +       /* How much inflight data was marked lost before this skb? */
146734 +       lost_prev = rs->lost - pcount;
146735 +       if (WARN_ON_ONCE(lost_prev < 0))
146736 +               return ~0U;
146738 +       /* At what prefix of this lost skb did losss rate exceed loss_thresh? */
146739 +       loss_budget = (u64)inflight_prev * loss_thresh + BBR_UNIT - 1;
146740 +       loss_budget >>= BBR_SCALE;
146741 +       if (lost_prev >= loss_budget) {
146742 +               lost_prefix = 0;   /* previous losses crossed loss_thresh */
146743 +       } else {
146744 +               lost_prefix = loss_budget - lost_prev;
146745 +               lost_prefix <<= BBR_SCALE;
146746 +               divisor = BBR_UNIT - loss_thresh;
146747 +               if (WARN_ON_ONCE(!divisor))  /* loss_thresh is 8 bits */
146748 +                       return ~0U;
146749 +               do_div(lost_prefix, divisor);
146750 +       }
146752 +       inflight_hi = inflight_prev + lost_prefix;
146753 +       return inflight_hi;
146756 +/* If loss/ECN rates during probing indicated we may have overfilled a
146757 + * buffer, return an operating point that tries to leave unutilized headroom in
146758 + * the path for other flows, for fairness convergence and lower RTTs and loss.
146759 + */
146760 +static u32 bbr2_inflight_with_headroom(const struct sock *sk)
146762 +       struct bbr *bbr = inet_csk_ca(sk);
146763 +       u32 headroom, headroom_fraction;
146765 +       if (bbr->inflight_hi == ~0U)
146766 +               return ~0U;
146768 +       headroom_fraction = bbr->params.inflight_headroom;
146769 +       headroom = ((u64)bbr->inflight_hi * headroom_fraction) >> BBR_SCALE;
146770 +       headroom = max(headroom, 1U);
146771 +       return max_t(s32, bbr->inflight_hi - headroom,
146772 +                    bbr->params.cwnd_min_target);
146775 +/* Bound cwnd to a sensible level, based on our current probing state
146776 + * machine phase and model of a good inflight level (inflight_lo, inflight_hi).
146777 + */
146778 +static void bbr2_bound_cwnd_for_inflight_model(struct sock *sk)
146780 +       struct tcp_sock *tp = tcp_sk(sk);
146781 +       struct bbr *bbr = inet_csk_ca(sk);
146782 +       u32 cap;
146784 +       /* tcp_rcv_synsent_state_process() currently calls tcp_ack()
146785 +        * and thus cong_control() without first initializing us(!).
146786 +        */
146787 +       if (!bbr->initialized)
146788 +               return;
146790 +       cap = ~0U;
146791 +       if (bbr->mode == BBR_PROBE_BW &&
146792 +           bbr->cycle_idx != BBR_BW_PROBE_CRUISE) {
146793 +               /* Probe to see if more packets fit in the path. */
146794 +               cap = bbr->inflight_hi;
146795 +       } else {
146796 +               if (bbr->mode == BBR_PROBE_RTT ||
146797 +                   (bbr->mode == BBR_PROBE_BW &&
146798 +                    bbr->cycle_idx == BBR_BW_PROBE_CRUISE))
146799 +                       cap = bbr2_inflight_with_headroom(sk);
146800 +       }
146801 +       /* Adapt to any loss/ECN since our last bw probe. */
146802 +       cap = min(cap, bbr->inflight_lo);
146804 +       cap = max_t(u32, cap, bbr->params.cwnd_min_target);
146805 +       tp->snd_cwnd = min(cap, tp->snd_cwnd);
146808 +/* Estimate a short-term lower bound on the capacity available now, based
146809 + * on measurements of the current delivery process and recent history. When we
146810 + * are seeing loss/ECN at times when we are not probing bw, then conservatively
146811 + * move toward flow balance by multiplicatively cutting our short-term
146812 + * estimated safe rate and volume of data (bw_lo and inflight_lo). We use a
146813 + * multiplicative decrease in order to converge to a lower capacity in time
146814 + * logarithmic in the magnitude of the decrease.
146816 + * However, we do not cut our short-term estimates lower than the current rate
146817 + * and volume of delivered data from this round trip, since from the current
146818 + * delivery process we can estimate the measured capacity available now.
146820 + * Anything faster than that approach would knowingly risk high loss, which can
146821 + * cause low bw for Reno/CUBIC and high loss recovery latency for
146822 + * request/response flows using any congestion control.
146823 + */
146824 +static void bbr2_adapt_lower_bounds(struct sock *sk)
146826 +       struct tcp_sock *tp = tcp_sk(sk);
146827 +       struct bbr *bbr = inet_csk_ca(sk);
146828 +       u32 ecn_cut, ecn_inflight_lo, beta;
146830 +       /* We only use lower-bound estimates when not probing bw.
146831 +        * When probing we need to push inflight higher to probe bw.
146832 +        */
146833 +       if (bbr2_is_probing_bandwidth(sk))
146834 +               return;
146836 +       /* ECN response. */
146837 +       if (bbr->ecn_in_round && bbr->ecn_eligible && bbr->params.ecn_factor) {
146838 +               /* Reduce inflight to (1 - alpha*ecn_factor). */
146839 +               ecn_cut = (BBR_UNIT -
146840 +                          ((bbr->ecn_alpha * bbr->params.ecn_factor) >>
146841 +                           BBR_SCALE));
146842 +               if (bbr->inflight_lo == ~0U)
146843 +                       bbr->inflight_lo = tp->snd_cwnd;
146844 +               ecn_inflight_lo = (u64)bbr->inflight_lo * ecn_cut >> BBR_SCALE;
146845 +       } else {
146846 +               ecn_inflight_lo = ~0U;
146847 +       }
146849 +       /* Loss response. */
146850 +       if (bbr->loss_in_round) {
146851 +               /* Reduce bw and inflight to (1 - beta). */
146852 +               if (bbr->bw_lo == ~0U)
146853 +                       bbr->bw_lo = bbr_max_bw(sk);
146854 +               if (bbr->inflight_lo == ~0U)
146855 +                       bbr->inflight_lo = tp->snd_cwnd;
146856 +               beta = bbr->params.beta;
146857 +               bbr->bw_lo =
146858 +                       max_t(u32, bbr->bw_latest,
146859 +                             (u64)bbr->bw_lo *
146860 +                             (BBR_UNIT - beta) >> BBR_SCALE);
146861 +               bbr->inflight_lo =
146862 +                       max_t(u32, bbr->inflight_latest,
146863 +                             (u64)bbr->inflight_lo *
146864 +                             (BBR_UNIT - beta) >> BBR_SCALE);
146865 +       }
146867 +       /* Adjust to the lower of the levels implied by loss or ECN. */
146868 +       bbr->inflight_lo = min(bbr->inflight_lo, ecn_inflight_lo);
146871 +/* Reset any short-term lower-bound adaptation to congestion, so that we can
146872 + * push our inflight up.
146873 + */
146874 +static void bbr2_reset_lower_bounds(struct sock *sk)
146876 +       struct bbr *bbr = inet_csk_ca(sk);
146878 +       bbr->bw_lo = ~0U;
146879 +       bbr->inflight_lo = ~0U;
146882 +/* After bw probing (STARTUP/PROBE_UP), reset signals before entering a state
146883 + * machine phase where we adapt our lower bound based on congestion signals.
146884 + */
146885 +static void bbr2_reset_congestion_signals(struct sock *sk)
146887 +       struct bbr *bbr = inet_csk_ca(sk);
146889 +       bbr->loss_in_round = 0;
146890 +       bbr->ecn_in_round = 0;
146891 +       bbr->loss_in_cycle = 0;
146892 +       bbr->ecn_in_cycle = 0;
146893 +       bbr->bw_latest = 0;
146894 +       bbr->inflight_latest = 0;
146897 +/* Update (most of) our congestion signals: track the recent rate and volume of
146898 + * delivered data, presence of loss, and EWMA degree of ECN marking.
146899 + */
146900 +static void bbr2_update_congestion_signals(
146901 +       struct sock *sk, const struct rate_sample *rs, struct bbr_context *ctx)
146903 +       struct tcp_sock *tp = tcp_sk(sk);
146904 +       struct bbr *bbr = inet_csk_ca(sk);
146905 +       u64 bw;
146907 +       bbr->loss_round_start = 0;
146908 +       if (rs->interval_us <= 0 || !rs->acked_sacked)
146909 +               return; /* Not a valid observation */
146910 +       bw = ctx->sample_bw;
146912 +       if (!rs->is_app_limited || bw >= bbr_max_bw(sk))
146913 +               bbr2_take_bw_hi_sample(sk, bw);
146915 +       bbr->loss_in_round |= (rs->losses > 0);
146917 +       /* Update rate and volume of delivered data from latest round trip: */
146918 +       bbr->bw_latest       = max_t(u32, bbr->bw_latest,       ctx->sample_bw);
146919 +       bbr->inflight_latest = max_t(u32, bbr->inflight_latest, rs->delivered);
146921 +       if (before(rs->prior_delivered, bbr->loss_round_delivered))
146922 +               return;         /* skip the per-round-trip updates */
146923 +       /* Now do per-round-trip updates. */
146924 +       bbr->loss_round_delivered = tp->delivered;  /* mark round trip */
146925 +       bbr->loss_round_start = 1;
146926 +       bbr2_adapt_lower_bounds(sk);
146928 +       /* Update windowed "latest" (single-round-trip) filters. */
146929 +       bbr->loss_in_round = 0;
146930 +       bbr->ecn_in_round  = 0;
146931 +       bbr->bw_latest = ctx->sample_bw;
146932 +       bbr->inflight_latest = rs->delivered;
146935 +/* Bandwidth probing can cause loss. To help coexistence with loss-based
146936 + * congestion control we spread out our probing in a Reno-conscious way. Due to
146937 + * the shape of the Reno sawtooth, the time required between loss epochs for an
146938 + * idealized Reno flow is a number of round trips that is the BDP of that
146939 + * flow. We count packet-timed round trips directly, since measured RTT can
146940 + * vary widely, and Reno is driven by packet-timed round trips.
146941 + */
146942 +static bool bbr2_is_reno_coexistence_probe_time(struct sock *sk)
146944 +       struct bbr *bbr = inet_csk_ca(sk);
146945 +       u32 inflight, rounds, reno_gain, reno_rounds;
146947 +       /* Random loss can shave some small percentage off of our inflight
146948 +        * in each round. To survive this, flows need robust periodic probes.
146949 +        */
146950 +       rounds = bbr->params.bw_probe_max_rounds;
146952 +       reno_gain = bbr->params.bw_probe_reno_gain;
146953 +       if (reno_gain) {
146954 +               inflight = bbr2_target_inflight(sk);
146955 +               reno_rounds = ((u64)inflight * reno_gain) >> BBR_SCALE;
146956 +               rounds = min(rounds, reno_rounds);
146957 +       }
146958 +       return bbr->rounds_since_probe >= rounds;
146961 +/* How long do we want to wait before probing for bandwidth (and risking
146962 + * loss)? We randomize the wait, for better mixing and fairness convergence.
146964 + * We bound the Reno-coexistence inter-bw-probe time to be 62-63 round trips.
146965 + * This is calculated to allow fairness with a 25Mbps, 30ms Reno flow,
146966 + * (eg 4K video to a broadband user):
146967 + *   BDP = 25Mbps * .030sec /(1514bytes) = 61.9 packets
146969 + * We bound the BBR-native inter-bw-probe wall clock time to be:
146970 + *  (a) higher than 2 sec: to try to avoid causing loss for a long enough time
146971 + *      to allow Reno at 30ms to get 4K video bw, the inter-bw-probe time must
146972 + *      be at least: 25Mbps * .030sec / (1514bytes) * 0.030sec = 1.9secs
146973 + *  (b) lower than 3 sec: to ensure flows can start probing in a reasonable
146974 + *      amount of time to discover unutilized bw on human-scale interactive
146975 + *      time-scales (e.g. perhaps traffic from a web page download that we
146976 + *      were competing with is now complete).
146977 + */
146978 +static void bbr2_pick_probe_wait(struct sock *sk)
146980 +       struct bbr *bbr = inet_csk_ca(sk);
146982 +       /* Decide the random round-trip bound for wait until probe: */
146983 +       bbr->rounds_since_probe =
146984 +               prandom_u32_max(bbr->params.bw_probe_rand_rounds);
146985 +       /* Decide the random wall clock bound for wait until probe: */
146986 +       bbr->probe_wait_us = bbr->params.bw_probe_base_us +
146987 +                            prandom_u32_max(bbr->params.bw_probe_rand_us);
146990 +static void bbr2_set_cycle_idx(struct sock *sk, int cycle_idx)
146992 +       struct bbr *bbr = inet_csk_ca(sk);
146994 +       bbr->cycle_idx = cycle_idx;
146995 +       /* New phase, so need to update cwnd and pacing rate. */
146996 +       bbr->try_fast_path = 0;
146999 +/* Send at estimated bw to fill the pipe, but not queue. We need this phase
147000 + * before PROBE_UP, because as soon as we send faster than the available bw
147001 + * we will start building a queue, and if the buffer is shallow we can cause
147002 + * loss. If we do not fill the pipe before we cause this loss, our bw_hi and
147003 + * inflight_hi estimates will underestimate.
147004 + */
147005 +static void bbr2_start_bw_probe_refill(struct sock *sk, u32 bw_probe_up_rounds)
147007 +       struct tcp_sock *tp = tcp_sk(sk);
147008 +       struct bbr *bbr = inet_csk_ca(sk);
147010 +       bbr2_reset_lower_bounds(sk);
147011 +       if (bbr->inflight_hi != ~0U)
147012 +               bbr->inflight_hi += bbr->params.refill_add_inc;
147013 +       bbr->bw_probe_up_rounds = bw_probe_up_rounds;
147014 +       bbr->bw_probe_up_acks = 0;
147015 +       bbr->stopped_risky_probe = 0;
147016 +       bbr->ack_phase = BBR_ACKS_REFILLING;
147017 +       bbr->next_rtt_delivered = tp->delivered;
147018 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_REFILL);
147021 +/* Now probe max deliverable data rate and volume. */
147022 +static void bbr2_start_bw_probe_up(struct sock *sk)
147024 +       struct tcp_sock *tp = tcp_sk(sk);
147025 +       struct bbr *bbr = inet_csk_ca(sk);
147027 +       bbr->ack_phase = BBR_ACKS_PROBE_STARTING;
147028 +       bbr->next_rtt_delivered = tp->delivered;
147029 +       bbr->cycle_mstamp = tp->tcp_mstamp;
147030 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_UP);
147031 +       bbr2_raise_inflight_hi_slope(sk);
147034 +/* Start a new PROBE_BW probing cycle of some wall clock length. Pick a wall
147035 + * clock time at which to probe beyond an inflight that we think to be
147036 + * safe. This will knowingly risk packet loss, so we want to do this rarely, to
147037 + * keep packet loss rates low. Also start a round-trip counter, to probe faster
147038 + * if we estimate a Reno flow at our BDP would probe faster.
147039 + */
147040 +static void bbr2_start_bw_probe_down(struct sock *sk)
147042 +       struct tcp_sock *tp = tcp_sk(sk);
147043 +       struct bbr *bbr = inet_csk_ca(sk);
147045 +       bbr2_reset_congestion_signals(sk);
147046 +       bbr->bw_probe_up_cnt = ~0U;     /* not growing inflight_hi any more */
147047 +       bbr2_pick_probe_wait(sk);
147048 +       bbr->cycle_mstamp = tp->tcp_mstamp;             /* start wall clock */
147049 +       bbr->ack_phase = BBR_ACKS_PROBE_STOPPING;
147050 +       bbr->next_rtt_delivered = tp->delivered;
147051 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_DOWN);
147054 +/* Cruise: maintain what we estimate to be a neutral, conservative
147055 + * operating point, without attempting to probe up for bandwidth or down for
147056 + * RTT, and only reducing inflight in response to loss/ECN signals.
147057 + */
147058 +static void bbr2_start_bw_probe_cruise(struct sock *sk)
147060 +       struct bbr *bbr = inet_csk_ca(sk);
147062 +       if (bbr->inflight_lo != ~0U)
147063 +               bbr->inflight_lo = min(bbr->inflight_lo, bbr->inflight_hi);
147065 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_CRUISE);
147068 +/* Loss and/or ECN rate is too high while probing.
147069 + * Adapt (once per bw probe) by cutting inflight_hi and then restarting cycle.
147070 + */
147071 +static void bbr2_handle_inflight_too_high(struct sock *sk,
147072 +                                         const struct rate_sample *rs)
147074 +       struct bbr *bbr = inet_csk_ca(sk);
147075 +       const u32 beta = bbr->params.beta;
147077 +       bbr->prev_probe_too_high = 1;
147078 +       bbr->bw_probe_samples = 0;  /* only react once per probe */
147079 +       bbr->debug.event = 'L';     /* Loss/ECN too high */
147080 +       /* If we are app-limited then we are not robustly
147081 +        * probing the max volume of inflight data we think
147082 +        * might be safe (analogous to how app-limited bw
147083 +        * samples are not known to be robustly probing bw).
147084 +        */
147085 +       if (!rs->is_app_limited)
147086 +               bbr->inflight_hi = max_t(u32, rs->tx_in_flight,
147087 +                                        (u64)bbr2_target_inflight(sk) *
147088 +                                        (BBR_UNIT - beta) >> BBR_SCALE);
147089 +       if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == BBR_BW_PROBE_UP)
147090 +               bbr2_start_bw_probe_down(sk);
147093 +/* If we're seeing bw and loss samples reflecting our bw probing, adapt
147094 + * using the signals we see. If loss or ECN mark rate gets too high, then adapt
147095 + * inflight_hi downward. If we're able to push inflight higher without such
147096 + * signals, push higher: adapt inflight_hi upward.
147097 + */
147098 +static bool bbr2_adapt_upper_bounds(struct sock *sk,
147099 +                                  const struct rate_sample *rs)
147101 +       struct bbr *bbr = inet_csk_ca(sk);
147103 +       /* Track when we'll see bw/loss samples resulting from our bw probes. */
147104 +       if (bbr->ack_phase == BBR_ACKS_PROBE_STARTING && bbr->round_start)
147105 +               bbr->ack_phase = BBR_ACKS_PROBE_FEEDBACK;
147106 +       if (bbr->ack_phase == BBR_ACKS_PROBE_STOPPING && bbr->round_start) {
147107 +               /* End of samples from bw probing phase. */
147108 +               bbr->bw_probe_samples = 0;
147109 +               bbr->ack_phase = BBR_ACKS_INIT;
147110 +               /* At this point in the cycle, our current bw sample is also
147111 +                * our best recent chance at finding the highest available bw
147112 +                * for this flow. So now is the best time to forget the bw
147113 +                * samples from the previous cycle, by advancing the window.
147114 +                */
147115 +               if (bbr->mode == BBR_PROBE_BW && !rs->is_app_limited)
147116 +                       bbr2_advance_bw_hi_filter(sk);
147117 +               /* If we had an inflight_hi, then probed and pushed inflight all
147118 +                * the way up to hit that inflight_hi without seeing any
147119 +                * high loss/ECN in all the resulting ACKs from that probing,
147120 +                * then probe up again, this time letting inflight persist at
147121 +                * inflight_hi for a round trip, then accelerating beyond.
147122 +                */
147123 +               if (bbr->mode == BBR_PROBE_BW &&
147124 +                   bbr->stopped_risky_probe && !bbr->prev_probe_too_high) {
147125 +                       bbr->debug.event = 'R';  /* reprobe */
147126 +                       bbr2_start_bw_probe_refill(sk, 0);
147127 +                       return true;  /* yes, decided state transition */
147128 +               }
147129 +       }
147131 +       if (bbr2_is_inflight_too_high(sk, rs)) {
147132 +               if (bbr->bw_probe_samples)  /*  sample is from bw probing? */
147133 +                       bbr2_handle_inflight_too_high(sk, rs);
147134 +       } else {
147135 +               /* Loss/ECN rate is declared safe. Adjust upper bound upward. */
147136 +               if (bbr->inflight_hi == ~0U)  /* no excess queue signals yet? */
147137 +                       return false;
147139 +               /* To be resilient to random loss, we must raise inflight_hi
147140 +                * if we observe in any phase that a higher level is safe.
147141 +                */
147142 +               if (rs->tx_in_flight > bbr->inflight_hi) {
147143 +                       bbr->inflight_hi = rs->tx_in_flight;
147144 +                       bbr->debug.event = 'U';  /* raise up inflight_hi */
147145 +               }
147147 +               if (bbr->mode == BBR_PROBE_BW &&
147148 +                   bbr->cycle_idx == BBR_BW_PROBE_UP)
147149 +                       bbr2_probe_inflight_hi_upward(sk, rs);
147150 +       }
147152 +       return false;
147155 +/* Check if it's time to probe for bandwidth now, and if so, kick it off. */
147156 +static bool bbr2_check_time_to_probe_bw(struct sock *sk)
147158 +       struct bbr *bbr = inet_csk_ca(sk);
147159 +       u32 n;
147161 +       /* If we seem to be at an operating point where we are not seeing loss
147162 +        * but we are seeing ECN marks, then when the ECN marks cease we reprobe
147163 +        * quickly (in case a burst of cross-traffic has ceased and freed up bw,
147164 +        * or in case we are sharing with multiplicatively probing traffic).
147165 +        */
147166 +       if (bbr->params.ecn_reprobe_gain && bbr->ecn_eligible &&
147167 +           bbr->ecn_in_cycle && !bbr->loss_in_cycle &&
147168 +           inet_csk(sk)->icsk_ca_state == TCP_CA_Open) {
147169 +               bbr->debug.event = 'A';  /* *A*ll clear to probe *A*gain */
147170 +               /* Calculate n so that when bbr2_raise_inflight_hi_slope()
147171 +                * computes growth_this_round as 2^n it will be roughly the
147172 +                * desired volume of data (inflight_hi*ecn_reprobe_gain).
147173 +                */
147174 +               n = ilog2((((u64)bbr->inflight_hi *
147175 +                           bbr->params.ecn_reprobe_gain) >> BBR_SCALE));
147176 +               bbr2_start_bw_probe_refill(sk, n);
147177 +               return true;
147178 +       }
147180 +       if (bbr2_has_elapsed_in_phase(sk, bbr->probe_wait_us) ||
147181 +           bbr2_is_reno_coexistence_probe_time(sk)) {
147182 +               bbr2_start_bw_probe_refill(sk, 0);
147183 +               return true;
147184 +       }
147185 +       return false;
147188 +/* Is it time to transition from PROBE_DOWN to PROBE_CRUISE? */
147189 +static bool bbr2_check_time_to_cruise(struct sock *sk, u32 inflight, u32 bw)
147191 +       struct bbr *bbr = inet_csk_ca(sk);
147192 +       bool is_under_bdp, is_long_enough;
147194 +       /* Always need to pull inflight down to leave headroom in queue. */
147195 +       if (inflight > bbr2_inflight_with_headroom(sk))
147196 +               return false;
147198 +       is_under_bdp = inflight <= bbr_inflight(sk, bw, BBR_UNIT);
147199 +       if (bbr->params.drain_to_target)
147200 +               return is_under_bdp;
147202 +       is_long_enough = bbr2_has_elapsed_in_phase(sk, bbr->min_rtt_us);
147203 +       return is_under_bdp || is_long_enough;
147206 +/* PROBE_BW state machine: cruise, refill, probe for bw, or drain? */
147207 +static void bbr2_update_cycle_phase(struct sock *sk,
147208 +                                   const struct rate_sample *rs)
147210 +       struct bbr *bbr = inet_csk_ca(sk);
147211 +       bool is_risky = false, is_queuing = false;
147212 +       u32 inflight, bw;
147214 +       if (!bbr_full_bw_reached(sk))
147215 +               return;
147217 +       /* In DRAIN, PROBE_BW, or PROBE_RTT, adjust upper bounds. */
147218 +       if (bbr2_adapt_upper_bounds(sk, rs))
147219 +               return;         /* already decided state transition */
147221 +       if (bbr->mode != BBR_PROBE_BW)
147222 +               return;
147224 +       inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
147225 +       bw = bbr_max_bw(sk);
147227 +       switch (bbr->cycle_idx) {
147228 +       /* First we spend most of our time cruising with a pacing_gain of 1.0,
147229 +        * which paces at the estimated bw, to try to fully use the pipe
147230 +        * without building queue. If we encounter loss/ECN marks, we adapt
147231 +        * by slowing down.
147232 +        */
147233 +       case BBR_BW_PROBE_CRUISE:
147234 +               if (bbr2_check_time_to_probe_bw(sk))
147235 +                       return;         /* already decided state transition */
147236 +               break;
147238 +       /* After cruising, when it's time to probe, we first "refill": we send
147239 +        * at the estimated bw to fill the pipe, before probing higher and
147240 +        * knowingly risking overflowing the bottleneck buffer (causing loss).
147241 +        */
147242 +       case BBR_BW_PROBE_REFILL:
147243 +               if (bbr->round_start) {
147244 +                       /* After one full round trip of sending in REFILL, we
147245 +                        * start to see bw samples reflecting our REFILL, which
147246 +                        * may be putting too much data in flight.
147247 +                        */
147248 +                       bbr->bw_probe_samples = 1;
147249 +                       bbr2_start_bw_probe_up(sk);
147250 +               }
147251 +               break;
147253 +       /* After we refill the pipe, we probe by using a pacing_gain > 1.0, to
147254 +        * probe for bw. If we have not seen loss/ECN, we try to raise inflight
147255 +        * to at least pacing_gain*BDP; note that this may take more than
147256 +        * min_rtt if min_rtt is small (e.g. on a LAN).
147257 +        *
147258 +        * We terminate PROBE_UP bandwidth probing upon any of the following:
147259 +        *
147260 +        * (1) We've pushed inflight up to hit the inflight_hi target set in the
147261 +        *     most recent previous bw probe phase. Thus we want to start
147262 +        *     draining the queue immediately because it's very likely the most
147263 +        *     recently sent packets will fill the queue and cause drops.
147264 +        *     (checked here)
147265 +        * (2) We have probed for at least 1*min_rtt_us, and the
147266 +        *     estimated queue is high enough (inflight > 1.25 * estimated_bdp).
147267 +        *     (checked here)
147268 +        * (3) Loss filter says loss rate is "too high".
147269 +        *     (checked in bbr_is_inflight_too_high())
147270 +        * (4) ECN filter says ECN mark rate is "too high".
147271 +        *     (checked in bbr_is_inflight_too_high())
147272 +        */
147273 +       case BBR_BW_PROBE_UP:
147274 +               if (bbr->prev_probe_too_high &&
147275 +                   inflight >= bbr->inflight_hi) {
147276 +                       bbr->stopped_risky_probe = 1;
147277 +                       is_risky = true;
147278 +                       bbr->debug.event = 'D';   /* D for danger */
147279 +               } else if (bbr2_has_elapsed_in_phase(sk, bbr->min_rtt_us) &&
147280 +                          inflight >=
147281 +                          bbr_inflight(sk, bw,
147282 +                                       bbr->params.bw_probe_pif_gain)) {
147283 +                       is_queuing = true;
147284 +                       bbr->debug.event = 'Q'; /* building Queue */
147285 +               }
147286 +               if (is_risky || is_queuing) {
147287 +                       bbr->prev_probe_too_high = 0;  /* no loss/ECN (yet) */
147288 +                       bbr2_start_bw_probe_down(sk);  /* restart w/ down */
147289 +               }
147290 +               break;
147292 +       /* After probing in PROBE_UP, we have usually accumulated some data in
147293 +        * the bottleneck buffer (if bw probing didn't find more bw). We next
147294 +        * enter PROBE_DOWN to try to drain any excess data from the queue. To
147295 +        * do this, we use a pacing_gain < 1.0. We hold this pacing gain until
147296 +        * our inflight is less than that target cruising point, which is the
147297 +        * minimum of (a) the amount needed to leave headroom, and (b) the
147298 +        * estimated BDP. Once inflight falls to match the target, we estimate
147299 +        * the queue is drained; persisting would underutilize the pipe.
147300 +        */
147301 +       case BBR_BW_PROBE_DOWN:
147302 +               if (bbr2_check_time_to_probe_bw(sk))
147303 +                       return;         /* already decided state transition */
147304 +               if (bbr2_check_time_to_cruise(sk, inflight, bw))
147305 +                       bbr2_start_bw_probe_cruise(sk);
147306 +               break;
147308 +       default:
147309 +               WARN_ONCE(1, "BBR invalid cycle index %u\n", bbr->cycle_idx);
147310 +       }
147313 +/* Exiting PROBE_RTT, so return to bandwidth probing in STARTUP or PROBE_BW. */
147314 +static void bbr2_exit_probe_rtt(struct sock *sk)
147316 +       struct bbr *bbr = inet_csk_ca(sk);
147318 +       bbr2_reset_lower_bounds(sk);
147319 +       if (bbr_full_bw_reached(sk)) {
147320 +               bbr->mode = BBR_PROBE_BW;
147321 +               /* Raising inflight after PROBE_RTT may cause loss, so reset
147322 +                * the PROBE_BW clock and schedule the next bandwidth probe for
147323 +                * a friendly and randomized future point in time.
147324 +                */
147325 +               bbr2_start_bw_probe_down(sk);
147326 +               /* Since we are exiting PROBE_RTT, we know inflight is
147327 +                * below our estimated BDP, so it is reasonable to cruise.
147328 +                */
147329 +               bbr2_start_bw_probe_cruise(sk);
147330 +       } else {
147331 +               bbr->mode = BBR_STARTUP;
147332 +       }
147335 +/* Exit STARTUP based on loss rate > 1% and loss gaps in round >= N. Wait until
147336 + * the end of the round in recovery to get a good estimate of how many packets
147337 + * have been lost, and how many we need to drain with a low pacing rate.
147338 + */
147339 +static void bbr2_check_loss_too_high_in_startup(struct sock *sk,
147340 +                                              const struct rate_sample *rs)
147342 +       struct bbr *bbr = inet_csk_ca(sk);
147344 +       if (bbr_full_bw_reached(sk))
147345 +               return;
147347 +       /* For STARTUP exit, check the loss rate at the end of each round trip
147348 +        * of Recovery episodes in STARTUP. We check the loss rate at the end
147349 +        * of the round trip to filter out noisy/low loss and have a better
147350 +        * sense of inflight (extent of loss), so we can drain more accurately.
147351 +        */
147352 +       if (rs->losses && bbr->loss_events_in_round < 0xf)
147353 +               bbr->loss_events_in_round++;  /* update saturating counter */
147354 +       if (bbr->params.full_loss_cnt && bbr->loss_round_start &&
147355 +           inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery &&
147356 +           bbr->loss_events_in_round >= bbr->params.full_loss_cnt &&
147357 +           bbr2_is_inflight_too_high(sk, rs)) {
147358 +               bbr->debug.event = 'P';  /* Packet loss caused STARTUP exit */
147359 +               bbr2_handle_queue_too_high_in_startup(sk);
147360 +               return;
147361 +       }
147362 +       if (bbr->loss_round_start)
147363 +               bbr->loss_events_in_round = 0;
147366 +/* If we are done draining, advance into steady state operation in PROBE_BW. */
147367 +static void bbr2_check_drain(struct sock *sk, const struct rate_sample *rs,
147368 +                            struct bbr_context *ctx)
147370 +       struct bbr *bbr = inet_csk_ca(sk);
147372 +       if (bbr_check_drain(sk, rs, ctx)) {
147373 +               bbr->mode = BBR_PROBE_BW;
147374 +               bbr2_start_bw_probe_down(sk);
147375 +       }
147378 +static void bbr2_update_model(struct sock *sk, const struct rate_sample *rs,
147379 +                             struct bbr_context *ctx)
147381 +       bbr2_update_congestion_signals(sk, rs, ctx);
147382 +       bbr_update_ack_aggregation(sk, rs);
147383 +       bbr2_check_loss_too_high_in_startup(sk, rs);
147384 +       bbr_check_full_bw_reached(sk, rs);
147385 +       bbr2_check_drain(sk, rs, ctx);
147386 +       bbr2_update_cycle_phase(sk, rs);
147387 +       bbr_update_min_rtt(sk, rs);
147390 +/* Fast path for app-limited case.
147392 + * On each ack, we execute bbr state machine, which primarily consists of:
147393 + * 1) update model based on new rate sample, and
147394 + * 2) update control based on updated model or state change.
147396 + * There are certain workload/scenarios, e.g. app-limited case, where
147397 + * either we can skip updating model or we can skip update of both model
147398 + * as well as control. This provides significant softirq cpu savings for
147399 + * processing incoming acks.
147401 + * In case of app-limited, if there is no congestion (loss/ecn) and
147402 + * if observed bw sample is less than current estimated bw, then we can
147403 + * skip some of the computation in bbr state processing:
147405 + * - if there is no rtt/mode/phase change: In this case, since all the
147406 + *   parameters of the network model are constant, we can skip model
147407 + *   as well control update.
147409 + * - else we can skip rest of the model update. But we still need to
147410 + *   update the control to account for the new rtt/mode/phase.
147412 + * Returns whether we can take fast path or not.
147413 + */
147414 +static bool bbr2_fast_path(struct sock *sk, bool *update_model,
147415 +               const struct rate_sample *rs, struct bbr_context *ctx)
147417 +       struct bbr *bbr = inet_csk_ca(sk);
147418 +       u32 prev_min_rtt_us, prev_mode;
147420 +       if (bbr->params.fast_path && bbr->try_fast_path &&
147421 +           rs->is_app_limited && ctx->sample_bw < bbr_max_bw(sk) &&
147422 +           !bbr->loss_in_round && !bbr->ecn_in_round) {
147423 +               prev_mode = bbr->mode;
147424 +               prev_min_rtt_us = bbr->min_rtt_us;
147425 +               bbr2_check_drain(sk, rs, ctx);
147426 +               bbr2_update_cycle_phase(sk, rs);
147427 +               bbr_update_min_rtt(sk, rs);
147429 +               if (bbr->mode == prev_mode &&
147430 +                   bbr->min_rtt_us == prev_min_rtt_us &&
147431 +                   bbr->try_fast_path)
147432 +                       return true;
147434 +               /* Skip model update, but control still needs to be updated */
147435 +               *update_model = false;
147436 +       }
147437 +       return false;
147440 +static void bbr2_main(struct sock *sk, const struct rate_sample *rs)
147442 +       struct tcp_sock *tp = tcp_sk(sk);
147443 +       struct bbr *bbr = inet_csk_ca(sk);
147444 +       struct bbr_context ctx = { 0 };
147445 +       bool update_model = true;
147446 +       u32 bw;
147448 +       bbr->debug.event = '.';  /* init to default NOP (no event yet) */
147450 +       bbr_update_round_start(sk, rs, &ctx);
147451 +       if (bbr->round_start) {
147452 +               bbr->rounds_since_probe =
147453 +                       min_t(s32, bbr->rounds_since_probe + 1, 0xFF);
147454 +               bbr2_update_ecn_alpha(sk);
147455 +       }
147457 +       bbr->ecn_in_round  |= rs->is_ece;
147458 +       bbr_calculate_bw_sample(sk, rs, &ctx);
147460 +       if (bbr2_fast_path(sk, &update_model, rs, &ctx))
147461 +               goto out;
147463 +       if (update_model)
147464 +               bbr2_update_model(sk, rs, &ctx);
147466 +       bbr_update_gains(sk);
147467 +       bw = bbr_bw(sk);
147468 +       bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
147469 +       bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain,
147470 +                    tp->snd_cwnd, &ctx);
147471 +       bbr2_bound_cwnd_for_inflight_model(sk);
147473 +out:
147474 +       bbr->prev_ca_state = inet_csk(sk)->icsk_ca_state;
147475 +       bbr->loss_in_cycle |= rs->lost > 0;
147476 +       bbr->ecn_in_cycle  |= rs->delivered_ce > 0;
147478 +       bbr_debug(sk, rs->acked_sacked, rs, &ctx);
147481 +/* Module parameters that are settable by TCP_CONGESTION_PARAMS are declared
147482 + * down here, so that the algorithm functions that use the parameters must use
147483 + * the per-socket parameters; if they accidentally use the global version
147484 + * then there will be a compile error.
147485 + * TODO(ncardwell): move all per-socket parameters down to this section.
147486 + */
147488 +/* On losses, scale down inflight and pacing rate by beta scaled by BBR_SCALE.
147489 + * No loss response when 0. Max allwed value is 255.
147490 + */
147491 +static u32 bbr_beta = BBR_UNIT * 30 / 100;
147493 +/* Gain factor for ECN mark ratio samples, scaled by BBR_SCALE.
147494 + * Max allowed value is 255.
147495 + */
147496 +static u32 bbr_ecn_alpha_gain = BBR_UNIT * 1 / 16;  /* 1/16 = 6.25% */
147498 +/* The initial value for the ecn_alpha state variable. Default and max
147499 + * BBR_UNIT (256), representing 1.0. This allows a flow to respond quickly
147500 + * to congestion if the bottleneck is congested when the flow starts up.
147501 + */
147502 +static u32 bbr_ecn_alpha_init = BBR_UNIT;      /* 1.0, to respond quickly */
147504 +/* On ECN, cut inflight_lo to (1 - ecn_factor * ecn_alpha) scaled by BBR_SCALE.
147505 + * No ECN based bounding when 0. Max allowed value is 255.
147506 + */
147507 +static u32 bbr_ecn_factor = BBR_UNIT * 1 / 3;      /* 1/3 = 33% */
147509 +/* Estimate bw probing has gone too far if CE ratio exceeds this threshold.
147510 + * Scaled by BBR_SCALE. Disabled when 0. Max allowed is 255.
147511 + */
147512 +static u32 bbr_ecn_thresh = BBR_UNIT * 1 / 2;  /* 1/2 = 50% */
147514 +/* Max RTT (in usec) at which to use sender-side ECN logic.
147515 + * Disabled when 0 (ECN allowed at any RTT).
147516 + * Max allowed for the parameter is 524287 (0x7ffff) us, ~524 ms.
147517 + */
147518 +static u32 bbr_ecn_max_rtt_us = 5000;
147520 +/* If non-zero, if in a cycle with no losses but some ECN marks, after ECN
147521 + * clears then use a multiplicative increase to quickly reprobe bw by
147522 + * starting inflight probing at the given multiple of inflight_hi.
147523 + * Default for this experimental knob is 0 (disabled).
147524 + * Planned value for experiments: BBR_UNIT * 1 / 2 = 128, representing 0.5.
147525 + */
147526 +static u32 bbr_ecn_reprobe_gain;
147528 +/* Estimate bw probing has gone too far if loss rate exceeds this level. */
147529 +static u32 bbr_loss_thresh = BBR_UNIT * 2 / 100;  /* 2% loss */
147531 +/* Exit STARTUP if number of loss marking events in a Recovery round is >= N,
147532 + * and loss rate is higher than bbr_loss_thresh.
147533 + * Disabled if 0. Max allowed value is 15 (0xF).
147534 + */
147535 +static u32 bbr_full_loss_cnt = 8;
147537 +/* Exit STARTUP if number of round trips with ECN mark rate above ecn_thresh
147538 + * meets this count. Max allowed value is 3.
147539 + */
147540 +static u32 bbr_full_ecn_cnt = 2;
147542 +/* Fraction of unutilized headroom to try to leave in path upon high loss. */
147543 +static u32 bbr_inflight_headroom = BBR_UNIT * 15 / 100;
147545 +/* Multiplier to get target inflight (as multiple of BDP) for PROBE_UP phase.
147546 + * Default is 1.25x, as in BBR v1. Max allowed is 511.
147547 + */
147548 +static u32 bbr_bw_probe_pif_gain = BBR_UNIT * 5 / 4;
147550 +/* Multiplier to get Reno-style probe epoch duration as: k * BDP round trips.
147551 + * If zero, disables this BBR v2 Reno-style BDP-scaled coexistence mechanism.
147552 + * Max allowed is 511.
147553 + */
147554 +static u32 bbr_bw_probe_reno_gain = BBR_UNIT;
147556 +/* Max number of packet-timed rounds to wait before probing for bandwidth.  If
147557 + * we want to tolerate 1% random loss per round, and not have this cut our
147558 + * inflight too much, we must probe for bw periodically on roughly this scale.
147559 + * If low, limits Reno/CUBIC coexistence; if high, limits loss tolerance.
147560 + * We aim to be fair with Reno/CUBIC up to a BDP of at least:
147561 + *  BDP = 25Mbps * .030sec /(1514bytes) = 61.9 packets
147562 + */
147563 +static u32 bbr_bw_probe_max_rounds = 63;
147565 +/* Max amount of randomness to inject in round counting for Reno-coexistence.
147566 + * Max value is 15.
147567 + */
147568 +static u32 bbr_bw_probe_rand_rounds = 2;
147570 +/* Use BBR-native probe time scale starting at this many usec.
147571 + * We aim to be fair with Reno/CUBIC up to an inter-loss time epoch of at least:
147572 + *  BDP*RTT = 25Mbps * .030sec /(1514bytes) * 0.030sec = 1.9 secs
147573 + */
147574 +static u32 bbr_bw_probe_base_us = 2 * USEC_PER_SEC;  /* 2 secs */
147576 +/* Use BBR-native probes spread over this many usec: */
147577 +static u32 bbr_bw_probe_rand_us = 1 * USEC_PER_SEC;  /* 1 secs */
147579 +/* Undo the model changes made in loss recovery if recovery was spurious? */
147580 +static bool bbr_undo = true;
147582 +/* Use fast path if app-limited, no loss/ECN, and target cwnd was reached? */
147583 +static bool bbr_fast_path = true;      /* default: enabled */
147585 +/* Use fast ack mode ? */
147586 +static int bbr_fast_ack_mode = 1;      /* default: rwnd check off */
147588 +/* How much to additively increase inflight_hi when entering REFILL? */
147589 +static u32 bbr_refill_add_inc;         /* default: disabled */
147591 +module_param_named(beta,                 bbr_beta,                 uint, 0644);
147592 +module_param_named(ecn_alpha_gain,       bbr_ecn_alpha_gain,       uint, 0644);
147593 +module_param_named(ecn_alpha_init,       bbr_ecn_alpha_init,       uint, 0644);
147594 +module_param_named(ecn_factor,           bbr_ecn_factor,           uint, 0644);
147595 +module_param_named(ecn_thresh,           bbr_ecn_thresh,           uint, 0644);
147596 +module_param_named(ecn_max_rtt_us,       bbr_ecn_max_rtt_us,       uint, 0644);
147597 +module_param_named(ecn_reprobe_gain,     bbr_ecn_reprobe_gain,     uint, 0644);
147598 +module_param_named(loss_thresh,          bbr_loss_thresh,          uint, 0664);
147599 +module_param_named(full_loss_cnt,        bbr_full_loss_cnt,        uint, 0664);
147600 +module_param_named(full_ecn_cnt,         bbr_full_ecn_cnt,         uint, 0664);
147601 +module_param_named(inflight_headroom,    bbr_inflight_headroom,    uint, 0664);
147602 +module_param_named(bw_probe_pif_gain,    bbr_bw_probe_pif_gain,    uint, 0664);
147603 +module_param_named(bw_probe_reno_gain,   bbr_bw_probe_reno_gain,   uint, 0664);
147604 +module_param_named(bw_probe_max_rounds,  bbr_bw_probe_max_rounds,  uint, 0664);
147605 +module_param_named(bw_probe_rand_rounds, bbr_bw_probe_rand_rounds, uint, 0664);
147606 +module_param_named(bw_probe_base_us,     bbr_bw_probe_base_us,     uint, 0664);
147607 +module_param_named(bw_probe_rand_us,     bbr_bw_probe_rand_us,     uint, 0664);
147608 +module_param_named(undo,                 bbr_undo,                 bool, 0664);
147609 +module_param_named(fast_path,           bbr_fast_path,            bool, 0664);
147610 +module_param_named(fast_ack_mode,       bbr_fast_ack_mode,        uint, 0664);
147611 +module_param_named(refill_add_inc,       bbr_refill_add_inc,       uint, 0664);
147613 +static void bbr2_init(struct sock *sk)
147615 +       struct tcp_sock *tp = tcp_sk(sk);
147616 +       struct bbr *bbr = inet_csk_ca(sk);
147618 +       bbr_init(sk);   /* run shared init code for v1 and v2 */
147620 +       /* BBR v2 parameters: */
147621 +       bbr->params.beta = min_t(u32, 0xFFU, bbr_beta);
147622 +       bbr->params.ecn_alpha_gain = min_t(u32, 0xFFU, bbr_ecn_alpha_gain);
147623 +       bbr->params.ecn_alpha_init = min_t(u32, BBR_UNIT, bbr_ecn_alpha_init);
147624 +       bbr->params.ecn_factor = min_t(u32, 0xFFU, bbr_ecn_factor);
147625 +       bbr->params.ecn_thresh = min_t(u32, 0xFFU, bbr_ecn_thresh);
147626 +       bbr->params.ecn_max_rtt_us = min_t(u32, 0x7ffffU, bbr_ecn_max_rtt_us);
147627 +       bbr->params.ecn_reprobe_gain = min_t(u32, 0x1FF, bbr_ecn_reprobe_gain);
147628 +       bbr->params.loss_thresh = min_t(u32, 0xFFU, bbr_loss_thresh);
147629 +       bbr->params.full_loss_cnt = min_t(u32, 0xFU, bbr_full_loss_cnt);
147630 +       bbr->params.full_ecn_cnt = min_t(u32, 0x3U, bbr_full_ecn_cnt);
147631 +       bbr->params.inflight_headroom =
147632 +               min_t(u32, 0xFFU, bbr_inflight_headroom);
147633 +       bbr->params.bw_probe_pif_gain =
147634 +               min_t(u32, 0x1FFU, bbr_bw_probe_pif_gain);
147635 +       bbr->params.bw_probe_reno_gain =
147636 +               min_t(u32, 0x1FFU, bbr_bw_probe_reno_gain);
147637 +       bbr->params.bw_probe_max_rounds =
147638 +               min_t(u32, 0xFFU, bbr_bw_probe_max_rounds);
147639 +       bbr->params.bw_probe_rand_rounds =
147640 +               min_t(u32, 0xFU, bbr_bw_probe_rand_rounds);
147641 +       bbr->params.bw_probe_base_us =
147642 +               min_t(u32, (1 << 26) - 1, bbr_bw_probe_base_us);
147643 +       bbr->params.bw_probe_rand_us =
147644 +               min_t(u32, (1 << 26) - 1, bbr_bw_probe_rand_us);
147645 +       bbr->params.undo = bbr_undo;
147646 +       bbr->params.fast_path = bbr_fast_path ? 1 : 0;
147647 +       bbr->params.refill_add_inc = min_t(u32, 0x3U, bbr_refill_add_inc);
147649 +       /* BBR v2 state: */
147650 +       bbr->initialized = 1;
147651 +       /* Start sampling ECN mark rate after first full flight is ACKed: */
147652 +       bbr->loss_round_delivered = tp->delivered + 1;
147653 +       bbr->loss_round_start = 0;
147654 +       bbr->undo_bw_lo = 0;
147655 +       bbr->undo_inflight_lo = 0;
147656 +       bbr->undo_inflight_hi = 0;
147657 +       bbr->loss_events_in_round = 0;
147658 +       bbr->startup_ecn_rounds = 0;
147659 +       bbr2_reset_congestion_signals(sk);
147660 +       bbr->bw_lo = ~0U;
147661 +       bbr->bw_hi[0] = 0;
147662 +       bbr->bw_hi[1] = 0;
147663 +       bbr->inflight_lo = ~0U;
147664 +       bbr->inflight_hi = ~0U;
147665 +       bbr->bw_probe_up_cnt = ~0U;
147666 +       bbr->bw_probe_up_acks = 0;
147667 +       bbr->bw_probe_up_rounds = 0;
147668 +       bbr->probe_wait_us = 0;
147669 +       bbr->stopped_risky_probe = 0;
147670 +       bbr->ack_phase = BBR_ACKS_INIT;
147671 +       bbr->rounds_since_probe = 0;
147672 +       bbr->bw_probe_samples = 0;
147673 +       bbr->prev_probe_too_high = 0;
147674 +       bbr->ecn_eligible = 0;
147675 +       bbr->ecn_alpha = bbr->params.ecn_alpha_init;
147676 +       bbr->alpha_last_delivered = 0;
147677 +       bbr->alpha_last_delivered_ce = 0;
147679 +       tp->fast_ack_mode = min_t(u32, 0x2U, bbr_fast_ack_mode);
147682 +/* Core TCP stack informs us that the given skb was just marked lost. */
147683 +static void bbr2_skb_marked_lost(struct sock *sk, const struct sk_buff *skb)
147685 +       struct tcp_sock *tp = tcp_sk(sk);
147686 +       struct bbr *bbr = inet_csk_ca(sk);
147687 +       struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
147688 +       struct rate_sample rs;
147690 +       /* Capture "current" data over the full round trip of loss,
147691 +        * to have a better chance to see the full capacity of the path.
147692 +       */
147693 +       if (!bbr->loss_in_round)  /* first loss in this round trip? */
147694 +               bbr->loss_round_delivered = tp->delivered;  /* set round trip */
147695 +       bbr->loss_in_round = 1;
147696 +       bbr->loss_in_cycle = 1;
147698 +       if (!bbr->bw_probe_samples)
147699 +               return;  /* not an skb sent while probing for bandwidth */
147700 +       if (unlikely(!scb->tx.delivered_mstamp))
147701 +               return;  /* skb was SACKed, reneged, marked lost; ignore it */
147702 +       /* We are probing for bandwidth. Construct a rate sample that
147703 +        * estimates what happened in the flight leading up to this lost skb,
147704 +        * then see if the loss rate went too high, and if so at which packet.
147705 +        */
147706 +       memset(&rs, 0, sizeof(rs));
147707 +       rs.tx_in_flight = scb->tx.in_flight;
147708 +       rs.lost = tp->lost - scb->tx.lost;
147709 +       rs.is_app_limited = scb->tx.is_app_limited;
147710 +       if (bbr2_is_inflight_too_high(sk, &rs)) {
147711 +               rs.tx_in_flight = bbr2_inflight_hi_from_lost_skb(sk, &rs, skb);
147712 +               bbr2_handle_inflight_too_high(sk, &rs);
147713 +       }
147716 +/* Revert short-term model if current loss recovery event was spurious. */
147717 +static u32 bbr2_undo_cwnd(struct sock *sk)
147719 +       struct tcp_sock *tp = tcp_sk(sk);
147720 +       struct bbr *bbr = inet_csk_ca(sk);
147722 +       bbr->debug.undo = 1;
147723 +       bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
147724 +       bbr->full_bw_cnt = 0;
147725 +       bbr->loss_in_round = 0;
147727 +       if (!bbr->params.undo)
147728 +               return tp->snd_cwnd;
147730 +       /* Revert to cwnd and other state saved before loss episode. */
147731 +       bbr->bw_lo = max(bbr->bw_lo, bbr->undo_bw_lo);
147732 +       bbr->inflight_lo = max(bbr->inflight_lo, bbr->undo_inflight_lo);
147733 +       bbr->inflight_hi = max(bbr->inflight_hi, bbr->undo_inflight_hi);
147734 +       return bbr->prior_cwnd;
147737 +/* Entering loss recovery, so save state for when we undo recovery. */
147738 +static u32 bbr2_ssthresh(struct sock *sk)
147740 +       struct bbr *bbr = inet_csk_ca(sk);
147742 +       bbr_save_cwnd(sk);
147743 +       /* For undo, save state that adapts based on loss signal. */
147744 +       bbr->undo_bw_lo         = bbr->bw_lo;
147745 +       bbr->undo_inflight_lo   = bbr->inflight_lo;
147746 +       bbr->undo_inflight_hi   = bbr->inflight_hi;
147747 +       return tcp_sk(sk)->snd_ssthresh;
147750 +static enum tcp_bbr2_phase bbr2_get_phase(struct bbr *bbr)
147752 +       switch (bbr->mode) {
147753 +       case BBR_STARTUP:
147754 +               return BBR2_PHASE_STARTUP;
147755 +       case BBR_DRAIN:
147756 +               return BBR2_PHASE_DRAIN;
147757 +       case BBR_PROBE_BW:
147758 +               break;
147759 +       case BBR_PROBE_RTT:
147760 +               return BBR2_PHASE_PROBE_RTT;
147761 +       default:
147762 +               return BBR2_PHASE_INVALID;
147763 +       }
147764 +       switch (bbr->cycle_idx) {
147765 +       case BBR_BW_PROBE_UP:
147766 +               return BBR2_PHASE_PROBE_BW_UP;
147767 +       case BBR_BW_PROBE_DOWN:
147768 +               return BBR2_PHASE_PROBE_BW_DOWN;
147769 +       case BBR_BW_PROBE_CRUISE:
147770 +               return BBR2_PHASE_PROBE_BW_CRUISE;
147771 +       case BBR_BW_PROBE_REFILL:
147772 +               return BBR2_PHASE_PROBE_BW_REFILL;
147773 +       default:
147774 +               return BBR2_PHASE_INVALID;
147775 +       }
147778 +static size_t bbr2_get_info(struct sock *sk, u32 ext, int *attr,
147779 +                           union tcp_cc_info *info)
147781 +       if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
147782 +           ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
147783 +               struct bbr *bbr = inet_csk_ca(sk);
147784 +               u64 bw = bbr_bw_bytes_per_sec(sk, bbr_bw(sk));
147785 +               u64 bw_hi = bbr_bw_bytes_per_sec(sk, bbr_max_bw(sk));
147786 +               u64 bw_lo = bbr->bw_lo == ~0U ?
147787 +                       ~0ULL : bbr_bw_bytes_per_sec(sk, bbr->bw_lo);
147789 +               memset(&info->bbr2, 0, sizeof(info->bbr2));
147790 +               info->bbr2.bbr_bw_lsb           = (u32)bw;
147791 +               info->bbr2.bbr_bw_msb           = (u32)(bw >> 32);
147792 +               info->bbr2.bbr_min_rtt          = bbr->min_rtt_us;
147793 +               info->bbr2.bbr_pacing_gain      = bbr->pacing_gain;
147794 +               info->bbr2.bbr_cwnd_gain        = bbr->cwnd_gain;
147795 +               info->bbr2.bbr_bw_hi_lsb        = (u32)bw_hi;
147796 +               info->bbr2.bbr_bw_hi_msb        = (u32)(bw_hi >> 32);
147797 +               info->bbr2.bbr_bw_lo_lsb        = (u32)bw_lo;
147798 +               info->bbr2.bbr_bw_lo_msb        = (u32)(bw_lo >> 32);
147799 +               info->bbr2.bbr_mode             = bbr->mode;
147800 +               info->bbr2.bbr_phase            = (__u8)bbr2_get_phase(bbr);
147801 +               info->bbr2.bbr_version          = (__u8)2;
147802 +               info->bbr2.bbr_inflight_lo      = bbr->inflight_lo;
147803 +               info->bbr2.bbr_inflight_hi      = bbr->inflight_hi;
147804 +               info->bbr2.bbr_extra_acked      = bbr_extra_acked(sk);
147805 +               *attr = INET_DIAG_BBRINFO;
147806 +               return sizeof(info->bbr2);
147807 +       }
147808 +       return 0;
147811 +static void bbr2_set_state(struct sock *sk, u8 new_state)
147813 +       struct tcp_sock *tp = tcp_sk(sk);
147814 +       struct bbr *bbr = inet_csk_ca(sk);
147816 +       if (new_state == TCP_CA_Loss) {
147817 +               struct rate_sample rs = { .losses = 1 };
147818 +               struct bbr_context ctx = { 0 };
147820 +               bbr->prev_ca_state = TCP_CA_Loss;
147821 +               bbr->full_bw = 0;
147822 +               if (!bbr2_is_probing_bandwidth(sk) && bbr->inflight_lo == ~0U) {
147823 +                       /* bbr_adapt_lower_bounds() needs cwnd before
147824 +                        * we suffered an RTO, to update inflight_lo:
147825 +                        */
147826 +                       bbr->inflight_lo =
147827 +                               max(tp->snd_cwnd, bbr->prior_cwnd);
147828 +               }
147829 +               bbr_debug(sk, 0, &rs, &ctx);
147830 +       } else if (bbr->prev_ca_state == TCP_CA_Loss &&
147831 +                  new_state != TCP_CA_Loss) {
147832 +               tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
147833 +               bbr->try_fast_path = 0; /* bound cwnd using latest model */
147834 +       }
147837 +static struct tcp_congestion_ops tcp_bbr2_cong_ops __read_mostly = {
147838 +       .flags          = TCP_CONG_NON_RESTRICTED | TCP_CONG_WANTS_CE_EVENTS,
147839 +       .name           = "bbr2",
147840 +       .owner          = THIS_MODULE,
147841 +       .init           = bbr2_init,
147842 +       .cong_control   = bbr2_main,
147843 +       .sndbuf_expand  = bbr_sndbuf_expand,
147844 +       .skb_marked_lost = bbr2_skb_marked_lost,
147845 +       .undo_cwnd      = bbr2_undo_cwnd,
147846 +       .cwnd_event     = bbr_cwnd_event,
147847 +       .ssthresh       = bbr2_ssthresh,
147848 +       .tso_segs       = bbr_tso_segs,
147849 +       .get_info       = bbr2_get_info,
147850 +       .set_state      = bbr2_set_state,
147853 +static int __init bbr_register(void)
147855 +       BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
147856 +       return tcp_register_congestion_control(&tcp_bbr2_cong_ops);
147859 +static void __exit bbr_unregister(void)
147861 +       tcp_unregister_congestion_control(&tcp_bbr2_cong_ops);
147864 +module_init(bbr_register);
147865 +module_exit(bbr_unregister);
147867 +MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
147868 +MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
147869 +MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
147870 +MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
147871 +MODULE_AUTHOR("Priyaranjan Jha <priyarjha@google.com>");
147872 +MODULE_AUTHOR("Yousuk Seung <ysseung@google.com>");
147873 +MODULE_AUTHOR("Kevin Yang <yyd@google.com>");
147874 +MODULE_AUTHOR("Arjun Roy <arjunroy@google.com>");
147876 +MODULE_LICENSE("Dual BSD/GPL");
147877 +MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");
147878 diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
147879 index 563d016e7478..153ed9010c0c 100644
147880 --- a/net/ipv4/tcp_cong.c
147881 +++ b/net/ipv4/tcp_cong.c
147882 @@ -179,6 +179,7 @@ void tcp_init_congestion_control(struct sock *sk)
147883         struct inet_connection_sock *icsk = inet_csk(sk);
147885         tcp_sk(sk)->prior_ssthresh = 0;
147886 +       tcp_sk(sk)->fast_ack_mode = 0;
147887         if (icsk->icsk_ca_ops->init)
147888                 icsk->icsk_ca_ops->init(sk);
147889         if (tcp_ca_needs_ecn(sk))
147890 @@ -230,6 +231,10 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
147891                 ret = -ENOENT;
147892         } else if (!bpf_try_module_get(ca, ca->owner)) {
147893                 ret = -EBUSY;
147894 +       } else if (!net_eq(net, &init_net) &&
147895 +                       !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
147896 +               /* Only init netns can set default to a restricted algorithm */
147897 +               ret = -EPERM;
147898         } else {
147899                 prev = xchg(&net->ipv4.tcp_congestion_control, ca);
147900                 if (prev)
147901 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
147902 index 69a545db80d2..45aaba87ce8e 100644
147903 --- a/net/ipv4/tcp_input.c
147904 +++ b/net/ipv4/tcp_input.c
147905 @@ -348,7 +348,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
147906                         tcp_enter_quickack_mode(sk, 2);
147907                 break;
147908         case INET_ECN_CE:
147909 -               if (tcp_ca_needs_ecn(sk))
147910 +               if (tcp_ca_wants_ce_events(sk))
147911                         tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
147913                 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
147914 @@ -359,7 +359,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
147915                 tp->ecn_flags |= TCP_ECN_SEEN;
147916                 break;
147917         default:
147918 -               if (tcp_ca_needs_ecn(sk))
147919 +               if (tcp_ca_wants_ce_events(sk))
147920                         tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
147921                 tp->ecn_flags |= TCP_ECN_SEEN;
147922                 break;
147923 @@ -1039,7 +1039,12 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
147924   */
147925  static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb)
147927 +       struct sock *sk = (struct sock *)tp;
147928 +       const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
147930         tp->lost += tcp_skb_pcount(skb);
147931 +       if (ca_ops->skb_marked_lost)
147932 +               ca_ops->skb_marked_lost(sk, skb);
147935  void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
147936 @@ -1420,6 +1425,17 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
147937         WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
147938         tcp_skb_pcount_add(skb, -pcount);
147940 +       /* Adjust tx.in_flight as pcount is shifted from skb to prev. */
147941 +       if (WARN_ONCE(TCP_SKB_CB(skb)->tx.in_flight < pcount,
147942 +                     "prev in_flight: %u skb in_flight: %u pcount: %u",
147943 +                     TCP_SKB_CB(prev)->tx.in_flight,
147944 +                     TCP_SKB_CB(skb)->tx.in_flight,
147945 +                     pcount))
147946 +               TCP_SKB_CB(skb)->tx.in_flight = 0;
147947 +       else
147948 +               TCP_SKB_CB(skb)->tx.in_flight -= pcount;
147949 +       TCP_SKB_CB(prev)->tx.in_flight += pcount;
147951         /* When we're adding to gso_segs == 1, gso_size will be zero,
147952          * in theory this shouldn't be necessary but as long as DSACK
147953          * code can come after this skb later on it's better to keep
147954 @@ -3182,7 +3198,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
147955         long seq_rtt_us = -1L;
147956         long ca_rtt_us = -1L;
147957         u32 pkts_acked = 0;
147958 -       u32 last_in_flight = 0;
147959         bool rtt_update;
147960         int flag = 0;
147962 @@ -3218,7 +3233,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
147963                         if (!first_ackt)
147964                                 first_ackt = last_ackt;
147966 -                       last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
147967                         if (before(start_seq, reord))
147968                                 reord = start_seq;
147969                         if (!after(scb->end_seq, tp->high_seq))
147970 @@ -3284,8 +3298,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
147971                 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
147972                 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
147974 -               if (pkts_acked == 1 && last_in_flight < tp->mss_cache &&
147975 -                   last_in_flight && !prior_sacked && fully_acked &&
147976 +               if (pkts_acked == 1 && fully_acked && !prior_sacked &&
147977 +                   (tp->snd_una - prior_snd_una) < tp->mss_cache &&
147978                     sack->rate->prior_delivered + 1 == tp->delivered &&
147979                     !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
147980                         /* Conservatively mark a delayed ACK. It's typically
147981 @@ -3342,9 +3356,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
147983         if (icsk->icsk_ca_ops->pkts_acked) {
147984                 struct ack_sample sample = { .pkts_acked = pkts_acked,
147985 -                                            .rtt_us = sack->rate->rtt_us,
147986 -                                            .in_flight = last_in_flight };
147987 +                                            .rtt_us = sack->rate->rtt_us };
147989 +               sample.in_flight = tp->mss_cache *
147990 +                       (tp->delivered - sack->rate->prior_delivered);
147991                 icsk->icsk_ca_ops->pkts_acked(sk, &sample);
147992         }
147994 @@ -3742,6 +3757,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
147996         prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
147997         rs.prior_in_flight = tcp_packets_in_flight(tp);
147998 +       tcp_rate_check_app_limited(sk);
148000         /* ts_recent update must be made after we are sure that the packet
148001          * is in window.
148002 @@ -3839,6 +3855,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
148003         delivered = tcp_newly_delivered(sk, delivered, flag);
148004         lost = tp->lost - lost;                 /* freshly marked lost */
148005         rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
148006 +       rs.is_ece = !!(flag & FLAG_ECE);
148007         tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
148008         tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
148009         tcp_xmit_recovery(sk, rexmit);
148010 @@ -5399,13 +5416,14 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
148012             /* More than one full frame received... */
148013         if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
148014 +            (tp->fast_ack_mode == 1 ||
148015              /* ... and right edge of window advances far enough.
148016               * (tcp_recvmsg() will send ACK otherwise).
148017               * If application uses SO_RCVLOWAT, we want send ack now if
148018               * we have not received enough bytes to satisfy the condition.
148019               */
148020 -           (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
148021 -            __tcp_select_window(sk) >= tp->rcv_wnd)) ||
148022 +             (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
148023 +              __tcp_select_window(sk) >= tp->rcv_wnd))) ||
148024             /* We ACK each frame or... */
148025             tcp_in_quickack_mode(sk) ||
148026             /* Protocol state mandates a one-time immediate ACK */
148027 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
148028 index fbf140a770d8..90d939375b29 100644
148029 --- a/net/ipv4/tcp_output.c
148030 +++ b/net/ipv4/tcp_output.c
148031 @@ -1256,8 +1256,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
148032         tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
148033         skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
148034         if (clone_it) {
148035 -               TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
148036 -                       - tp->snd_una;
148037                 oskb = skb;
148039                 tcp_skb_tsorted_save(oskb) {
148040 @@ -1536,7 +1534,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
148042         struct tcp_sock *tp = tcp_sk(sk);
148043         struct sk_buff *buff;
148044 -       int nsize, old_factor;
148045 +       int nsize, old_factor, inflight_prev;
148046         long limit;
148047         int nlen;
148048         u8 flags;
148049 @@ -1615,6 +1613,15 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
148051                 if (diff)
148052                         tcp_adjust_pcount(sk, skb, diff);
148054 +               /* Set buff tx.in_flight as if buff were sent by itself. */
148055 +               inflight_prev = TCP_SKB_CB(skb)->tx.in_flight - old_factor;
148056 +               if (WARN_ONCE(inflight_prev < 0,
148057 +                             "inconsistent: tx.in_flight: %u old_factor: %d",
148058 +                             TCP_SKB_CB(skb)->tx.in_flight, old_factor))
148059 +                       inflight_prev = 0;
148060 +               TCP_SKB_CB(buff)->tx.in_flight = inflight_prev +
148061 +                                                tcp_skb_pcount(buff);
148062         }
148064         /* Link BUFF into the send queue. */
148065 @@ -1982,13 +1989,12 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
148066  static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
148068         const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
148069 -       u32 min_tso, tso_segs;
148071 -       min_tso = ca_ops->min_tso_segs ?
148072 -                       ca_ops->min_tso_segs(sk) :
148073 -                       sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
148074 +       u32 tso_segs;
148076 -       tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
148077 +       tso_segs = ca_ops->tso_segs ?
148078 +               ca_ops->tso_segs(sk, mss_now) :
148079 +               tcp_tso_autosize(sk, mss_now,
148080 +                                sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
148081         return min_t(u32, tso_segs, sk->sk_gso_max_segs);
148084 @@ -2628,6 +2634,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
148085                         skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
148086                         list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
148087                         tcp_init_tso_segs(skb, mss_now);
148088 +                       tcp_set_tx_in_flight(sk, skb);
148089                         goto repair; /* Skip network transmission */
148090                 }
148092 diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
148093 index 0de693565963..796fa6e5310c 100644
148094 --- a/net/ipv4/tcp_rate.c
148095 +++ b/net/ipv4/tcp_rate.c
148096 @@ -34,6 +34,24 @@
148097   * ready to send in the write queue.
148098   */
148100 +void tcp_set_tx_in_flight(struct sock *sk, struct sk_buff *skb)
148102 +       struct tcp_sock *tp = tcp_sk(sk);
148103 +       u32 in_flight;
148105 +       /* Check, sanitize, and record packets in flight after skb was sent. */
148106 +       in_flight = tcp_packets_in_flight(tp) + tcp_skb_pcount(skb);
148107 +       if (WARN_ONCE(in_flight > TCPCB_IN_FLIGHT_MAX,
148108 +                     "insane in_flight %u cc %s mss %u "
148109 +                     "cwnd %u pif %u %u %u %u\n",
148110 +                     in_flight, inet_csk(sk)->icsk_ca_ops->name,
148111 +                     tp->mss_cache, tp->snd_cwnd,
148112 +                     tp->packets_out, tp->retrans_out,
148113 +                     tp->sacked_out, tp->lost_out))
148114 +               in_flight = TCPCB_IN_FLIGHT_MAX;
148115 +       TCP_SKB_CB(skb)->tx.in_flight = in_flight;
148118  /* Snapshot the current delivery information in the skb, to generate
148119   * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
148120   */
148121 @@ -65,7 +83,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
148122         TCP_SKB_CB(skb)->tx.first_tx_mstamp     = tp->first_tx_mstamp;
148123         TCP_SKB_CB(skb)->tx.delivered_mstamp    = tp->delivered_mstamp;
148124         TCP_SKB_CB(skb)->tx.delivered           = tp->delivered;
148125 +       TCP_SKB_CB(skb)->tx.delivered_ce        = tp->delivered_ce;
148126 +       TCP_SKB_CB(skb)->tx.lost                = tp->lost;
148127         TCP_SKB_CB(skb)->tx.is_app_limited      = tp->app_limited ? 1 : 0;
148128 +       tcp_set_tx_in_flight(sk, skb);
148131  /* When an skb is sacked or acked, we fill in the rate sample with the (prior)
148132 @@ -86,16 +107,20 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
148134         if (!rs->prior_delivered ||
148135             after(scb->tx.delivered, rs->prior_delivered)) {
148136 +               rs->prior_lost       = scb->tx.lost;
148137 +               rs->prior_delivered_ce  = scb->tx.delivered_ce;
148138                 rs->prior_delivered  = scb->tx.delivered;
148139                 rs->prior_mstamp     = scb->tx.delivered_mstamp;
148140                 rs->is_app_limited   = scb->tx.is_app_limited;
148141                 rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
148142 +               rs->tx_in_flight     = scb->tx.in_flight;
148144                 /* Record send time of most recently ACKed packet: */
148145                 tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
148146                 /* Find the duration of the "send phase" of this window: */
148147 -               rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
148148 -                                                    scb->tx.first_tx_mstamp);
148149 +               rs->interval_us      = tcp_stamp32_us_delta(
148150 +                                               tp->first_tx_mstamp,
148151 +                                               scb->tx.first_tx_mstamp);
148153         }
148154         /* Mark off the skb delivered once it's sacked to avoid being
148155 @@ -137,6 +162,11 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
148156                 return;
148157         }
148158         rs->delivered   = tp->delivered - rs->prior_delivered;
148159 +       rs->lost        = tp->lost - rs->prior_lost;
148161 +       rs->delivered_ce = tp->delivered_ce - rs->prior_delivered_ce;
148162 +       /* delivered_ce occupies less than 32 bits in the skb control block */
148163 +       rs->delivered_ce &= TCPCB_DELIVERED_CE_MASK;
148165         /* Model sending data and receiving ACKs as separate pipeline phases
148166          * for a window. Usually the ACK phase is longer, but with ACK
148167 @@ -144,7 +174,7 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
148168          * longer phase.
148169          */
148170         snd_us = rs->interval_us;                               /* send phase */
148171 -       ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
148172 +       ack_us = tcp_stamp32_us_delta(tp->tcp_mstamp,
148173                                     rs->prior_mstamp); /* ack phase */
148174         rs->interval_us = max(snd_us, ack_us);
148176 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
148177 index 4ef08079ccfa..b5b24caa8ba0 100644
148178 --- a/net/ipv4/tcp_timer.c
148179 +++ b/net/ipv4/tcp_timer.c
148180 @@ -607,6 +607,7 @@ void tcp_write_timer_handler(struct sock *sk)
148181                 goto out;
148182         }
148184 +       tcp_rate_check_app_limited(sk);
148185         tcp_mstamp_refresh(tcp_sk(sk));
148186         event = icsk->icsk_pending;
148188 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
148189 index 99d743eb9dc4..c586a6bb8c6d 100644
148190 --- a/net/ipv4/udp.c
148191 +++ b/net/ipv4/udp.c
148192 @@ -2664,9 +2664,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
148194         case UDP_GRO:
148195                 lock_sock(sk);
148197 +               /* when enabling GRO, accept the related GSO packet type */
148198                 if (valbool)
148199                         udp_tunnel_encap_enable(sk->sk_socket);
148200                 up->gro_enabled = valbool;
148201 +               up->accept_udp_l4 = valbool;
148202                 release_sock(sk);
148203                 break;
148205 diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
148206 index c5b4b586570f..25134a3548e9 100644
148207 --- a/net/ipv4/udp_offload.c
148208 +++ b/net/ipv4/udp_offload.c
148209 @@ -515,21 +515,24 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
148210         unsigned int off = skb_gro_offset(skb);
148211         int flush = 1;
148213 +       /* we can do L4 aggregation only if the packet can't land in a tunnel
148214 +        * otherwise we could corrupt the inner stream
148215 +        */
148216         NAPI_GRO_CB(skb)->is_flist = 0;
148217 -       if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
148218 -               NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled: 1;
148219 +       if (!sk || !udp_sk(sk)->gro_receive) {
148220 +               if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
148221 +                       NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
148223 -       if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
148224 -           (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist) {
148225 -               pp = call_gro_receive(udp_gro_receive_segment, head, skb);
148226 +               if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
148227 +                   (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
148228 +                       pp = call_gro_receive(udp_gro_receive_segment, head, skb);
148229                 return pp;
148230         }
148232 -       if (!sk || NAPI_GRO_CB(skb)->encap_mark ||
148233 +       if (NAPI_GRO_CB(skb)->encap_mark ||
148234             (uh->check && skb->ip_summed != CHECKSUM_PARTIAL &&
148235              NAPI_GRO_CB(skb)->csum_cnt == 0 &&
148236 -            !NAPI_GRO_CB(skb)->csum_valid) ||
148237 -           !udp_sk(sk)->gro_receive)
148238 +            !NAPI_GRO_CB(skb)->csum_valid))
148239                 goto out;
148241         /* mark that this skb passed once through the tunnel gro layer */
148242 diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
148243 index e0cc32e45880..932b15b13053 100644
148244 --- a/net/ipv6/ip6_vti.c
148245 +++ b/net/ipv6/ip6_vti.c
148246 @@ -193,7 +193,6 @@ static int vti6_tnl_create2(struct net_device *dev)
148248         strcpy(t->parms.name, dev->name);
148250 -       dev_hold(dev);
148251         vti6_tnl_link(ip6n, t);
148253         return 0;
148254 @@ -934,6 +933,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
148255         dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
148256         if (!dev->tstats)
148257                 return -ENOMEM;
148258 +       dev_hold(dev);
148259         return 0;
148262 diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
148263 index d3d6b6a66e5f..04d5fcdfa6e0 100644
148264 --- a/net/ipv6/mcast_snoop.c
148265 +++ b/net/ipv6/mcast_snoop.c
148266 @@ -109,7 +109,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
148267         struct mld_msg *mld;
148269         if (!ipv6_mc_may_pull(skb, len))
148270 -               return -EINVAL;
148271 +               return -ENODATA;
148273         mld = (struct mld_msg *)skb_transport_header(skb);
148275 @@ -122,7 +122,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
148276         case ICMPV6_MGM_QUERY:
148277                 return ipv6_mc_check_mld_query(skb);
148278         default:
148279 -               return -ENOMSG;
148280 +               return -ENODATA;
148281         }
148284 @@ -131,7 +131,7 @@ static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
148285         return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
148288 -int ipv6_mc_check_icmpv6(struct sk_buff *skb)
148289 +static int ipv6_mc_check_icmpv6(struct sk_buff *skb)
148291         unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr);
148292         unsigned int transport_len = ipv6_transport_len(skb);
148293 @@ -150,7 +150,6 @@ int ipv6_mc_check_icmpv6(struct sk_buff *skb)
148295         return 0;
148297 -EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
148299  /**
148300   * ipv6_mc_check_mld - checks whether this is a sane MLD packet
148301 @@ -161,7 +160,10 @@ EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
148302   *
148303   * -EINVAL: A broken packet was detected, i.e. it violates some internet
148304   *  standard
148305 - * -ENOMSG: IP header validation succeeded but it is not an MLD packet.
148306 + * -ENOMSG: IP header validation succeeded but it is not an ICMPv6 packet
148307 + *  with a hop-by-hop option.
148308 + * -ENODATA: IP+ICMPv6 header with hop-by-hop option validation succeeded
148309 + *  but it is not an MLD packet.
148310   * -ENOMEM: A memory allocation failure happened.
148311   *
148312   * Caller needs to set the skb network header and free any returned skb if it
148313 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
148314 index 1b9c82616606..0331f3a3c40e 100644
148315 --- a/net/mac80211/main.c
148316 +++ b/net/mac80211/main.c
148317 @@ -1141,8 +1141,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
148318         if (local->hw.wiphy->max_scan_ie_len)
148319                 local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
148321 -       WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
148322 -                                        local->hw.n_cipher_schemes));
148323 +       if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
148324 +                                            local->hw.n_cipher_schemes))) {
148325 +               result = -EINVAL;
148326 +               goto fail_workqueue;
148327 +       }
148329         result = ieee80211_init_cipher_suites(local);
148330         if (result < 0)
148331 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
148332 index 96f487fc0071..0fe91dc9817e 100644
148333 --- a/net/mac80211/mlme.c
148334 +++ b/net/mac80211/mlme.c
148335 @@ -1295,6 +1295,11 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
148337         sdata->vif.csa_active = false;
148338         ifmgd->csa_waiting_bcn = false;
148339 +       /*
148340 +        * If the CSA IE is still present on the beacon after the switch,
148341 +        * we need to consider it as a new CSA (possibly to self).
148342 +        */
148343 +       ifmgd->beacon_crc_valid = false;
148345         ret = drv_post_channel_switch(sdata);
148346         if (ret) {
148347 @@ -1400,11 +1405,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
148348                 ch_switch.delay = csa_ie.max_switch_time;
148349         }
148351 -       if (res < 0) {
148352 -               ieee80211_queue_work(&local->hw,
148353 -                                    &ifmgd->csa_connection_drop_work);
148354 -               return;
148355 -       }
148356 +       if (res < 0)
148357 +               goto lock_and_drop_connection;
148359         if (beacon && sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) {
148360                 if (res)
148361 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
148362 index 3b3bcefbf657..28422d687096 100644
148363 --- a/net/mac80211/tx.c
148364 +++ b/net/mac80211/tx.c
148365 @@ -2267,17 +2267,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
148366                                                     payload[7]);
148367         }
148369 -       /* Initialize skb->priority for QoS frames. If the DONT_REORDER flag
148370 -        * is set, stick to the default value for skb->priority to assure
148371 -        * frames injected with this flag are not reordered relative to each
148372 -        * other.
148373 -        */
148374 -       if (ieee80211_is_data_qos(hdr->frame_control) &&
148375 -           !(info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER)) {
148376 -               u8 *p = ieee80211_get_qos_ctl(hdr);
148377 -               skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
148378 -       }
148380         rcu_read_lock();
148382         /*
148383 @@ -2341,6 +2330,15 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
148385         info->band = chandef->chan->band;
148387 +       /* Initialize skb->priority according to frame type and TID class,
148388 +        * with respect to the sub interface that the frame will actually
148389 +        * be transmitted on. If the DONT_REORDER flag is set, the original
148390 +        * skb-priority is preserved to assure frames injected with this
148391 +        * flag are not reordered relative to each other.
148392 +        */
148393 +       ieee80211_select_queue_80211(sdata, skb, hdr);
148394 +       skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
148396         /* remove the injection radiotap header */
148397         skb_pull(skb, len_rthdr);
148399 diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
148400 index 4bde960e19dc..65e5d3eb1078 100644
148401 --- a/net/mptcp/protocol.c
148402 +++ b/net/mptcp/protocol.c
148403 @@ -399,6 +399,14 @@ static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
148404         return false;
148407 +static void mptcp_set_datafin_timeout(const struct sock *sk)
148409 +       struct inet_connection_sock *icsk = inet_csk(sk);
148411 +       mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX,
148412 +                                      TCP_RTO_MIN << icsk->icsk_retransmits);
148415  static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
148417         long tout = ssk && inet_csk(ssk)->icsk_pending ?
148418 @@ -1052,7 +1060,7 @@ static void __mptcp_clean_una(struct sock *sk)
148419         }
148421         if (snd_una == READ_ONCE(msk->snd_nxt)) {
148422 -               if (msk->timer_ival)
148423 +               if (msk->timer_ival && !mptcp_data_fin_enabled(msk))
148424                         mptcp_stop_timer(sk);
148425         } else {
148426                 mptcp_reset_timer(sk);
148427 @@ -1275,7 +1283,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
148428         int avail_size;
148429         size_t ret = 0;
148431 -       pr_debug("msk=%p ssk=%p sending dfrag at seq=%lld len=%d already sent=%d",
148432 +       pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
148433                  msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
148435         /* compute send limit */
148436 @@ -1693,7 +1701,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
148437                         if (!msk->first_pending)
148438                                 WRITE_ONCE(msk->first_pending, dfrag);
148439                 }
148440 -               pr_debug("msk=%p dfrag at seq=%lld len=%d sent=%d new=%d", msk,
148441 +               pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
148442                          dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
148443                          !dfrag_collapsed);
148445 @@ -2276,8 +2284,19 @@ static void __mptcp_retrans(struct sock *sk)
148447         __mptcp_clean_una_wakeup(sk);
148448         dfrag = mptcp_rtx_head(sk);
148449 -       if (!dfrag)
148450 +       if (!dfrag) {
148451 +               if (mptcp_data_fin_enabled(msk)) {
148452 +                       struct inet_connection_sock *icsk = inet_csk(sk);
148454 +                       icsk->icsk_retransmits++;
148455 +                       mptcp_set_datafin_timeout(sk);
148456 +                       mptcp_send_ack(msk);
148458 +                       goto reset_timer;
148459 +               }
148461                 return;
148462 +       }
148464         ssk = mptcp_subflow_get_retrans(msk);
148465         if (!ssk)
148466 @@ -2460,6 +2479,8 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
148467                         pr_debug("Sending DATA_FIN on subflow %p", ssk);
148468                         mptcp_set_timeout(sk, ssk);
148469                         tcp_send_ack(ssk);
148470 +                       if (!mptcp_timer_pending(sk))
148471 +                               mptcp_reset_timer(sk);
148472                 }
148473                 break;
148474         }
148475 diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
148476 index d17d39ccdf34..4fe7acaa472f 100644
148477 --- a/net/mptcp/subflow.c
148478 +++ b/net/mptcp/subflow.c
148479 @@ -524,8 +524,7 @@ static void mptcp_sock_destruct(struct sock *sk)
148480          * ESTABLISHED state and will not have the SOCK_DEAD flag.
148481          * Both result in warnings from inet_sock_destruct.
148482          */
148484 -       if (sk->sk_state == TCP_ESTABLISHED) {
148485 +       if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
148486                 sk->sk_state = TCP_CLOSE;
148487                 WARN_ON_ONCE(sk->sk_socket);
148488                 sock_orphan(sk);
148489 diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
148490 index c6c0cb465664..313d1c8ff066 100644
148491 --- a/net/netfilter/nf_conntrack_standalone.c
148492 +++ b/net/netfilter/nf_conntrack_standalone.c
148493 @@ -1060,16 +1060,10 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
148494         nf_conntrack_standalone_init_dccp_sysctl(net, table);
148495         nf_conntrack_standalone_init_gre_sysctl(net, table);
148497 -       /* Don't allow unprivileged users to alter certain sysctls */
148498 -       if (net->user_ns != &init_user_ns) {
148499 +       /* Don't allow non-init_net ns to alter global sysctls */
148500 +       if (!net_eq(&init_net, net)) {
148501                 table[NF_SYSCTL_CT_MAX].mode = 0444;
148502                 table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
148503 -               table[NF_SYSCTL_CT_HELPER].mode = 0444;
148504 -#ifdef CONFIG_NF_CONNTRACK_EVENTS
148505 -               table[NF_SYSCTL_CT_EVENTS].mode = 0444;
148506 -#endif
148507 -               table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
148508 -       } else if (!net_eq(&init_net, net)) {
148509                 table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
148510         }
148512 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
148513 index 589d2f6978d3..878ed49d0c56 100644
148514 --- a/net/netfilter/nf_tables_api.c
148515 +++ b/net/netfilter/nf_tables_api.c
148516 @@ -6246,9 +6246,9 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
148517         INIT_LIST_HEAD(&obj->list);
148518         return err;
148519  err_trans:
148520 -       kfree(obj->key.name);
148521 -err_userdata:
148522         kfree(obj->udata);
148523 +err_userdata:
148524 +       kfree(obj->key.name);
148525  err_strdup:
148526         if (obj->ops->destroy)
148527                 obj->ops->destroy(&ctx, obj);
148528 diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
148529 index 9ae14270c543..2b00f7f47693 100644
148530 --- a/net/netfilter/nf_tables_offload.c
148531 +++ b/net/netfilter/nf_tables_offload.c
148532 @@ -45,6 +45,48 @@ void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
148533                 offsetof(struct nft_flow_key, control);
148536 +struct nft_offload_ethertype {
148537 +       __be16 value;
148538 +       __be16 mask;
148541 +static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
148542 +                                       struct nft_flow_rule *flow)
148544 +       struct nft_flow_match *match = &flow->match;
148545 +       struct nft_offload_ethertype ethertype;
148547 +       if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
148548 +           match->key.basic.n_proto != htons(ETH_P_8021Q) &&
148549 +           match->key.basic.n_proto != htons(ETH_P_8021AD))
148550 +               return;
148552 +       ethertype.value = match->key.basic.n_proto;
148553 +       ethertype.mask = match->mask.basic.n_proto;
148555 +       if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
148556 +           (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
148557 +            match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
148558 +               match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
148559 +               match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
148560 +               match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
148561 +               match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
148562 +               match->key.vlan.vlan_tpid = ethertype.value;
148563 +               match->mask.vlan.vlan_tpid = ethertype.mask;
148564 +               match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
148565 +                       offsetof(struct nft_flow_key, cvlan);
148566 +               match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
148567 +       } else {
148568 +               match->key.basic.n_proto = match->key.vlan.vlan_tpid;
148569 +               match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
148570 +               match->key.vlan.vlan_tpid = ethertype.value;
148571 +               match->mask.vlan.vlan_tpid = ethertype.mask;
148572 +               match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
148573 +                       offsetof(struct nft_flow_key, vlan);
148574 +               match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
148575 +       }
148578  struct nft_flow_rule *nft_flow_rule_create(struct net *net,
148579                                            const struct nft_rule *rule)
148581 @@ -89,6 +131,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
148583                 expr = nft_expr_next(expr);
148584         }
148585 +       nft_flow_rule_transfer_vlan(ctx, flow);
148587         flow->proto = ctx->dep.l3num;
148588         kfree(ctx);
148590 diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
148591 index 916a3c7f9eaf..79fbf37291f3 100644
148592 --- a/net/netfilter/nfnetlink_osf.c
148593 +++ b/net/netfilter/nfnetlink_osf.c
148594 @@ -186,6 +186,8 @@ static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
148596                 ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) +
148597                                 sizeof(struct tcphdr), ctx->optsize, opts);
148598 +               if (!ctx->optp)
148599 +                       return NULL;
148600         }
148602         return tcp;
148603 diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
148604 index eb6a43a180bb..47b6d05f1ae6 100644
148605 --- a/net/netfilter/nft_cmp.c
148606 +++ b/net/netfilter/nft_cmp.c
148607 @@ -114,19 +114,56 @@ static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
148608         return -1;
148611 +union nft_cmp_offload_data {
148612 +       u16     val16;
148613 +       u32     val32;
148614 +       u64     val64;
148617 +static void nft_payload_n2h(union nft_cmp_offload_data *data,
148618 +                           const u8 *val, u32 len)
148620 +       switch (len) {
148621 +       case 2:
148622 +               data->val16 = ntohs(*((u16 *)val));
148623 +               break;
148624 +       case 4:
148625 +               data->val32 = ntohl(*((u32 *)val));
148626 +               break;
148627 +       case 8:
148628 +               data->val64 = be64_to_cpu(*((u64 *)val));
148629 +               break;
148630 +       default:
148631 +               WARN_ON_ONCE(1);
148632 +               break;
148633 +       }
148636  static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
148637                              struct nft_flow_rule *flow,
148638                              const struct nft_cmp_expr *priv)
148640         struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
148641 +       union nft_cmp_offload_data _data, _datamask;
148642         u8 *mask = (u8 *)&flow->match.mask;
148643         u8 *key = (u8 *)&flow->match.key;
148644 +       u8 *data, *datamask;
148646         if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
148647                 return -EOPNOTSUPP;
148649 -       memcpy(key + reg->offset, &priv->data, reg->len);
148650 -       memcpy(mask + reg->offset, &reg->mask, reg->len);
148651 +       if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
148652 +               nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
148653 +               nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
148654 +               data = (u8 *)&_data;
148655 +               datamask = (u8 *)&_datamask;
148656 +       } else {
148657 +               data = (u8 *)&priv->data;
148658 +               datamask = (u8 *)&reg->mask;
148659 +       }
148661 +       memcpy(key + reg->offset, data, reg->len);
148662 +       memcpy(mask + reg->offset, datamask, reg->len);
148664         flow->match.dissector.used_keys |= BIT(reg->key);
148665         flow->match.dissector.offset[reg->key] = reg->base_offset;
148666 diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
148667 index cb1c8c231880..501c5b24cc39 100644
148668 --- a/net/netfilter/nft_payload.c
148669 +++ b/net/netfilter/nft_payload.c
148670 @@ -226,8 +226,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
148671                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
148672                         return -EOPNOTSUPP;
148674 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
148675 -                                 vlan_tci, sizeof(__be16), reg);
148676 +               NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
148677 +                                       vlan_tci, sizeof(__be16), reg,
148678 +                                       NFT_OFFLOAD_F_NETWORK2HOST);
148679                 break;
148680         case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
148681                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
148682 @@ -241,16 +242,18 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
148683                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
148684                         return -EOPNOTSUPP;
148686 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
148687 -                                 vlan_tci, sizeof(__be16), reg);
148688 +               NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
148689 +                                       vlan_tci, sizeof(__be16), reg,
148690 +                                       NFT_OFFLOAD_F_NETWORK2HOST);
148691                 break;
148692         case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
148693                                                         sizeof(struct vlan_hdr):
148694                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
148695                         return -EOPNOTSUPP;
148697 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
148698 +               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
148699                                   vlan_tpid, sizeof(__be16), reg);
148700 +               nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
148701                 break;
148702         default:
148703                 return -EOPNOTSUPP;
148704 diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
148705 index bf618b7ec1ae..560c2cda52ee 100644
148706 --- a/net/netfilter/nft_set_hash.c
148707 +++ b/net/netfilter/nft_set_hash.c
148708 @@ -406,9 +406,17 @@ static void nft_rhash_destroy(const struct nft_set *set)
148709                                     (void *)set);
148712 +/* Number of buckets is stored in u32, so cap our result to 1U<<31 */
148713 +#define NFT_MAX_BUCKETS (1U << 31)
148715  static u32 nft_hash_buckets(u32 size)
148717 -       return roundup_pow_of_two(size * 4 / 3);
148718 +       u64 val = div_u64((u64)size * 4, 3);
148720 +       if (val >= NFT_MAX_BUCKETS)
148721 +               return NFT_MAX_BUCKETS;
148723 +       return roundup_pow_of_two(val);
148726  static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features,
148727 diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
148728 index 75625d13e976..498a0bf6f044 100644
148729 --- a/net/netfilter/xt_SECMARK.c
148730 +++ b/net/netfilter/xt_SECMARK.c
148731 @@ -24,10 +24,9 @@ MODULE_ALIAS("ip6t_SECMARK");
148732  static u8 mode;
148734  static unsigned int
148735 -secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
148736 +secmark_tg(struct sk_buff *skb, const struct xt_secmark_target_info_v1 *info)
148738         u32 secmark = 0;
148739 -       const struct xt_secmark_target_info *info = par->targinfo;
148741         switch (mode) {
148742         case SECMARK_MODE_SEL:
148743 @@ -41,7 +40,7 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
148744         return XT_CONTINUE;
148747 -static int checkentry_lsm(struct xt_secmark_target_info *info)
148748 +static int checkentry_lsm(struct xt_secmark_target_info_v1 *info)
148750         int err;
148752 @@ -73,15 +72,15 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
148753         return 0;
148756 -static int secmark_tg_check(const struct xt_tgchk_param *par)
148757 +static int
148758 +secmark_tg_check(const char *table, struct xt_secmark_target_info_v1 *info)
148760 -       struct xt_secmark_target_info *info = par->targinfo;
148761         int err;
148763 -       if (strcmp(par->table, "mangle") != 0 &&
148764 -           strcmp(par->table, "security") != 0) {
148765 +       if (strcmp(table, "mangle") != 0 &&
148766 +           strcmp(table, "security") != 0) {
148767                 pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
148768 -                                   par->table);
148769 +                                   table);
148770                 return -EINVAL;
148771         }
148773 @@ -116,25 +115,76 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
148774         }
148777 -static struct xt_target secmark_tg_reg __read_mostly = {
148778 -       .name       = "SECMARK",
148779 -       .revision   = 0,
148780 -       .family     = NFPROTO_UNSPEC,
148781 -       .checkentry = secmark_tg_check,
148782 -       .destroy    = secmark_tg_destroy,
148783 -       .target     = secmark_tg,
148784 -       .targetsize = sizeof(struct xt_secmark_target_info),
148785 -       .me         = THIS_MODULE,
148786 +static int secmark_tg_check_v0(const struct xt_tgchk_param *par)
148788 +       struct xt_secmark_target_info *info = par->targinfo;
148789 +       struct xt_secmark_target_info_v1 newinfo = {
148790 +               .mode   = info->mode,
148791 +       };
148792 +       int ret;
148794 +       memcpy(newinfo.secctx, info->secctx, SECMARK_SECCTX_MAX);
148796 +       ret = secmark_tg_check(par->table, &newinfo);
148797 +       info->secid = newinfo.secid;
148799 +       return ret;
148802 +static unsigned int
148803 +secmark_tg_v0(struct sk_buff *skb, const struct xt_action_param *par)
148805 +       const struct xt_secmark_target_info *info = par->targinfo;
148806 +       struct xt_secmark_target_info_v1 newinfo = {
148807 +               .secid  = info->secid,
148808 +       };
148810 +       return secmark_tg(skb, &newinfo);
148813 +static int secmark_tg_check_v1(const struct xt_tgchk_param *par)
148815 +       return secmark_tg_check(par->table, par->targinfo);
148818 +static unsigned int
148819 +secmark_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
148821 +       return secmark_tg(skb, par->targinfo);
148824 +static struct xt_target secmark_tg_reg[] __read_mostly = {
148825 +       {
148826 +               .name           = "SECMARK",
148827 +               .revision       = 0,
148828 +               .family         = NFPROTO_UNSPEC,
148829 +               .checkentry     = secmark_tg_check_v0,
148830 +               .destroy        = secmark_tg_destroy,
148831 +               .target         = secmark_tg_v0,
148832 +               .targetsize     = sizeof(struct xt_secmark_target_info),
148833 +               .me             = THIS_MODULE,
148834 +       },
148835 +       {
148836 +               .name           = "SECMARK",
148837 +               .revision       = 1,
148838 +               .family         = NFPROTO_UNSPEC,
148839 +               .checkentry     = secmark_tg_check_v1,
148840 +               .destroy        = secmark_tg_destroy,
148841 +               .target         = secmark_tg_v1,
148842 +               .targetsize     = sizeof(struct xt_secmark_target_info_v1),
148843 +               .usersize       = offsetof(struct xt_secmark_target_info_v1, secid),
148844 +               .me             = THIS_MODULE,
148845 +       },
148848  static int __init secmark_tg_init(void)
148850 -       return xt_register_target(&secmark_tg_reg);
148851 +       return xt_register_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
148854  static void __exit secmark_tg_exit(void)
148856 -       xt_unregister_target(&secmark_tg_reg);
148857 +       xt_unregister_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
148860  module_init(secmark_tg_init);
148861 diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
148862 index 5971fb6f51cc..dc21b4141b0a 100644
148863 --- a/net/nfc/digital_dep.c
148864 +++ b/net/nfc/digital_dep.c
148865 @@ -1273,6 +1273,8 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
148866         }
148868         rc = nfc_tm_data_received(ddev->nfc_dev, resp);
148869 +       if (rc)
148870 +               resp = NULL;
148872  exit:
148873         kfree_skb(ddev->chaining_skb);
148874 diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
148875 index a3b46f888803..53dbe733f998 100644
148876 --- a/net/nfc/llcp_sock.c
148877 +++ b/net/nfc/llcp_sock.c
148878 @@ -109,12 +109,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
148879                                           GFP_KERNEL);
148880         if (!llcp_sock->service_name) {
148881                 nfc_llcp_local_put(llcp_sock->local);
148882 +               llcp_sock->local = NULL;
148883                 ret = -ENOMEM;
148884                 goto put_dev;
148885         }
148886         llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
148887         if (llcp_sock->ssap == LLCP_SAP_MAX) {
148888                 nfc_llcp_local_put(llcp_sock->local);
148889 +               llcp_sock->local = NULL;
148890                 kfree(llcp_sock->service_name);
148891                 llcp_sock->service_name = NULL;
148892                 ret = -EADDRINUSE;
148893 @@ -709,6 +711,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
148894         llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
148895         if (llcp_sock->ssap == LLCP_SAP_MAX) {
148896                 nfc_llcp_local_put(llcp_sock->local);
148897 +               llcp_sock->local = NULL;
148898                 ret = -ENOMEM;
148899                 goto put_dev;
148900         }
148901 @@ -756,6 +759,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
148902  sock_llcp_release:
148903         nfc_llcp_put_ssap(local, llcp_sock->ssap);
148904         nfc_llcp_local_put(llcp_sock->local);
148905 +       llcp_sock->local = NULL;
148907  put_dev:
148908         nfc_put_device(dev);
148909 diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
148910 index 92a0b67b2728..77d924ab8cdb 100644
148911 --- a/net/openvswitch/actions.c
148912 +++ b/net/openvswitch/actions.c
148913 @@ -827,17 +827,17 @@ static void ovs_fragment(struct net *net, struct vport *vport,
148914         }
148916         if (key->eth.type == htons(ETH_P_IP)) {
148917 -               struct dst_entry ovs_dst;
148918 +               struct rtable ovs_rt = { 0 };
148919                 unsigned long orig_dst;
148921                 prepare_frag(vport, skb, orig_network_offset,
148922                              ovs_key_mac_proto(key));
148923 -               dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
148924 +               dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
148925                          DST_OBSOLETE_NONE, DST_NOCOUNT);
148926 -               ovs_dst.dev = vport->dev;
148927 +               ovs_rt.dst.dev = vport->dev;
148929                 orig_dst = skb->_skb_refdst;
148930 -               skb_dst_set_noref(skb, &ovs_dst);
148931 +               skb_dst_set_noref(skb, &ovs_rt.dst);
148932                 IPCB(skb)->frag_max_size = mru;
148934                 ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
148935 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
148936 index e24b2841c643..9611e41c7b8b 100644
148937 --- a/net/packet/af_packet.c
148938 +++ b/net/packet/af_packet.c
148939 @@ -1359,7 +1359,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
148940         struct packet_sock *po, *po_next, *po_skip = NULL;
148941         unsigned int i, j, room = ROOM_NONE;
148943 -       po = pkt_sk(f->arr[idx]);
148944 +       po = pkt_sk(rcu_dereference(f->arr[idx]));
148946         if (try_self) {
148947                 room = packet_rcv_has_room(po, skb);
148948 @@ -1371,7 +1371,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
148950         i = j = min_t(int, po->rollover->sock, num - 1);
148951         do {
148952 -               po_next = pkt_sk(f->arr[i]);
148953 +               po_next = pkt_sk(rcu_dereference(f->arr[i]));
148954                 if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
148955                     packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
148956                         if (i != j)
148957 @@ -1466,7 +1466,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
148958         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
148959                 idx = fanout_demux_rollover(f, skb, idx, true, num);
148961 -       po = pkt_sk(f->arr[idx]);
148962 +       po = pkt_sk(rcu_dereference(f->arr[idx]));
148963         return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
148966 @@ -1480,7 +1480,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
148967         struct packet_fanout *f = po->fanout;
148969         spin_lock(&f->lock);
148970 -       f->arr[f->num_members] = sk;
148971 +       rcu_assign_pointer(f->arr[f->num_members], sk);
148972         smp_wmb();
148973         f->num_members++;
148974         if (f->num_members == 1)
148975 @@ -1495,11 +1495,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
148977         spin_lock(&f->lock);
148978         for (i = 0; i < f->num_members; i++) {
148979 -               if (f->arr[i] == sk)
148980 +               if (rcu_dereference_protected(f->arr[i],
148981 +                                             lockdep_is_held(&f->lock)) == sk)
148982                         break;
148983         }
148984         BUG_ON(i >= f->num_members);
148985 -       f->arr[i] = f->arr[f->num_members - 1];
148986 +       rcu_assign_pointer(f->arr[i],
148987 +                          rcu_dereference_protected(f->arr[f->num_members - 1],
148988 +                                                    lockdep_is_held(&f->lock)));
148989         f->num_members--;
148990         if (f->num_members == 0)
148991                 __dev_remove_pack(&f->prot_hook);
148992 diff --git a/net/packet/internal.h b/net/packet/internal.h
148993 index 5f61e59ebbff..48af35b1aed2 100644
148994 --- a/net/packet/internal.h
148995 +++ b/net/packet/internal.h
148996 @@ -94,7 +94,7 @@ struct packet_fanout {
148997         spinlock_t              lock;
148998         refcount_t              sk_ref;
148999         struct packet_type      prot_hook ____cacheline_aligned_in_smp;
149000 -       struct sock             *arr[];
149001 +       struct sock     __rcu   *arr[];
149004  struct packet_rollover {
149005 diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
149006 index 2bf2b1943e61..fa611678af05 100644
149007 --- a/net/qrtr/mhi.c
149008 +++ b/net/qrtr/mhi.c
149009 @@ -50,6 +50,9 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
149010         struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
149011         int rc;
149013 +       if (skb->sk)
149014 +               sock_hold(skb->sk);
149016         rc = skb_linearize(skb);
149017         if (rc)
149018                 goto free_skb;
149019 @@ -59,12 +62,11 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
149020         if (rc)
149021                 goto free_skb;
149023 -       if (skb->sk)
149024 -               sock_hold(skb->sk);
149026         return rc;
149028  free_skb:
149029 +       if (skb->sk)
149030 +               sock_put(skb->sk);
149031         kfree_skb(skb);
149033         return rc;
149034 diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
149035 index 16e888a9601d..48fdf7293dea 100644
149036 --- a/net/sched/act_ct.c
149037 +++ b/net/sched/act_ct.c
149038 @@ -732,7 +732,8 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
149039  #endif
149040         }
149042 -       *qdisc_skb_cb(skb) = cb;
149043 +       if (err != -EINPROGRESS)
149044 +               *qdisc_skb_cb(skb) = cb;
149045         skb_clear_hash(skb);
149046         skb->ignore_df = 1;
149047         return err;
149048 @@ -967,7 +968,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
149049         err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
149050         if (err == -EINPROGRESS) {
149051                 retval = TC_ACT_STOLEN;
149052 -               goto out;
149053 +               goto out_clear;
149054         }
149055         if (err)
149056                 goto drop;
149057 @@ -1030,7 +1031,6 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
149058  out_push:
149059         skb_push_rcsum(skb, nh_ofs);
149061 -out:
149062         qdisc_skb_cb(skb)->post_ct = true;
149063  out_clear:
149064         tcf_action_update_bstats(&c->common, skb);
149065 diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
149066 index c69a4ba9c33f..3035f96c6e6c 100644
149067 --- a/net/sched/cls_flower.c
149068 +++ b/net/sched/cls_flower.c
149069 @@ -209,16 +209,16 @@ static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
149070                                   struct fl_flow_key *key,
149071                                   struct fl_flow_key *mkey)
149073 -       __be16 min_mask, max_mask, min_val, max_val;
149074 +       u16 min_mask, max_mask, min_val, max_val;
149076 -       min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
149077 -       max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
149078 -       min_val = htons(filter->key.tp_range.tp_min.dst);
149079 -       max_val = htons(filter->key.tp_range.tp_max.dst);
149080 +       min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
149081 +       max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
149082 +       min_val = ntohs(filter->key.tp_range.tp_min.dst);
149083 +       max_val = ntohs(filter->key.tp_range.tp_max.dst);
149085         if (min_mask && max_mask) {
149086 -               if (htons(key->tp_range.tp.dst) < min_val ||
149087 -                   htons(key->tp_range.tp.dst) > max_val)
149088 +               if (ntohs(key->tp_range.tp.dst) < min_val ||
149089 +                   ntohs(key->tp_range.tp.dst) > max_val)
149090                         return false;
149092                 /* skb does not have min and max values */
149093 @@ -232,16 +232,16 @@ static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
149094                                   struct fl_flow_key *key,
149095                                   struct fl_flow_key *mkey)
149097 -       __be16 min_mask, max_mask, min_val, max_val;
149098 +       u16 min_mask, max_mask, min_val, max_val;
149100 -       min_mask = htons(filter->mask->key.tp_range.tp_min.src);
149101 -       max_mask = htons(filter->mask->key.tp_range.tp_max.src);
149102 -       min_val = htons(filter->key.tp_range.tp_min.src);
149103 -       max_val = htons(filter->key.tp_range.tp_max.src);
149104 +       min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
149105 +       max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
149106 +       min_val = ntohs(filter->key.tp_range.tp_min.src);
149107 +       max_val = ntohs(filter->key.tp_range.tp_max.src);
149109         if (min_mask && max_mask) {
149110 -               if (htons(key->tp_range.tp.src) < min_val ||
149111 -                   htons(key->tp_range.tp.src) > max_val)
149112 +               if (ntohs(key->tp_range.tp.src) < min_val ||
149113 +                   ntohs(key->tp_range.tp.src) > max_val)
149114                         return false;
149116                 /* skb does not have min and max values */
149117 @@ -783,16 +783,16 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
149118                        TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
149120         if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
149121 -           htons(key->tp_range.tp_max.dst) <=
149122 -           htons(key->tp_range.tp_min.dst)) {
149123 +           ntohs(key->tp_range.tp_max.dst) <=
149124 +           ntohs(key->tp_range.tp_min.dst)) {
149125                 NL_SET_ERR_MSG_ATTR(extack,
149126                                     tb[TCA_FLOWER_KEY_PORT_DST_MIN],
149127                                     "Invalid destination port range (min must be strictly smaller than max)");
149128                 return -EINVAL;
149129         }
149130         if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
149131 -           htons(key->tp_range.tp_max.src) <=
149132 -           htons(key->tp_range.tp_min.src)) {
149133 +           ntohs(key->tp_range.tp_max.src) <=
149134 +           ntohs(key->tp_range.tp_min.src)) {
149135                 NL_SET_ERR_MSG_ATTR(extack,
149136                                     tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
149137                                     "Invalid source port range (min must be strictly smaller than max)");
149138 diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
149139 index e1e77d3fb6c0..8c06381391d6 100644
149140 --- a/net/sched/sch_frag.c
149141 +++ b/net/sched/sch_frag.c
149142 @@ -90,16 +90,16 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
149143         }
149145         if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
149146 -               struct dst_entry sch_frag_dst;
149147 +               struct rtable sch_frag_rt = { 0 };
149148                 unsigned long orig_dst;
149150                 sch_frag_prepare_frag(skb, xmit);
149151 -               dst_init(&sch_frag_dst, &sch_frag_dst_ops, NULL, 1,
149152 +               dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
149153                          DST_OBSOLETE_NONE, DST_NOCOUNT);
149154 -               sch_frag_dst.dev = skb->dev;
149155 +               sch_frag_rt.dst.dev = skb->dev;
149157                 orig_dst = skb->_skb_refdst;
149158 -               skb_dst_set_noref(skb, &sch_frag_dst);
149159 +               skb_dst_set_noref(skb, &sch_frag_rt.dst);
149160                 IPCB(skb)->frag_max_size = mru;
149162                 ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
149163 diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
149164 index 8287894541e3..909c798b7403 100644
149165 --- a/net/sched/sch_taprio.c
149166 +++ b/net/sched/sch_taprio.c
149167 @@ -901,6 +901,12 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
149169                 list_for_each_entry(entry, &new->entries, list)
149170                         cycle = ktime_add_ns(cycle, entry->interval);
149172 +               if (!cycle) {
149173 +                       NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
149174 +                       return -EINVAL;
149175 +               }
149177                 new->cycle_time = cycle;
149178         }
149180 diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
149181 index f77484df097b..da4ce0947c3a 100644
149182 --- a/net/sctp/sm_make_chunk.c
149183 +++ b/net/sctp/sm_make_chunk.c
149184 @@ -3147,7 +3147,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
149185                  * primary.
149186                  */
149187                 if (af->is_any(&addr))
149188 -                       memcpy(&addr.v4, sctp_source(asconf), sizeof(addr));
149189 +                       memcpy(&addr, sctp_source(asconf), sizeof(addr));
149191                 if (security_sctp_bind_connect(asoc->ep->base.sk,
149192                                                SCTP_PARAM_SET_PRIMARY,
149193 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
149194 index af2b7041fa4e..73bb4c6e9201 100644
149195 --- a/net/sctp/sm_statefuns.c
149196 +++ b/net/sctp/sm_statefuns.c
149197 @@ -1852,20 +1852,35 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
149198                         SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
149199         sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
149201 -       repl = sctp_make_cookie_ack(new_asoc, chunk);
149202 +       /* Update the content of current association. */
149203 +       if (sctp_assoc_update((struct sctp_association *)asoc, new_asoc)) {
149204 +               struct sctp_chunk *abort;
149206 +               abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
149207 +               if (abort) {
149208 +                       sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
149209 +                       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
149210 +               }
149211 +               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
149212 +               sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
149213 +                               SCTP_PERR(SCTP_ERROR_RSRC_LOW));
149214 +               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
149215 +               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
149216 +               goto nomem;
149217 +       }
149219 +       repl = sctp_make_cookie_ack(asoc, chunk);
149220         if (!repl)
149221                 goto nomem;
149223         /* Report association restart to upper layer. */
149224         ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
149225 -                                            new_asoc->c.sinit_num_ostreams,
149226 -                                            new_asoc->c.sinit_max_instreams,
149227 +                                            asoc->c.sinit_num_ostreams,
149228 +                                            asoc->c.sinit_max_instreams,
149229                                              NULL, GFP_ATOMIC);
149230         if (!ev)
149231                 goto nomem_ev;
149233 -       /* Update the content of current association. */
149234 -       sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
149235         sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
149236         if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
149237              sctp_state(asoc, SHUTDOWN_SENT)) &&
149238 @@ -1929,7 +1944,8 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
149239         sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
149240         sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
149241                         SCTP_STATE(SCTP_STATE_ESTABLISHED));
149242 -       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
149243 +       if (asoc->state < SCTP_STATE_ESTABLISHED)
149244 +               SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
149245         sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
149247         repl = sctp_make_cookie_ack(new_asoc, chunk);
149248 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
149249 index b9b3d899a611..4ae428f2f2c5 100644
149250 --- a/net/sctp/socket.c
149251 +++ b/net/sctp/socket.c
149252 @@ -357,6 +357,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
149253         return af;
149256 +static void sctp_auto_asconf_init(struct sctp_sock *sp)
149258 +       struct net *net = sock_net(&sp->inet.sk);
149260 +       if (net->sctp.default_auto_asconf) {
149261 +               spin_lock(&net->sctp.addr_wq_lock);
149262 +               list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
149263 +               spin_unlock(&net->sctp.addr_wq_lock);
149264 +               sp->do_auto_asconf = 1;
149265 +       }
149268  /* Bind a local address either to an endpoint or to an association.  */
149269  static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
149271 @@ -418,8 +430,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
149272                 return -EADDRINUSE;
149274         /* Refresh ephemeral port.  */
149275 -       if (!bp->port)
149276 +       if (!bp->port) {
149277                 bp->port = inet_sk(sk)->inet_num;
149278 +               sctp_auto_asconf_init(sp);
149279 +       }
149281         /* Add the address to the bind address list.
149282          * Use GFP_ATOMIC since BHs will be disabled.
149283 @@ -1520,9 +1534,11 @@ static void sctp_close(struct sock *sk, long timeout)
149285         /* Supposedly, no process has access to the socket, but
149286          * the net layers still may.
149287 +        * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
149288 +        * held and that should be grabbed before socket lock.
149289          */
149290 -       local_bh_disable();
149291 -       bh_lock_sock(sk);
149292 +       spin_lock_bh(&net->sctp.addr_wq_lock);
149293 +       bh_lock_sock_nested(sk);
149295         /* Hold the sock, since sk_common_release() will put sock_put()
149296          * and we have just a little more cleanup.
149297 @@ -1531,7 +1547,7 @@ static void sctp_close(struct sock *sk, long timeout)
149298         sk_common_release(sk);
149300         bh_unlock_sock(sk);
149301 -       local_bh_enable();
149302 +       spin_unlock_bh(&net->sctp.addr_wq_lock);
149304         sock_put(sk);
149306 @@ -4991,16 +5007,6 @@ static int sctp_init_sock(struct sock *sk)
149307         sk_sockets_allocated_inc(sk);
149308         sock_prot_inuse_add(net, sk->sk_prot, 1);
149310 -       if (net->sctp.default_auto_asconf) {
149311 -               spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
149312 -               list_add_tail(&sp->auto_asconf_list,
149313 -                   &net->sctp.auto_asconf_splist);
149314 -               sp->do_auto_asconf = 1;
149315 -               spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
149316 -       } else {
149317 -               sp->do_auto_asconf = 0;
149318 -       }
149320         local_bh_enable();
149322         return 0;
149323 @@ -5025,9 +5031,7 @@ static void sctp_destroy_sock(struct sock *sk)
149325         if (sp->do_auto_asconf) {
149326                 sp->do_auto_asconf = 0;
149327 -               spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
149328                 list_del(&sp->auto_asconf_list);
149329 -               spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
149330         }
149331         sctp_endpoint_free(sp->ep);
149332         local_bh_disable();
149333 @@ -9398,6 +9402,8 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
149334                         return err;
149335         }
149337 +       sctp_auto_asconf_init(newsp);
149339         /* Move any messages in the old socket's receive queue that are for the
149340          * peeled off association to the new socket's receive queue.
149341          */
149342 diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
149343 index 47340b3b514f..cb23cca72c24 100644
149344 --- a/net/smc/af_smc.c
149345 +++ b/net/smc/af_smc.c
149346 @@ -2162,6 +2162,9 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
149347         struct smc_sock *smc;
149348         int val, rc;
149350 +       if (level == SOL_TCP && optname == TCP_ULP)
149351 +               return -EOPNOTSUPP;
149353         smc = smc_sk(sk);
149355         /* generic setsockopts reaching us here always apply to the
149356 @@ -2186,7 +2189,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
149357         if (rc || smc->use_fallback)
149358                 goto out;
149359         switch (optname) {
149360 -       case TCP_ULP:
149361         case TCP_FASTOPEN:
149362         case TCP_FASTOPEN_CONNECT:
149363         case TCP_FASTOPEN_KEY:
149364 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
149365 index 612f0a641f4c..f555d335e910 100644
149366 --- a/net/sunrpc/clnt.c
149367 +++ b/net/sunrpc/clnt.c
149368 @@ -1799,7 +1799,6 @@ call_allocate(struct rpc_task *task)
149370         status = xprt->ops->buf_alloc(task);
149371         trace_rpc_buf_alloc(task, status);
149372 -       xprt_inject_disconnect(xprt);
149373         if (status == 0)
149374                 return;
149375         if (status != -ENOMEM) {
149376 @@ -2457,12 +2456,6 @@ call_decode(struct rpc_task *task)
149377                 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
149378         }
149380 -       /*
149381 -        * Ensure that we see all writes made by xprt_complete_rqst()
149382 -        * before it changed req->rq_reply_bytes_recvd.
149383 -        */
149384 -       smp_rmb();
149386         /*
149387          * Did we ever call xprt_complete_rqst()? If not, we should assume
149388          * the message is incomplete.
149389 @@ -2471,6 +2464,11 @@ call_decode(struct rpc_task *task)
149390         if (!req->rq_reply_bytes_recvd)
149391                 goto out;
149393 +       /* Ensure that we see all writes made by xprt_complete_rqst()
149394 +        * before it changed req->rq_reply_bytes_recvd.
149395 +        */
149396 +       smp_rmb();
149398         req->rq_rcv_buf.len = req->rq_private_buf.len;
149399         trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);
149401 diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
149402 index d76dc9d95d16..0de918cb3d90 100644
149403 --- a/net/sunrpc/svc.c
149404 +++ b/net/sunrpc/svc.c
149405 @@ -846,7 +846,8 @@ void
149406  svc_rqst_free(struct svc_rqst *rqstp)
149408         svc_release_buffer(rqstp);
149409 -       put_page(rqstp->rq_scratch_page);
149410 +       if (rqstp->rq_scratch_page)
149411 +               put_page(rqstp->rq_scratch_page);
149412         kfree(rqstp->rq_resp);
149413         kfree(rqstp->rq_argp);
149414         kfree(rqstp->rq_auth_data);
149415 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
149416 index 2e2f007dfc9f..7cde41a936a4 100644
149417 --- a/net/sunrpc/svcsock.c
149418 +++ b/net/sunrpc/svcsock.c
149419 @@ -1171,7 +1171,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
149420         tcp_sock_set_cork(svsk->sk_sk, true);
149421         err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent);
149422         xdr_free_bvec(xdr);
149423 -       trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
149424 +       trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
149425         if (err < 0 || sent != (xdr->len + sizeof(marker)))
149426                 goto out_close;
149427         if (atomic_dec_and_test(&svsk->sk_sendqlen))
149428 diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
149429 index 691ccf8049a4..20fe31b1b776 100644
149430 --- a/net/sunrpc/xprt.c
149431 +++ b/net/sunrpc/xprt.c
149432 @@ -698,9 +698,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
149433         const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
149434         int status = 0;
149436 -       if (time_before(jiffies, req->rq_minortimeo))
149437 -               return status;
149438         if (time_before(jiffies, req->rq_majortimeo)) {
149439 +               if (time_before(jiffies, req->rq_minortimeo))
149440 +                       return status;
149441                 if (to->to_exponential)
149442                         req->rq_timeout <<= 1;
149443                 else
149444 @@ -1469,8 +1469,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
149445         struct rpc_xprt *xprt = req->rq_xprt;
149447         if (!xprt_lock_write(xprt, task)) {
149448 -               trace_xprt_transmit_queued(xprt, task);
149450                 /* Race breaker: someone may have transmitted us */
149451                 if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
149452                         rpc_wake_up_queued_task_set_status(&xprt->sending,
149453 @@ -1483,7 +1481,10 @@ bool xprt_prepare_transmit(struct rpc_task *task)
149455  void xprt_end_transmit(struct rpc_task *task)
149457 -       xprt_release_write(task->tk_rqstp->rq_xprt, task);
149458 +       struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
149460 +       xprt_inject_disconnect(xprt);
149461 +       xprt_release_write(xprt, task);
149464  /**
149465 @@ -1885,7 +1886,6 @@ void xprt_release(struct rpc_task *task)
149466         spin_unlock(&xprt->transport_lock);
149467         if (req->rq_buffer)
149468                 xprt->ops->buf_free(task);
149469 -       xprt_inject_disconnect(xprt);
149470         xdr_free_bvec(&req->rq_rcv_buf);
149471         xdr_free_bvec(&req->rq_snd_buf);
149472         if (req->rq_cred != NULL)
149473 diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
149474 index 766a1048a48a..aca2228095db 100644
149475 --- a/net/sunrpc/xprtrdma/frwr_ops.c
149476 +++ b/net/sunrpc/xprtrdma/frwr_ops.c
149477 @@ -257,6 +257,7 @@ int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
149478         ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
149479         ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
149480         ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
149481 +       ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
149482         ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
149484         ep->re_max_rdma_segs =
149485 @@ -575,7 +576,6 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
149486                 mr = container_of(frwr, struct rpcrdma_mr, frwr);
149487                 bad_wr = bad_wr->next;
149489 -               list_del_init(&mr->mr_list);
149490                 frwr_mr_recycle(mr);
149491         }
149493 diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
149494 index 292f066d006e..21ddd78a8c35 100644
149495 --- a/net/sunrpc/xprtrdma/rpc_rdma.c
149496 +++ b/net/sunrpc/xprtrdma/rpc_rdma.c
149497 @@ -1430,9 +1430,10 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
149498                 credits = 1;    /* don't deadlock */
149499         else if (credits > r_xprt->rx_ep->re_max_requests)
149500                 credits = r_xprt->rx_ep->re_max_requests;
149501 +       rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1),
149502 +                          false);
149503         if (buf->rb_credits != credits)
149504                 rpcrdma_update_cwnd(r_xprt, credits);
149505 -       rpcrdma_post_recvs(r_xprt, false);
149507         req = rpcr_to_rdmar(rqst);
149508         if (unlikely(req->rl_reply))
149509 diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
149510 index 78d29d1bcc20..09953597d055 100644
149511 --- a/net/sunrpc/xprtrdma/transport.c
149512 +++ b/net/sunrpc/xprtrdma/transport.c
149513 @@ -262,8 +262,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
149514   * xprt_rdma_inject_disconnect - inject a connection fault
149515   * @xprt: transport context
149516   *
149517 - * If @xprt is connected, disconnect it to simulate spurious connection
149518 - * loss.
149519 + * If @xprt is connected, disconnect it to simulate spurious
149520 + * connection loss. Caller must hold @xprt's send lock to
149521 + * ensure that data structures and hardware resources are
149522 + * stable during the rdma_disconnect() call.
149523   */
149524  static void
149525  xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
149526 diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
149527 index ec912cf9c618..f3fffc74ab0f 100644
149528 --- a/net/sunrpc/xprtrdma/verbs.c
149529 +++ b/net/sunrpc/xprtrdma/verbs.c
149530 @@ -535,7 +535,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
149531          * outstanding Receives.
149532          */
149533         rpcrdma_ep_get(ep);
149534 -       rpcrdma_post_recvs(r_xprt, true);
149535 +       rpcrdma_post_recvs(r_xprt, 1, true);
149537         rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
149538         if (rc)
149539 @@ -1364,21 +1364,21 @@ int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
149540  /**
149541   * rpcrdma_post_recvs - Refill the Receive Queue
149542   * @r_xprt: controlling transport instance
149543 - * @temp: mark Receive buffers to be deleted after use
149544 + * @needed: current credit grant
149545 + * @temp: mark Receive buffers to be deleted after one use
149546   *
149547   */
149548 -void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
149549 +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
149551         struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
149552         struct rpcrdma_ep *ep = r_xprt->rx_ep;
149553         struct ib_recv_wr *wr, *bad_wr;
149554         struct rpcrdma_rep *rep;
149555 -       int needed, count, rc;
149556 +       int count, rc;
149558         rc = 0;
149559         count = 0;
149561 -       needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
149562         if (likely(ep->re_receive_count > needed))
149563                 goto out;
149564         needed -= ep->re_receive_count;
149565 diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
149566 index fe3be985e239..28af11fbe643 100644
149567 --- a/net/sunrpc/xprtrdma/xprt_rdma.h
149568 +++ b/net/sunrpc/xprtrdma/xprt_rdma.h
149569 @@ -461,7 +461,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
149570  void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
149572  int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
149573 -void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
149574 +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
149577   * Buffer calls - xprtrdma/verbs.c
149578 diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
149579 index 97710ce36047..c89ce47c56cf 100644
149580 --- a/net/tipc/crypto.c
149581 +++ b/net/tipc/crypto.c
149582 @@ -1492,6 +1492,8 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
149583         /* Allocate statistic structure */
149584         c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
149585         if (!c->stats) {
149586 +               if (c->wq)
149587 +                       destroy_workqueue(c->wq);
149588                 kfree_sensitive(c);
149589                 return -ENOMEM;
149590         }
149591 diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
149592 index 5a1ce64039f7..0749df80454d 100644
149593 --- a/net/tipc/netlink_compat.c
149594 +++ b/net/tipc/netlink_compat.c
149595 @@ -696,7 +696,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
149596         if (err)
149597                 return err;
149599 -       link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
149600 +       link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
149601         link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
149602         nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME],
149603                     TIPC_MAX_LINK_NAME);
149604 diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
149605 index e4370b1b7494..902cb6dd710b 100644
149606 --- a/net/vmw_vsock/virtio_transport_common.c
149607 +++ b/net/vmw_vsock/virtio_transport_common.c
149608 @@ -733,6 +733,23 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
149609         return t->send_pkt(reply);
149612 +/* This function should be called with sk_lock held and SOCK_DONE set */
149613 +static void virtio_transport_remove_sock(struct vsock_sock *vsk)
149615 +       struct virtio_vsock_sock *vvs = vsk->trans;
149616 +       struct virtio_vsock_pkt *pkt, *tmp;
149618 +       /* We don't need to take rx_lock, as the socket is closing and we are
149619 +        * removing it.
149620 +        */
149621 +       list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
149622 +               list_del(&pkt->list);
149623 +               virtio_transport_free_pkt(pkt);
149624 +       }
149626 +       vsock_remove_sock(vsk);
149629  static void virtio_transport_wait_close(struct sock *sk, long timeout)
149631         if (timeout) {
149632 @@ -765,7 +782,7 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
149633             (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
149634                 vsk->close_work_scheduled = false;
149636 -               vsock_remove_sock(vsk);
149637 +               virtio_transport_remove_sock(vsk);
149639                 /* Release refcnt obtained when we scheduled the timeout */
149640                 sock_put(sk);
149641 @@ -828,22 +845,15 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
149643  void virtio_transport_release(struct vsock_sock *vsk)
149645 -       struct virtio_vsock_sock *vvs = vsk->trans;
149646 -       struct virtio_vsock_pkt *pkt, *tmp;
149647         struct sock *sk = &vsk->sk;
149648         bool remove_sock = true;
149650         if (sk->sk_type == SOCK_STREAM)
149651                 remove_sock = virtio_transport_close(vsk);
149653 -       list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
149654 -               list_del(&pkt->list);
149655 -               virtio_transport_free_pkt(pkt);
149656 -       }
149658         if (remove_sock) {
149659                 sock_set_flag(sk, SOCK_DONE);
149660 -               vsock_remove_sock(vsk);
149661 +               virtio_transport_remove_sock(vsk);
149662         }
149664  EXPORT_SYMBOL_GPL(virtio_transport_release);
149665 diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
149666 index 8b65323207db..1c9ecb18b8e6 100644
149667 --- a/net/vmw_vsock/vmci_transport.c
149668 +++ b/net/vmw_vsock/vmci_transport.c
149669 @@ -568,8 +568,7 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
149670                                peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
149671  out:
149672         if (err < 0) {
149673 -               pr_err("Could not attach to queue pair with %d\n",
149674 -                      err);
149675 +               pr_err_once("Could not attach to queue pair with %d\n", err);
149676                 err = vmci_transport_error_to_vsock_error(err);
149677         }
149679 diff --git a/net/wireless/core.c b/net/wireless/core.c
149680 index a2785379df6e..589ee5a69a2e 100644
149681 --- a/net/wireless/core.c
149682 +++ b/net/wireless/core.c
149683 @@ -332,14 +332,29 @@ static void cfg80211_event_work(struct work_struct *work)
149684  void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
149686         struct wireless_dev *wdev, *tmp;
149687 +       bool found = false;
149689         ASSERT_RTNL();
149690 -       lockdep_assert_wiphy(&rdev->wiphy);
149692 +       list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
149693 +               if (wdev->nl_owner_dead) {
149694 +                       if (wdev->netdev)
149695 +                               dev_close(wdev->netdev);
149696 +                       found = true;
149697 +               }
149698 +       }
149700 +       if (!found)
149701 +               return;
149703 +       wiphy_lock(&rdev->wiphy);
149704         list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
149705 -               if (wdev->nl_owner_dead)
149706 +               if (wdev->nl_owner_dead) {
149707 +                       cfg80211_leave(rdev, wdev);
149708                         rdev_del_virtual_intf(rdev, wdev);
149709 +               }
149710         }
149711 +       wiphy_unlock(&rdev->wiphy);
149714  static void cfg80211_destroy_iface_wk(struct work_struct *work)
149715 @@ -350,9 +365,7 @@ static void cfg80211_destroy_iface_wk(struct work_struct *work)
149716                             destroy_work);
149718         rtnl_lock();
149719 -       wiphy_lock(&rdev->wiphy);
149720         cfg80211_destroy_ifaces(rdev);
149721 -       wiphy_unlock(&rdev->wiphy);
149722         rtnl_unlock();
149725 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
149726 index b1df42e4f1eb..a5224da63832 100644
149727 --- a/net/wireless/nl80211.c
149728 +++ b/net/wireless/nl80211.c
149729 @@ -3929,7 +3929,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
149730         return err;
149733 -static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
149734 +static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
149736         struct cfg80211_registered_device *rdev = info->user_ptr[0];
149737         struct vif_params params;
149738 @@ -3938,9 +3938,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
149739         int err;
149740         enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
149742 -       /* to avoid failing a new interface creation due to pending removal */
149743 -       cfg80211_destroy_ifaces(rdev);
149745         memset(&params, 0, sizeof(params));
149747         if (!info->attrs[NL80211_ATTR_IFNAME])
149748 @@ -4028,6 +4025,21 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
149749         return genlmsg_reply(msg, info);
149752 +static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
149754 +       struct cfg80211_registered_device *rdev = info->user_ptr[0];
149755 +       int ret;
149757 +       /* to avoid failing a new interface creation due to pending removal */
149758 +       cfg80211_destroy_ifaces(rdev);
149760 +       wiphy_lock(&rdev->wiphy);
149761 +       ret = _nl80211_new_interface(skb, info);
149762 +       wiphy_unlock(&rdev->wiphy);
149764 +       return ret;
149767  static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
149769         struct cfg80211_registered_device *rdev = info->user_ptr[0];
149770 @@ -15040,7 +15052,9 @@ static const struct genl_small_ops nl80211_small_ops[] = {
149771                 .doit = nl80211_new_interface,
149772                 .flags = GENL_UNS_ADMIN_PERM,
149773                 .internal_flags = NL80211_FLAG_NEED_WIPHY |
149774 -                                 NL80211_FLAG_NEED_RTNL,
149775 +                                 NL80211_FLAG_NEED_RTNL |
149776 +                                 /* we take the wiphy mutex later ourselves */
149777 +                                 NL80211_FLAG_NO_WIPHY_MTX,
149778         },
149779         {
149780                 .cmd = NL80211_CMD_DEL_INTERFACE,
149781 diff --git a/net/wireless/scan.c b/net/wireless/scan.c
149782 index 758eb7d2a706..caa8eafbd583 100644
149783 --- a/net/wireless/scan.c
149784 +++ b/net/wireless/scan.c
149785 @@ -1751,6 +1751,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
149787                 if (rdev->bss_entries >= bss_entries_limit &&
149788                     !cfg80211_bss_expire_oldest(rdev)) {
149789 +                       if (!list_empty(&new->hidden_list))
149790 +                               list_del(&new->hidden_list);
149791                         kfree(new);
149792                         goto drop;
149793                 }
149794 diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
149795 index 4faabd1ecfd1..143979ea4165 100644
149796 --- a/net/xdp/xsk.c
149797 +++ b/net/xdp/xsk.c
149798 @@ -454,12 +454,16 @@ static int xsk_generic_xmit(struct sock *sk)
149799         struct sk_buff *skb;
149800         unsigned long flags;
149801         int err = 0;
149802 +       u32 hr, tr;
149804         mutex_lock(&xs->mutex);
149806         if (xs->queue_id >= xs->dev->real_num_tx_queues)
149807                 goto out;
149809 +       hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
149810 +       tr = xs->dev->needed_tailroom;
149812         while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
149813                 char *buffer;
149814                 u64 addr;
149815 @@ -471,11 +475,13 @@ static int xsk_generic_xmit(struct sock *sk)
149816                 }
149818                 len = desc.len;
149819 -               skb = sock_alloc_send_skb(sk, len, 1, &err);
149820 +               skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
149821                 if (unlikely(!skb))
149822                         goto out;
149824 +               skb_reserve(skb, hr);
149825                 skb_put(skb, len);
149827                 addr = desc.addr;
149828                 buffer = xsk_buff_raw_get_data(xs->pool, addr);
149829                 err = skb_store_bits(skb, 0, buffer, len);
149830 diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
149831 index 2823b7c3302d..40f359bf2044 100644
149832 --- a/net/xdp/xsk_queue.h
149833 +++ b/net/xdp/xsk_queue.h
149834 @@ -128,13 +128,12 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
149835  static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
149836                                             struct xdp_desc *desc)
149838 -       u64 chunk, chunk_end;
149839 +       u64 chunk;
149841 -       chunk = xp_aligned_extract_addr(pool, desc->addr);
149842 -       chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
149843 -       if (chunk != chunk_end)
149844 +       if (desc->len > pool->chunk_size)
149845                 return false;
149847 +       chunk = xp_aligned_extract_addr(pool, desc->addr);
149848         if (chunk >= pool->addrs_cnt)
149849                 return false;
149851 diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
149852 index 3f4599c9a202..ef30d2b353b0 100644
149853 --- a/samples/bpf/tracex1_kern.c
149854 +++ b/samples/bpf/tracex1_kern.c
149855 @@ -26,7 +26,7 @@
149856  SEC("kprobe/__netif_receive_skb_core")
149857  int bpf_prog1(struct pt_regs *ctx)
149859 -       /* attaches to kprobe netif_receive_skb,
149860 +       /* attaches to kprobe __netif_receive_skb_core,
149861          * looks for packets on loobpack device and prints them
149862          */
149863         char devname[IFNAMSIZ];
149864 @@ -35,7 +35,7 @@ int bpf_prog1(struct pt_regs *ctx)
149865         int len;
149867         /* non-portable! works for the given kernel only */
149868 -       skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
149869 +       bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
149870         dev = _(skb->dev);
149871         len = _(skb->len);
149873 diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
149874 index c406f03ee551..5a90aa527877 100644
149875 --- a/samples/kfifo/bytestream-example.c
149876 +++ b/samples/kfifo/bytestream-example.c
149877 @@ -122,8 +122,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
149878         ret = kfifo_from_user(&test, buf, count, &copied);
149880         mutex_unlock(&write_lock);
149881 +       if (ret)
149882 +               return ret;
149884 -       return ret ? ret : copied;
149885 +       return copied;
149888  static ssize_t fifo_read(struct file *file, char __user *buf,
149889 @@ -138,8 +140,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
149890         ret = kfifo_to_user(&test, buf, count, &copied);
149892         mutex_unlock(&read_lock);
149893 +       if (ret)
149894 +               return ret;
149896 -       return ret ? ret : copied;
149897 +       return copied;
149900  static const struct proc_ops fifo_proc_ops = {
149901 diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
149902 index 78977fc4a23f..e5403d8c971a 100644
149903 --- a/samples/kfifo/inttype-example.c
149904 +++ b/samples/kfifo/inttype-example.c
149905 @@ -115,8 +115,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
149906         ret = kfifo_from_user(&test, buf, count, &copied);
149908         mutex_unlock(&write_lock);
149909 +       if (ret)
149910 +               return ret;
149912 -       return ret ? ret : copied;
149913 +       return copied;
149916  static ssize_t fifo_read(struct file *file, char __user *buf,
149917 @@ -131,8 +133,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
149918         ret = kfifo_to_user(&test, buf, count, &copied);
149920         mutex_unlock(&read_lock);
149921 +       if (ret)
149922 +               return ret;
149924 -       return ret ? ret : copied;
149925 +       return copied;
149928  static const struct proc_ops fifo_proc_ops = {
149929 diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
149930 index c507998a2617..f64f3d62d6c2 100644
149931 --- a/samples/kfifo/record-example.c
149932 +++ b/samples/kfifo/record-example.c
149933 @@ -129,8 +129,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
149934         ret = kfifo_from_user(&test, buf, count, &copied);
149936         mutex_unlock(&write_lock);
149937 +       if (ret)
149938 +               return ret;
149940 -       return ret ? ret : copied;
149941 +       return copied;
149944  static ssize_t fifo_read(struct file *file, char __user *buf,
149945 @@ -145,8 +147,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
149946         ret = kfifo_to_user(&test, buf, count, &copied);
149948         mutex_unlock(&read_lock);
149949 +       if (ret)
149950 +               return ret;
149952 -       return ret ? ret : copied;
149953 +       return copied;
149956  static const struct proc_ops fifo_proc_ops = {
149957 diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
149958 index 066beffca09a..4ca5579af4e4 100644
149959 --- a/scripts/Makefile.modpost
149960 +++ b/scripts/Makefile.modpost
149961 @@ -68,7 +68,20 @@ else
149962  ifeq ($(KBUILD_EXTMOD),)
149964  input-symdump := vmlinux.symvers
149965 -output-symdump := Module.symvers
149966 +output-symdump := modules-only.symvers
149968 +quiet_cmd_cat = GEN     $@
149969 +      cmd_cat = cat $(real-prereqs) > $@
149971 +ifneq ($(wildcard vmlinux.symvers),)
149973 +__modpost: Module.symvers
149974 +Module.symvers: vmlinux.symvers modules-only.symvers FORCE
149975 +       $(call if_changed,cat)
149977 +targets += Module.symvers
149979 +endif
149981  else
149983 diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
149984 index e0f965529166..af814b39b876 100644
149985 --- a/scripts/kconfig/nconf.c
149986 +++ b/scripts/kconfig/nconf.c
149987 @@ -504,8 +504,8 @@ static int get_mext_match(const char *match_str, match_f flag)
149988         else if (flag == FIND_NEXT_MATCH_UP)
149989                 --match_start;
149991 +       match_start = (match_start + items_num) % items_num;
149992         index = match_start;
149993 -       index = (index + items_num) % items_num;
149994         while (true) {
149995                 char *str = k_menu_items[index].str;
149996                 if (strcasestr(str, match_str) != NULL)
149997 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
149998 index 24725e50c7b4..10c3fba26f03 100644
149999 --- a/scripts/mod/modpost.c
150000 +++ b/scripts/mod/modpost.c
150001 @@ -2423,19 +2423,6 @@ static void read_dump(const char *fname)
150002         fatal("parse error in symbol dump file\n");
150005 -/* For normal builds always dump all symbols.
150006 - * For external modules only dump symbols
150007 - * that are not read from kernel Module.symvers.
150008 - **/
150009 -static int dump_sym(struct symbol *sym)
150011 -       if (!external_module)
150012 -               return 1;
150013 -       if (sym->module->from_dump)
150014 -               return 0;
150015 -       return 1;
150018  static void write_dump(const char *fname)
150020         struct buffer buf = { };
150021 @@ -2446,7 +2433,7 @@ static void write_dump(const char *fname)
150022         for (n = 0; n < SYMBOL_HASH_SIZE ; n++) {
150023                 symbol = symbolhash[n];
150024                 while (symbol) {
150025 -                       if (dump_sym(symbol)) {
150026 +                       if (!symbol->module->from_dump) {
150027                                 namespace = symbol->namespace;
150028                                 buf_printf(&buf, "0x%08x\t%s\t%s\t%s\t%s\n",
150029                                            symbol->crc, symbol->name,
150030 diff --git a/scripts/setlocalversion b/scripts/setlocalversion
150031 index bb709eda96cd..cf323fa660b6 100755
150032 --- a/scripts/setlocalversion
150033 +++ b/scripts/setlocalversion
150034 @@ -54,7 +54,7 @@ scm_version()
150035                         # If only the short version is requested, don't bother
150036                         # running further git commands
150037                         if $short; then
150038 -                               echo "+"
150039 +                       #       echo "+"
150040                                 return
150041                         fi
150042                         # If we are past a tagged commit (like
150043 diff --git a/security/commoncap.c b/security/commoncap.c
150044 index 1c519c875217..5cdeb73ca8fa 100644
150045 --- a/security/commoncap.c
150046 +++ b/security/commoncap.c
150047 @@ -400,7 +400,7 @@ int cap_inode_getsecurity(struct user_namespace *mnt_userns,
150048                                       &tmpbuf, size, GFP_NOFS);
150049         dput(dentry);
150051 -       if (ret < 0)
150052 +       if (ret < 0 || !tmpbuf)
150053                 return ret;
150055         fs_ns = inode->i_sb->s_user_ns;
150056 diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
150057 index e22e510ae92d..4e081e650047 100644
150058 --- a/security/integrity/ima/ima_template.c
150059 +++ b/security/integrity/ima/ima_template.c
150060 @@ -494,8 +494,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
150061                         }
150062                 }
150064 -               entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) :
150065 -                            le32_to_cpu(*(hdr[HDR_PCR].data));
150066 +               entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
150067 +                            le32_to_cpu(*(u32 *)(hdr[HDR_PCR].data));
150068                 ret = ima_restore_measurement_entry(entry);
150069                 if (ret < 0)
150070                         break;
150071 diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
150072 index 493eb91ed017..56c9b48460d9 100644
150073 --- a/security/keys/trusted-keys/trusted_tpm1.c
150074 +++ b/security/keys/trusted-keys/trusted_tpm1.c
150075 @@ -500,10 +500,12 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
150077         ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
150078         if (ret < 0)
150079 -               return ret;
150080 +               goto out;
150082 -       if (ret != TPM_NONCE_SIZE)
150083 -               return -EIO;
150084 +       if (ret != TPM_NONCE_SIZE) {
150085 +               ret = -EIO;
150086 +               goto out;
150087 +       }
150089         ordinal = htonl(TPM_ORD_SEAL);
150090         datsize = htonl(datalen);
150091 @@ -791,13 +793,33 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
150092                                 return -EINVAL;
150093                         break;
150094                 case Opt_blobauth:
150095 -                       if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
150096 -                               return -EINVAL;
150097 -                       res = hex2bin(opt->blobauth, args[0].from,
150098 -                                     SHA1_DIGEST_SIZE);
150099 -                       if (res < 0)
150100 -                               return -EINVAL;
150101 +                       /*
150102 +                        * TPM 1.2 authorizations are sha1 hashes passed in as
150103 +                        * hex strings.  TPM 2.0 authorizations are simple
150104 +                        * passwords (although it can take a hash as well)
150105 +                        */
150106 +                       opt->blobauth_len = strlen(args[0].from);
150108 +                       if (opt->blobauth_len == 2 * TPM_DIGEST_SIZE) {
150109 +                               res = hex2bin(opt->blobauth, args[0].from,
150110 +                                             TPM_DIGEST_SIZE);
150111 +                               if (res < 0)
150112 +                                       return -EINVAL;
150114 +                               opt->blobauth_len = TPM_DIGEST_SIZE;
150115 +                               break;
150116 +                       }
150118 +                       if (tpm2 && opt->blobauth_len <= sizeof(opt->blobauth)) {
150119 +                               memcpy(opt->blobauth, args[0].from,
150120 +                                      opt->blobauth_len);
150121 +                               break;
150122 +                       }
150124 +                       return -EINVAL;
150126                         break;
150128                 case Opt_migratable:
150129                         if (*args[0].from == '0')
150130                                 pay->migratable = 0;
150131 diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
150132 index c87c4df8703d..4c19d3abddbe 100644
150133 --- a/security/keys/trusted-keys/trusted_tpm2.c
150134 +++ b/security/keys/trusted-keys/trusted_tpm2.c
150135 @@ -97,10 +97,12 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
150136                              TPM_DIGEST_SIZE);
150138         /* sensitive */
150139 -       tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
150140 +       tpm_buf_append_u16(&buf, 4 + options->blobauth_len + payload->key_len + 1);
150142 +       tpm_buf_append_u16(&buf, options->blobauth_len);
150143 +       if (options->blobauth_len)
150144 +               tpm_buf_append(&buf, options->blobauth, options->blobauth_len);
150146 -       tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
150147 -       tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
150148         tpm_buf_append_u16(&buf, payload->key_len + 1);
150149         tpm_buf_append(&buf, payload->key, payload->key_len);
150150         tpm_buf_append_u8(&buf, payload->migratable);
150151 @@ -265,7 +267,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
150152                              NULL /* nonce */, 0,
150153                              TPM2_SA_CONTINUE_SESSION,
150154                              options->blobauth /* hmac */,
150155 -                            TPM_DIGEST_SIZE);
150156 +                            options->blobauth_len);
150158         rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
150159         if (rc > 0)
150160 diff --git a/security/security.c b/security/security.c
150161 index 5ac96b16f8fa..8ef0ce0faba7 100644
150162 --- a/security/security.c
150163 +++ b/security/security.c
150164 @@ -727,24 +727,28 @@ int security_binder_set_context_mgr(struct task_struct *mgr)
150166         return call_int_hook(binder_set_context_mgr, 0, mgr);
150168 +EXPORT_SYMBOL(security_binder_set_context_mgr);
150170  int security_binder_transaction(struct task_struct *from,
150171                                 struct task_struct *to)
150173         return call_int_hook(binder_transaction, 0, from, to);
150175 +EXPORT_SYMBOL(security_binder_transaction);
150177  int security_binder_transfer_binder(struct task_struct *from,
150178                                     struct task_struct *to)
150180         return call_int_hook(binder_transfer_binder, 0, from, to);
150182 +EXPORT_SYMBOL(security_binder_transfer_binder);
150184  int security_binder_transfer_file(struct task_struct *from,
150185                                   struct task_struct *to, struct file *file)
150187         return call_int_hook(binder_transfer_file, 0, from, to, file);
150189 +EXPORT_SYMBOL(security_binder_transfer_file);
150191  int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
150193 diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
150194 index ba2e01a6955c..62d19bccf3de 100644
150195 --- a/security/selinux/include/classmap.h
150196 +++ b/security/selinux/include/classmap.h
150197 @@ -242,11 +242,12 @@ struct security_class_mapping secclass_map[] = {
150198         { "infiniband_endport",
150199           { "manage_subnet", NULL } },
150200         { "bpf",
150201 -         {"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
150202 +         { "map_create", "map_read", "map_write", "prog_load", "prog_run",
150203 +           NULL } },
150204         { "xdp_socket",
150205           { COMMON_SOCK_PERMS, NULL } },
150206         { "perf_event",
150207 -         {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
150208 +         { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
150209         { "lockdown",
150210           { "integrity", "confidentiality", NULL } },
150211         { "anon_inode",
150212 diff --git a/sound/core/init.c b/sound/core/init.c
150213 index 45f4b01de23f..ef41f5b3a240 100644
150214 --- a/sound/core/init.c
150215 +++ b/sound/core/init.c
150216 @@ -398,10 +398,8 @@ int snd_card_disconnect(struct snd_card *card)
150217                 return 0;
150218         }
150219         card->shutdown = 1;
150220 -       spin_unlock(&card->files_lock);
150222         /* replace file->f_op with special dummy operations */
150223 -       spin_lock(&card->files_lock);
150224         list_for_each_entry(mfile, &card->files_list, list) {
150225                 /* it's critical part, use endless loop */
150226                 /* we have no room to fail */
150227 diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
150228 index bbae04793c50..c18017e0a3d9 100644
150229 --- a/sound/firewire/bebob/bebob_stream.c
150230 +++ b/sound/firewire/bebob/bebob_stream.c
150231 @@ -517,20 +517,22 @@ int snd_bebob_stream_init_duplex(struct snd_bebob *bebob)
150232  static int keep_resources(struct snd_bebob *bebob, struct amdtp_stream *stream,
150233                           unsigned int rate, unsigned int index)
150235 -       struct snd_bebob_stream_formation *formation;
150236 +       unsigned int pcm_channels;
150237 +       unsigned int midi_ports;
150238         struct cmp_connection *conn;
150239         int err;
150241         if (stream == &bebob->tx_stream) {
150242 -               formation = bebob->tx_stream_formations + index;
150243 +               pcm_channels = bebob->tx_stream_formations[index].pcm;
150244 +               midi_ports = bebob->midi_input_ports;
150245                 conn = &bebob->out_conn;
150246         } else {
150247 -               formation = bebob->rx_stream_formations + index;
150248 +               pcm_channels = bebob->rx_stream_formations[index].pcm;
150249 +               midi_ports = bebob->midi_output_ports;
150250                 conn = &bebob->in_conn;
150251         }
150253 -       err = amdtp_am824_set_parameters(stream, rate, formation->pcm,
150254 -                                        formation->midi, false);
150255 +       err = amdtp_am824_set_parameters(stream, rate, pcm_channels, midi_ports, false);
150256         if (err < 0)
150257                 return err;
150259 diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
150260 index 0aa545ac6e60..1c90421a88dc 100644
150261 --- a/sound/isa/sb/emu8000.c
150262 +++ b/sound/isa/sb/emu8000.c
150263 @@ -1029,8 +1029,10 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)
150265         memset(emu->controls, 0, sizeof(emu->controls));
150266         for (i = 0; i < EMU8000_NUM_CONTROLS; i++) {
150267 -               if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0)
150268 +               if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) {
150269 +                       emu->controls[i] = NULL;
150270                         goto __error;
150271 +               }
150272         }
150273         return 0;
150275 diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
150276 index 8635a2b6b36b..4789345a8fdd 100644
150277 --- a/sound/isa/sb/sb16_csp.c
150278 +++ b/sound/isa/sb/sb16_csp.c
150279 @@ -1045,10 +1045,14 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p)
150281         spin_lock_init(&p->q_lock);
150283 -       if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0)
150284 +       if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) {
150285 +               p->qsound_switch = NULL;
150286                 goto __error;
150287 -       if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0)
150288 +       }
150289 +       if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) {
150290 +               p->qsound_space = NULL;
150291                 goto __error;
150292 +       }
150294         return 0;
150296 diff --git a/sound/pci/hda/ideapad_s740_helper.c b/sound/pci/hda/ideapad_s740_helper.c
150297 new file mode 100644
150298 index 000000000000..564b9086e52d
150299 --- /dev/null
150300 +++ b/sound/pci/hda/ideapad_s740_helper.c
150301 @@ -0,0 +1,492 @@
150302 +// SPDX-License-Identifier: GPL-2.0
150303 +/* Fixes for Lenovo Ideapad S740, to be included from codec driver */
150305 +static const struct hda_verb alc285_ideapad_s740_coefs[] = {
150306 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x10 },
150307 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0320 },
150308 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
150309 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
150310 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
150311 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
150312 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150313 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150314 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150315 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150316 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150317 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150318 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150319 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150320 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150321 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150322 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150323 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150324 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150325 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150326 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150327 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
150328 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150329 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150330 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150331 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150332 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
150333 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150334 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150335 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150336 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150337 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150338 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150339 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150340 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150341 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150342 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150343 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150344 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150345 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150346 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150347 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150348 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150349 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150350 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150351 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
150352 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150353 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
150354 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150355 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150356 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
150357 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150358 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
150359 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150360 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150361 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150362 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150363 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
150364 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150365 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
150366 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150367 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150368 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
150369 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150370 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
150371 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150372 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150373 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150374 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150375 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
150376 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150377 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
150378 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150379 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150380 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
150381 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150382 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
150383 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150384 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150385 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150386 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150387 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
150388 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150389 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
150390 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150391 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150392 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
150393 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150394 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
150395 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150396 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150397 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150398 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150399 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
150400 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150401 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
150402 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150403 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150404 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
150405 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150406 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
150407 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150408 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150409 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150410 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150411 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
150412 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150413 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
150414 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150415 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150416 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
150417 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150418 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
150419 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150420 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150421 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150422 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150423 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
150424 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150425 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
150426 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150427 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150428 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
150429 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150430 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
150431 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150432 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150433 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150434 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150435 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
150436 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150437 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150438 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150439 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150440 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
150441 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150442 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150443 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150444 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150445 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150446 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150447 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
150448 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150449 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
150450 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150451 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150452 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
150453 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150454 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
150455 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150456 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150457 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150458 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150459 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
150460 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150461 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
150462 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150463 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150464 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
150465 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150466 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
150467 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150468 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150469 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150470 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150471 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
150472 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150473 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
150474 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150475 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150476 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
150477 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150478 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
150479 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150480 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150481 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150482 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150483 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
150484 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150485 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
150486 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150487 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150488 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
150489 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150490 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
150491 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150492 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150493 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150494 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150495 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
150496 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150497 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
150498 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150499 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150500 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
150501 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150502 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
150503 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150504 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150505 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150506 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150507 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
150508 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150509 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
150510 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150511 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150512 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
150513 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150514 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
150515 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150516 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150517 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150518 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150519 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
150520 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150521 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
150522 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150523 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150524 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
150525 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150526 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
150527 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150528 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150529 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150530 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150531 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
150532 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150533 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150534 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150535 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150536 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
150537 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150538 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150539 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150540 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150541 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150542 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150543 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
150544 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150545 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150546 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150547 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150548 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
150549 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150550 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150551 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150552 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
150553 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
150554 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
150555 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
150556 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150557 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150558 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150559 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150560 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150561 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150562 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150563 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150564 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150565 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150566 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150567 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150568 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150569 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150570 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150571 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
150572 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150573 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150574 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150575 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150576 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
150577 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150578 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150579 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150580 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150581 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150582 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150583 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150584 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150585 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150586 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150587 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150588 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150589 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150590 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150591 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150592 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150593 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150594 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150595 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
150596 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150597 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
150598 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150599 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150600 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
150601 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150602 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
150603 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150604 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150605 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150606 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150607 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
150608 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150609 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
150610 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150611 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150612 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
150613 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150614 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
150615 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150616 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150617 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150618 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150619 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
150620 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150621 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
150622 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150623 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150624 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
150625 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150626 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
150627 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150628 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150629 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150630 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150631 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
150632 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150633 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
150634 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150635 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150636 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
150637 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150638 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
150639 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150640 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150641 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150642 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150643 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
150644 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150645 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
150646 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150647 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150648 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
150649 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150650 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
150651 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150652 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150653 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150654 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150655 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
150656 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150657 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
150658 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150659 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150660 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
150661 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150662 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
150663 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150664 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150665 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150666 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150667 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150668 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150669 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
150670 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150671 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150672 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150673 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150674 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
150675 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150676 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150677 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150678 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150679 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150680 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150681 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
150682 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150683 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
150684 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150685 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150686 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
150687 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150688 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
150689 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150690 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150691 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150692 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150693 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
150694 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150695 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
150696 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150697 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150698 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
150699 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150700 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
150701 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150702 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150703 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150704 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150705 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
150706 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150707 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
150708 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150709 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150710 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
150711 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150712 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
150713 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150714 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150715 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150716 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150717 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
150718 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150719 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
150720 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150721 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150722 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
150723 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150724 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
150725 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150726 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150727 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150728 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150729 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
150730 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150731 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
150732 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150733 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150734 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
150735 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150736 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
150737 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150738 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150739 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150740 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150741 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
150742 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150743 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
150744 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150745 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150746 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
150747 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150748 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
150749 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150750 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150751 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150752 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150753 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
150754 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150755 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
150756 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150757 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150758 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
150759 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150760 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
150761 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150762 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150763 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150764 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150765 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
150766 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150767 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150768 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150769 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150770 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
150771 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150772 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
150773 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150774 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150775 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
150776 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
150777 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
150778 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150779 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
150780 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
150784 +static void alc285_fixup_ideapad_s740_coef(struct hda_codec *codec,
150785 +                                          const struct hda_fixup *fix,
150786 +                                          int action)
150788 +       switch (action) {
150789 +       case HDA_FIXUP_ACT_PRE_PROBE:
150790 +               snd_hda_add_verbs(codec, alc285_ideapad_s740_coefs);
150791 +               break;
150792 +       }
150794 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
150795 index dfef9c17e140..d111258c6f45 100644
150796 --- a/sound/pci/hda/patch_conexant.c
150797 +++ b/sound/pci/hda/patch_conexant.c
150798 @@ -930,18 +930,18 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
150799         SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
150800         SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
150801         SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
150802 -       SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
150803 -       SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
150804 -       SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
150805 -       SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
150806 -       SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
150807         SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
150808         SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
150809 +       SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
150810         SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
150811 -       SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
150812 -       SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
150813 +       SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
150814         SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
150815         SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
150816 +       SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
150817 +       SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
150818 +       SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
150819 +       SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
150820 +       SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
150821         SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
150822         SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
150823         SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
150824 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
150825 index 45ae845e82df..4b2cc8cb55c4 100644
150826 --- a/sound/pci/hda/patch_hdmi.c
150827 +++ b/sound/pci/hda/patch_hdmi.c
150828 @@ -1848,16 +1848,12 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
150829          */
150830         if (spec->intel_hsw_fixup) {
150831                 /*
150832 -                * On Intel platforms, device entries number is
150833 -                * changed dynamically. If there is a DP MST
150834 -                * hub connected, the device entries number is 3.
150835 -                * Otherwise, it is 1.
150836 -                * Here we manually set dev_num to 3, so that
150837 -                * we can initialize all the device entries when
150838 -                * bootup statically.
150839 +                * On Intel platforms, device entries count returned
150840 +                * by AC_PAR_DEVLIST_LEN is dynamic, and depends on
150841 +                * the type of receiver that is connected. Allocate pin
150842 +                * structures based on worst case.
150843                  */
150844 -               dev_num = 3;
150845 -               spec->dev_num = 3;
150846 +               dev_num = spec->dev_num;
150847         } else if (spec->dyn_pcm_assign && codec->dp_mst) {
150848                 dev_num = snd_hda_get_num_devices(codec, pin_nid) + 1;
150849                 /*
150850 @@ -2658,7 +2654,7 @@ static void generic_acomp_pin_eld_notify(void *audio_ptr, int port, int dev_id)
150851         /* skip notification during system suspend (but not in runtime PM);
150852          * the state will be updated at resume
150853          */
150854 -       if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
150855 +       if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
150856                 return;
150857         /* ditto during suspend/resume process itself */
150858         if (snd_hdac_is_in_pm(&codec->core))
150859 @@ -2844,7 +2840,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
150860         /* skip notification during system suspend (but not in runtime PM);
150861          * the state will be updated at resume
150862          */
150863 -       if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
150864 +       if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
150865                 return;
150866         /* ditto during suspend/resume process itself */
150867         if (snd_hdac_is_in_pm(&codec->core))
150868 @@ -2942,7 +2938,7 @@ static int parse_intel_hdmi(struct hda_codec *codec)
150870  /* Intel Haswell and onwards; audio component with eld notifier */
150871  static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
150872 -                                const int *port_map, int port_num)
150873 +                                const int *port_map, int port_num, int dev_num)
150875         struct hdmi_spec *spec;
150876         int err;
150877 @@ -2957,6 +2953,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
150878         spec->port_map = port_map;
150879         spec->port_num = port_num;
150880         spec->intel_hsw_fixup = true;
150881 +       spec->dev_num = dev_num;
150883         intel_haswell_enable_all_pins(codec, true);
150884         intel_haswell_fixup_enable_dp12(codec);
150885 @@ -2982,12 +2979,12 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
150887  static int patch_i915_hsw_hdmi(struct hda_codec *codec)
150889 -       return intel_hsw_common_init(codec, 0x08, NULL, 0);
150890 +       return intel_hsw_common_init(codec, 0x08, NULL, 0, 3);
150893  static int patch_i915_glk_hdmi(struct hda_codec *codec)
150895 -       return intel_hsw_common_init(codec, 0x0b, NULL, 0);
150896 +       return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3);
150899  static int patch_i915_icl_hdmi(struct hda_codec *codec)
150900 @@ -2998,7 +2995,7 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
150901          */
150902         static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
150904 -       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
150905 +       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3);
150908  static int patch_i915_tgl_hdmi(struct hda_codec *codec)
150909 @@ -3010,7 +3007,7 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
150910         static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
150911         int ret;
150913 -       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
150914 +       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4);
150915         if (!ret) {
150916                 struct hdmi_spec *spec = codec->spec;
150918 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
150919 index a7544b77d3f7..1fe70f2fe4fe 100644
150920 --- a/sound/pci/hda/patch_realtek.c
150921 +++ b/sound/pci/hda/patch_realtek.c
150922 @@ -2470,13 +2470,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
150923                       ALC882_FIXUP_ACER_ASPIRE_8930G),
150924         SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
150925                       ALC882_FIXUP_ACER_ASPIRE_8930G),
150926 +       SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
150927 +                     ALC882_FIXUP_ACER_ASPIRE_4930G),
150928 +       SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
150929         SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
150930                       ALC882_FIXUP_ACER_ASPIRE_4930G),
150931         SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
150932                       ALC882_FIXUP_ACER_ASPIRE_4930G),
150933 -       SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
150934 -                     ALC882_FIXUP_ACER_ASPIRE_4930G),
150935 -       SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
150936         SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G",
150937                       ALC882_FIXUP_ACER_ASPIRE_4930G),
150938         SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
150939 @@ -2489,11 +2489,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
150940         SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
150941         SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
150942         SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
150943 +       SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
150944 +       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
150945         SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
150946         SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
150947         SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
150948 -       SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
150949 -       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
150951         /* All Apple entries are in codec SSIDs */
150952         SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
150953 @@ -2536,9 +2536,19 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
150954         SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
150955         SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
150956         SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
150957 +       SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150958 +       SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150959 +       SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150960 +       SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150961 +       SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150962 +       SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150963 +       SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150964 +       SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150965 +       SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150966 +       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150967         SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
150968         SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
150969 -       SND_PCI_QUIRK(0x1558, 0x950A, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
150970 +       SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
150971         SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
150972         SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
150973         SND_PCI_QUIRK(0x1558, 0x95e3, "Clevo P955[ER]T", ALC1220_FIXUP_CLEVO_P950),
150974 @@ -2548,14 +2558,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
150975         SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
150976         SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
150977         SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
150978 -       SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150979 -       SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150980 -       SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150981 -       SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150982 -       SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150983 -       SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150984 -       SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150985 -       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
150986         SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
150987         SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
150988         SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
150989 @@ -4329,6 +4331,35 @@ static void alc245_fixup_hp_x360_amp(struct hda_codec *codec,
150990         }
150993 +/* toggle GPIO2 at each time stream is started; we use PREPARE state instead */
150994 +static void alc274_hp_envy_pcm_hook(struct hda_pcm_stream *hinfo,
150995 +                                   struct hda_codec *codec,
150996 +                                   struct snd_pcm_substream *substream,
150997 +                                   int action)
150999 +       switch (action) {
151000 +       case HDA_GEN_PCM_ACT_PREPARE:
151001 +               alc_update_gpio_data(codec, 0x04, true);
151002 +               break;
151003 +       case HDA_GEN_PCM_ACT_CLEANUP:
151004 +               alc_update_gpio_data(codec, 0x04, false);
151005 +               break;
151006 +       }
151009 +static void alc274_fixup_hp_envy_gpio(struct hda_codec *codec,
151010 +                                     const struct hda_fixup *fix,
151011 +                                     int action)
151013 +       struct alc_spec *spec = codec->spec;
151015 +       if (action == HDA_FIXUP_ACT_PROBE) {
151016 +               spec->gpio_mask |= 0x04;
151017 +               spec->gpio_dir |= 0x04;
151018 +               spec->gen.pcm_playback_hook = alc274_hp_envy_pcm_hook;
151019 +       }
151022  static void alc_update_coef_led(struct hda_codec *codec,
151023                                 struct alc_coef_led *led,
151024                                 bool polarity, bool on)
151025 @@ -4438,6 +4469,25 @@ static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
151026         alc236_fixup_hp_coef_micmute_led(codec, fix, action);
151029 +static void alc236_fixup_hp_micmute_led_vref(struct hda_codec *codec,
151030 +                               const struct hda_fixup *fix, int action)
151032 +       struct alc_spec *spec = codec->spec;
151034 +       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
151035 +               spec->cap_mute_led_nid = 0x1a;
151036 +               snd_hda_gen_add_micmute_led_cdev(codec, vref_micmute_led_set);
151037 +               codec->power_filter = led_power_filter;
151038 +       }
151041 +static void alc236_fixup_hp_mute_led_micmute_vref(struct hda_codec *codec,
151042 +                               const struct hda_fixup *fix, int action)
151044 +       alc236_fixup_hp_mute_led_coefbit(codec, fix, action);
151045 +       alc236_fixup_hp_micmute_led_vref(codec, fix, action);
151048  #if IS_REACHABLE(CONFIG_INPUT)
151049  static void gpio2_mic_hotkey_event(struct hda_codec *codec,
151050                                    struct hda_jack_callback *event)
151051 @@ -6232,6 +6282,9 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
151052  /* for alc295_fixup_hp_top_speakers */
151053  #include "hp_x360_helper.c"
151055 +/* for alc285_fixup_ideapad_s740_coef() */
151056 +#include "ideapad_s740_helper.c"
151058  enum {
151059         ALC269_FIXUP_GPIO2,
151060         ALC269_FIXUP_SONY_VAIO,
151061 @@ -6400,6 +6453,7 @@ enum {
151062         ALC285_FIXUP_HP_MUTE_LED,
151063         ALC236_FIXUP_HP_GPIO_LED,
151064         ALC236_FIXUP_HP_MUTE_LED,
151065 +       ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
151066         ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
151067         ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
151068         ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
151069 @@ -6415,10 +6469,13 @@ enum {
151070         ALC269_FIXUP_LEMOTE_A1802,
151071         ALC269_FIXUP_LEMOTE_A190X,
151072         ALC256_FIXUP_INTEL_NUC8_RUGGED,
151073 +       ALC233_FIXUP_INTEL_NUC8_DMIC,
151074 +       ALC233_FIXUP_INTEL_NUC8_BOOST,
151075         ALC256_FIXUP_INTEL_NUC10,
151076         ALC255_FIXUP_XIAOMI_HEADSET_MIC,
151077         ALC274_FIXUP_HP_MIC,
151078         ALC274_FIXUP_HP_HEADSET_MIC,
151079 +       ALC274_FIXUP_HP_ENVY_GPIO,
151080         ALC256_FIXUP_ASUS_HPE,
151081         ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
151082         ALC287_FIXUP_HP_GPIO_LED,
151083 @@ -6427,6 +6484,7 @@ enum {
151084         ALC282_FIXUP_ACER_DISABLE_LINEOUT,
151085         ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
151086         ALC256_FIXUP_ACER_HEADSET_MIC,
151087 +       ALC285_FIXUP_IDEAPAD_S740_COEF,
151090  static const struct hda_fixup alc269_fixups[] = {
151091 @@ -7136,6 +7194,16 @@ static const struct hda_fixup alc269_fixups[] = {
151092                 .type = HDA_FIXUP_FUNC,
151093                 .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
151094         },
151095 +       [ALC233_FIXUP_INTEL_NUC8_DMIC] = {
151096 +               .type = HDA_FIXUP_FUNC,
151097 +               .v.func = alc_fixup_inv_dmic,
151098 +               .chained = true,
151099 +               .chain_id = ALC233_FIXUP_INTEL_NUC8_BOOST,
151100 +       },
151101 +       [ALC233_FIXUP_INTEL_NUC8_BOOST] = {
151102 +               .type = HDA_FIXUP_FUNC,
151103 +               .v.func = alc269_fixup_limit_int_mic_boost
151104 +       },
151105         [ALC255_FIXUP_DELL_SPK_NOISE] = {
151106                 .type = HDA_FIXUP_FUNC,
151107                 .v.func = alc_fixup_disable_aamix,
151108 @@ -7646,6 +7714,10 @@ static const struct hda_fixup alc269_fixups[] = {
151109                 .type = HDA_FIXUP_FUNC,
151110                 .v.func = alc236_fixup_hp_mute_led,
151111         },
151112 +       [ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF] = {
151113 +               .type = HDA_FIXUP_FUNC,
151114 +               .v.func = alc236_fixup_hp_mute_led_micmute_vref,
151115 +       },
151116         [ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
151117                 .type = HDA_FIXUP_VERBS,
151118                 .v.verbs = (const struct hda_verb[]) {
151119 @@ -7844,6 +7916,10 @@ static const struct hda_fixup alc269_fixups[] = {
151120                 .chained = true,
151121                 .chain_id = ALC274_FIXUP_HP_MIC
151122         },
151123 +       [ALC274_FIXUP_HP_ENVY_GPIO] = {
151124 +               .type = HDA_FIXUP_FUNC,
151125 +               .v.func = alc274_fixup_hp_envy_gpio,
151126 +       },
151127         [ALC256_FIXUP_ASUS_HPE] = {
151128                 .type = HDA_FIXUP_VERBS,
151129                 .v.verbs = (const struct hda_verb[]) {
151130 @@ -7901,6 +7977,12 @@ static const struct hda_fixup alc269_fixups[] = {
151131                 .chained = true,
151132                 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
151133         },
151134 +       [ALC285_FIXUP_IDEAPAD_S740_COEF] = {
151135 +               .type = HDA_FIXUP_FUNC,
151136 +               .v.func = alc285_fixup_ideapad_s740_coef,
151137 +               .chained = true,
151138 +               .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
151139 +       },
151142  static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151143 @@ -7909,12 +7991,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151144         SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
151145         SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
151146         SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
151147 -       SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
151148         SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
151149         SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
151150         SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
151151         SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
151152         SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
151153 +       SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
151154         SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
151155         SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
151156         SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
151157 @@ -7970,8 +8052,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151158         SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
151159         SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
151160         SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
151161 -       SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
151162         SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
151163 +       SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
151164         SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
151165         SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
151166         SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
151167 @@ -7981,8 +8063,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151168         SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
151169         SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
151170         SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
151171 -       SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
151172         SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
151173 +       SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
151174         SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
151175         SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
151176         SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
151177 @@ -7993,35 +8075,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151178         SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
151179         SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
151180         SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
151181 -       SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
151182 -       /* ALC282 */
151183         SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151184         SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151185         SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151186 +       SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151187 +       SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
151188 +       SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151189 +       SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151190         SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
151191         SND_PCI_QUIRK(0x103c, 0x2237, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
151192         SND_PCI_QUIRK(0x103c, 0x2238, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
151193         SND_PCI_QUIRK(0x103c, 0x2239, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
151194         SND_PCI_QUIRK(0x103c, 0x224b, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
151195 -       SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151196 -       SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151197 -       SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151198 -       SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151199 -       SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
151200 -       SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
151201 -       SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
151202 -       SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151203 -       SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151204 -       SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151205 -       SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151206 -       SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151207 -       SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
151208 -       SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151209 -       SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151210 -       /* ALC290 */
151211 -       SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151212 -       SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151213 -       SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151214         SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151215         SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151216         SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151217 @@ -8029,28 +8094,45 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151218         SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151219         SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151220         SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
151221 +       SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
151222         SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151223         SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151224         SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151225         SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151226 +       SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151227 +       SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151228 +       SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151229 +       SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151230 +       SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
151231         SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151232 +       SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
151233         SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151234 +       SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
151235         SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151236         SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151237         SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151238         SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151239         SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151240 +       SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151241 +       SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151242 +       SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151243 +       SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151244 +       SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151245         SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151246         SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151247         SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151248 -       SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151249 +       SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151250 +       SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
151251 +       SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151252 +       SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
151253         SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151254         SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151255         SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151256         SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
151257 -       SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
151258         SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
151259         SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
151260 +       SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
151261 +       SND_PCI_QUIRK(0x103c, 0x8158, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
151262         SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
151263         SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
151264         SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
151265 @@ -8061,8 +8143,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151266         SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
151267         SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
151268         SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
151269 +       SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
151270         SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
151271         SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
151272 +       SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
151273         SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
151274         SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
151275         SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
151276 @@ -8087,16 +8171,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151277         SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
151278         SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
151279         SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
151280 +       SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
151281         SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
151282         SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
151283         SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
151284 -       SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
151285         SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
151286 +       SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
151287         SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
151288         SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
151289         SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
151290         SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
151291         SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
151292 +       SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
151293         SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
151294         SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
151295         SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
151296 @@ -8109,31 +8195,31 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151297         SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
151298         SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
151299         SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
151300 -       SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
151301         SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
151302         SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
151303         SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
151304 +       SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
151305         SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
151306 -       SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
151307         SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
151308         SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
151309         SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
151310         SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
151311         SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
151312         SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
151313 -       SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
151314 -       SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
151315         SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
151316         SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
151317         SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
151318         SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
151319 +       SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
151320 +       SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
151321         SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
151322         SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
151323         SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
151324 -       SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
151325         SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
151326 +       SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
151327         SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
151328         SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
151329 +       SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
151330         SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
151331         SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
151332         SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
151333 @@ -8143,9 +8229,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151334         SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
151335         SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
151336         SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
151337 -       SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
151338         SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
151339         SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
151340 +       SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
151341         SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
151342         SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
151343         SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
151344 @@ -8201,9 +8287,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151345         SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
151346         SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
151347         SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
151348 +       SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
151349         SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
151350         SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
151351 -       SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
151352         SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
151353         SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
151354         SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
151355 @@ -8244,9 +8330,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151356         SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
151357         SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
151358         SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
151359 +       SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
151360         SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
151361         SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
151362         SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
151363 +       SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
151364         SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
151365         SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
151366         SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
151367 @@ -8265,20 +8353,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
151368         SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
151369         SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
151370         SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
151371 -       SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
151372         SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
151373         SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
151374         SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
151375         SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
151376         SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
151377         SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
151378 +       SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
151379 +       SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
151380         SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
151381         SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
151382         SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
151383         SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
151384 -       SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
151385 -       SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
151386 -       SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
151387 +       SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
151388         SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
151389         SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
151391 @@ -8733,12 +8820,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
151392                 {0x12, 0x90a60130},
151393                 {0x19, 0x03a11020},
151394                 {0x21, 0x0321101f}),
151395 -       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
151396 +       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
151397 +               {0x12, 0x90a60130},
151398                 {0x14, 0x90170110},
151399                 {0x19, 0x04a11040},
151400                 {0x21, 0x04211020}),
151401         SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
151402 -               {0x12, 0x90a60130},
151403 +               {0x14, 0x90170110},
151404 +               {0x19, 0x04a11040},
151405 +               {0x1d, 0x40600001},
151406 +               {0x21, 0x04211020}),
151407 +       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
151408                 {0x14, 0x90170110},
151409                 {0x19, 0x04a11040},
151410                 {0x21, 0x04211020}),
151411 @@ -9224,8 +9316,7 @@ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
151412         SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
151413         SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
151414         SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
151415 -       SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F),
151416 -       SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F),
151417 +       SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F),
151418         SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505),
151419         {}
151421 @@ -10020,6 +10111,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
151422         SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
151423         SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
151424         SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
151425 +       SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
151426         SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE),
151427         SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE),
151428         SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
151429 @@ -10036,9 +10128,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
151430         SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
151431         SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
151432         SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
151433 -       SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
151434         SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
151435         SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751),
151436 +       SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
151437         SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
151438         SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
151439         SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
151440 @@ -10058,7 +10150,6 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
151441         SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
151442         SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26),
151443         SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
151444 -       SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
151446  #if 0
151447         /* Below is a quirk table taken from the old code.
151448 diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
151449 index cdc4b6106252..159c40ec680d 100644
151450 --- a/sound/pci/maestro3.c
151451 +++ b/sound/pci/maestro3.c
151452 @@ -1990,7 +1990,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
151453                 outw(0, io + GPIO_DATA);
151454                 outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
151456 -               schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
151457 +               schedule_msec_hrtimeout_uninterruptible((delay1));
151459                 outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
151460                 udelay(5);
151461 @@ -1998,7 +1998,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
151462                 outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
151463                 outw(~0, io + GPIO_MASK);
151465 -               schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
151466 +               schedule_msec_hrtimeout_uninterruptible((delay2));
151468                 if (! snd_m3_try_read_vendor(chip))
151469                         break;
151470 diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
151471 index 4cf879c42dc4..720297cbdf87 100644
151472 --- a/sound/pci/rme9652/hdsp.c
151473 +++ b/sound/pci/rme9652/hdsp.c
151474 @@ -5390,7 +5390,8 @@ static int snd_hdsp_free(struct hdsp *hdsp)
151475         if (hdsp->port)
151476                 pci_release_regions(hdsp->pci);
151478 -       pci_disable_device(hdsp->pci);
151479 +       if (pci_is_enabled(hdsp->pci))
151480 +               pci_disable_device(hdsp->pci);
151481         return 0;
151484 diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
151485 index 8d900c132f0f..97a0bff96b28 100644
151486 --- a/sound/pci/rme9652/hdspm.c
151487 +++ b/sound/pci/rme9652/hdspm.c
151488 @@ -6883,7 +6883,8 @@ static int snd_hdspm_free(struct hdspm * hdspm)
151489         if (hdspm->port)
151490                 pci_release_regions(hdspm->pci);
151492 -       pci_disable_device(hdspm->pci);
151493 +       if (pci_is_enabled(hdspm->pci))
151494 +               pci_disable_device(hdspm->pci);
151495         return 0;
151498 diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
151499 index 4df992e846f2..7a4d395abcee 100644
151500 --- a/sound/pci/rme9652/rme9652.c
151501 +++ b/sound/pci/rme9652/rme9652.c
151502 @@ -1731,7 +1731,8 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652)
151503         if (rme9652->port)
151504                 pci_release_regions(rme9652->pci);
151506 -       pci_disable_device(rme9652->pci);
151507 +       if (pci_is_enabled(rme9652->pci))
151508 +               pci_disable_device(rme9652->pci);
151509         return 0;
151512 diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
151513 index 85bdd0534180..80b3b162ca5b 100644
151514 --- a/sound/soc/codecs/ak5558.c
151515 +++ b/sound/soc/codecs/ak5558.c
151516 @@ -272,7 +272,7 @@ static void ak5558_power_off(struct ak5558_priv *ak5558)
151517         if (!ak5558->reset_gpiod)
151518                 return;
151520 -       gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
151521 +       gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
151522         usleep_range(1000, 2000);
151525 @@ -281,7 +281,7 @@ static void ak5558_power_on(struct ak5558_priv *ak5558)
151526         if (!ak5558->reset_gpiod)
151527                 return;
151529 -       gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
151530 +       gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
151531         usleep_range(1000, 2000);
151534 diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
151535 index 8abe232ca4a4..ff23a7d4d2ac 100644
151536 --- a/sound/soc/codecs/rt286.c
151537 +++ b/sound/soc/codecs/rt286.c
151538 @@ -171,6 +171,9 @@ static bool rt286_readable_register(struct device *dev, unsigned int reg)
151539         case RT286_PROC_COEF:
151540         case RT286_SET_AMP_GAIN_ADC_IN1:
151541         case RT286_SET_AMP_GAIN_ADC_IN2:
151542 +       case RT286_SET_GPIO_MASK:
151543 +       case RT286_SET_GPIO_DIRECTION:
151544 +       case RT286_SET_GPIO_DATA:
151545         case RT286_SET_POWER(RT286_DAC_OUT1):
151546         case RT286_SET_POWER(RT286_DAC_OUT2):
151547         case RT286_SET_POWER(RT286_ADC_IN1):
151548 @@ -1117,12 +1120,11 @@ static const struct dmi_system_id force_combo_jack_table[] = {
151549         { }
151552 -static const struct dmi_system_id dmi_dell_dino[] = {
151553 +static const struct dmi_system_id dmi_dell[] = {
151554         {
151555 -               .ident = "Dell Dino",
151556 +               .ident = "Dell",
151557                 .matches = {
151558                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
151559 -                       DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
151560                 }
151561         },
151562         { }
151563 @@ -1133,7 +1135,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
151565         struct rt286_platform_data *pdata = dev_get_platdata(&i2c->dev);
151566         struct rt286_priv *rt286;
151567 -       int i, ret, val;
151568 +       int i, ret, vendor_id;
151570         rt286 = devm_kzalloc(&i2c->dev, sizeof(*rt286),
151571                                 GFP_KERNEL);
151572 @@ -1149,14 +1151,15 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
151573         }
151575         ret = regmap_read(rt286->regmap,
151576 -               RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
151577 +               RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &vendor_id);
151578         if (ret != 0) {
151579                 dev_err(&i2c->dev, "I2C error %d\n", ret);
151580                 return ret;
151581         }
151582 -       if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
151583 +       if (vendor_id != RT286_VENDOR_ID && vendor_id != RT288_VENDOR_ID) {
151584                 dev_err(&i2c->dev,
151585 -                       "Device with ID register %#x is not rt286\n", val);
151586 +                       "Device with ID register %#x is not rt286\n",
151587 +                       vendor_id);
151588                 return -ENODEV;
151589         }
151591 @@ -1180,8 +1183,8 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
151592         if (pdata)
151593                 rt286->pdata = *pdata;
151595 -       if (dmi_check_system(force_combo_jack_table) ||
151596 -               dmi_check_system(dmi_dell_dino))
151597 +       if ((vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) ||
151598 +               dmi_check_system(force_combo_jack_table))
151599                 rt286->pdata.cbj_en = true;
151601         regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3);
151602 @@ -1220,7 +1223,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
151603         regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737);
151604         regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f);
151606 -       if (dmi_check_system(dmi_dell_dino)) {
151607 +       if (vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) {
151608                 regmap_update_bits(rt286->regmap,
151609                         RT286_SET_GPIO_MASK, 0x40, 0x40);
151610                 regmap_update_bits(rt286->regmap,
151611 diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
151612 index 653da3eaf355..d77d12902594 100644
151613 --- a/sound/soc/codecs/rt5631.c
151614 +++ b/sound/soc/codecs/rt5631.c
151615 @@ -417,7 +417,7 @@ static void onebit_depop_mute_stage(struct snd_soc_component *component, int ena
151616         hp_zc = snd_soc_component_read(component, RT5631_INT_ST_IRQ_CTRL_2);
151617         snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
151618         if (enable) {
151619 -               schedule_timeout_uninterruptible(msecs_to_jiffies(10));
151620 +               schedule_msec_hrtimeout_uninterruptible((10));
151621                 /* config one-bit depop parameter */
151622                 rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x307f);
151623                 snd_soc_component_update_bits(component, RT5631_HP_OUT_VOL,
151624 @@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_component *component, int enable
151625         hp_zc = snd_soc_component_read(component, RT5631_INT_ST_IRQ_CTRL_2);
151626         snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
151627         if (enable) {
151628 -               schedule_timeout_uninterruptible(msecs_to_jiffies(10));
151629 +               schedule_msec_hrtimeout_uninterruptible((10));
151631                 /* config depop sequence parameter */
151632                 rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x302f);
151633 diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
151634 index 4063aac2a443..dd69d874bad2 100644
151635 --- a/sound/soc/codecs/rt5670.c
151636 +++ b/sound/soc/codecs/rt5670.c
151637 @@ -2980,6 +2980,18 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
151638                                                  RT5670_GPIO1_IS_IRQ |
151639                                                  RT5670_JD_MODE3),
151640         },
151641 +       {
151642 +               .callback = rt5670_quirk_cb,
151643 +               .ident = "Dell Venue 10 Pro 5055",
151644 +               .matches = {
151645 +                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
151646 +                       DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
151647 +               },
151648 +               .driver_data = (unsigned long *)(RT5670_DMIC_EN |
151649 +                                                RT5670_DMIC2_INR |
151650 +                                                RT5670_GPIO1_IS_IRQ |
151651 +                                                RT5670_JD_MODE1),
151652 +       },
151653         {
151654                 .callback = rt5670_quirk_cb,
151655                 .ident = "Aegex 10 tablet (RU2)",
151656 diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
151657 index f04f88c8d425..b689f26fc4be 100644
151658 --- a/sound/soc/codecs/tlv320aic32x4.c
151659 +++ b/sound/soc/codecs/tlv320aic32x4.c
151660 @@ -577,12 +577,12 @@ static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
151661                 .window_start = 0,
151662                 .window_len = 128,
151663                 .range_min = 0,
151664 -               .range_max = AIC32X4_RMICPGAVOL,
151665 +               .range_max = AIC32X4_REFPOWERUP,
151666         },
151669  const struct regmap_config aic32x4_regmap_config = {
151670 -       .max_register = AIC32X4_RMICPGAVOL,
151671 +       .max_register = AIC32X4_REFPOWERUP,
151672         .ranges = aic32x4_regmap_pages,
151673         .num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
151675 @@ -1243,6 +1243,10 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
151676         if (ret)
151677                 goto err_disable_regulators;
151679 +       ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
151680 +       if (ret)
151681 +               goto err_disable_regulators;
151683         ret = devm_snd_soc_register_component(dev,
151684                         &soc_component_dev_aic32x4, &aic32x4_dai, 1);
151685         if (ret) {
151686 @@ -1250,10 +1254,6 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
151687                 goto err_disable_regulators;
151688         }
151690 -       ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
151691 -       if (ret)
151692 -               goto err_disable_regulators;
151694         return 0;
151696  err_disable_regulators:
151697 diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
151698 index 15d42ce3b21d..897fced9589b 100644
151699 --- a/sound/soc/codecs/wm8350.c
151700 +++ b/sound/soc/codecs/wm8350.c
151701 @@ -234,10 +234,10 @@ static void wm8350_pga_work(struct work_struct *work)
151702                     out2->ramp == WM8350_RAMP_UP) {
151703                         /* delay is longer over 0dB as increases are larger */
151704                         if (i >= WM8350_OUTn_0dB)
151705 -                               schedule_timeout_interruptible(msecs_to_jiffies
151706 +                               schedule_msec_hrtimeout_interruptible(
151707                                                                (2));
151708                         else
151709 -                               schedule_timeout_interruptible(msecs_to_jiffies
151710 +                               schedule_msec_hrtimeout_interruptible(
151711                                                                (1));
151712                 } else
151713                         udelay(50);     /* doesn't matter if we delay longer */
151714 @@ -1121,7 +1121,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
151715                                          (platform->dis_out4 << 6));
151717                         /* wait for discharge */
151718 -                       schedule_timeout_interruptible(msecs_to_jiffies
151719 +                       schedule_msec_hrtimeout_interruptible(
151720                                                        (platform->
151721                                                         cap_discharge_msecs));
151723 @@ -1137,7 +1137,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
151724                                          WM8350_VBUFEN);
151726                         /* wait for vmid */
151727 -                       schedule_timeout_interruptible(msecs_to_jiffies
151728 +                       schedule_msec_hrtimeout_interruptible(
151729                                                        (platform->
151730                                                         vmid_charge_msecs));
151732 @@ -1188,7 +1188,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
151733                 wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
151735                 /* wait */
151736 -               schedule_timeout_interruptible(msecs_to_jiffies
151737 +               schedule_msec_hrtimeout_interruptible(
151738                                                (platform->
151739                                                 vmid_discharge_msecs));
151741 @@ -1206,7 +1206,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
151742                                  pm1 | WM8350_OUTPUT_DRAIN_EN);
151744                 /* wait */
151745 -               schedule_timeout_interruptible(msecs_to_jiffies
151746 +               schedule_msec_hrtimeout_interruptible(
151747                                                (platform->drain_msecs));
151749                 pm1 &= ~WM8350_BIASEN;
151750 diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
151751 index a9a6d766a176..45bf31de6282 100644
151752 --- a/sound/soc/codecs/wm8900.c
151753 +++ b/sound/soc/codecs/wm8900.c
151754 @@ -1104,7 +1104,7 @@ static int wm8900_set_bias_level(struct snd_soc_component *component,
151755                 /* Need to let things settle before stopping the clock
151756                  * to ensure that restart works, see "Stopping the
151757                  * master clock" in the datasheet. */
151758 -               schedule_timeout_interruptible(msecs_to_jiffies(1));
151759 +               schedule_msec_hrtimeout_interruptible(1);
151760                 snd_soc_component_write(component, WM8900_REG_POWER2,
151761                              WM8900_REG_POWER2_SYSCLK_ENA);
151762                 break;
151763 diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
151764 index cda9cd935d4f..9e621a254392 100644
151765 --- a/sound/soc/codecs/wm8960.c
151766 +++ b/sound/soc/codecs/wm8960.c
151767 @@ -608,10 +608,6 @@ static const int bclk_divs[] = {
151768   *             - lrclk      = sysclk / dac_divs
151769   *             - 10 * bclk  = sysclk / bclk_divs
151770   *
151771 - *     If we cannot find an exact match for (sysclk, lrclk, bclk)
151772 - *     triplet, we relax the bclk such that bclk is chosen as the
151773 - *     closest available frequency greater than expected bclk.
151775   * @wm8960: codec private data
151776   * @mclk: MCLK used to derive sysclk
151777   * @sysclk_idx: sysclk_divs index for found sysclk
151778 @@ -629,7 +625,7 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
151780         int sysclk, bclk, lrclk;
151781         int i, j, k;
151782 -       int diff, closest = mclk;
151783 +       int diff;
151785         /* marker for no match */
151786         *bclk_idx = -1;
151787 @@ -653,12 +649,6 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
151788                                         *bclk_idx = k;
151789                                         break;
151790                                 }
151791 -                               if (diff > 0 && closest > diff) {
151792 -                                       *sysclk_idx = i;
151793 -                                       *dac_idx = j;
151794 -                                       *bclk_idx = k;
151795 -                                       closest = diff;
151796 -                               }
151797                         }
151798                         if (k != ARRAY_SIZE(bclk_divs))
151799                                 break;
151800 diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
151801 index e0ce32dd4a81..eb91c0282aad 100644
151802 --- a/sound/soc/codecs/wm9713.c
151803 +++ b/sound/soc/codecs/wm9713.c
151804 @@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
151806         /* Gracefully shut down the voice interface. */
151807         snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0200);
151808 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
151809 +       schedule_msec_hrtimeout_interruptible(1);
151810         snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
151811         snd_soc_component_update_bits(component, AC97_EXTENDED_MID, 0x1000, 0x1000);
151813 @@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_component *component,
151814         wm9713->pll_in = freq_in;
151816         /* wait 10ms AC97 link frames for the link to stabilise */
151817 -       schedule_timeout_interruptible(msecs_to_jiffies(10));
151818 +       schedule_msec_hrtimeout_interruptible((10));
151819         return 0;
151822 diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
151823 index 8c5cdcdc8713..e81b5cf0d37a 100644
151824 --- a/sound/soc/generic/audio-graph-card.c
151825 +++ b/sound/soc/generic/audio-graph-card.c
151826 @@ -380,7 +380,7 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
151827         struct device_node *top = dev->of_node;
151828         struct asoc_simple_dai *cpu_dai;
151829         struct asoc_simple_dai *codec_dai;
151830 -       int ret, single_cpu;
151831 +       int ret, single_cpu = 0;
151833         /* Do it only CPU turn */
151834         if (!li->cpu)
151835 diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
151836 index 75365c7bb393..d916ec69c24f 100644
151837 --- a/sound/soc/generic/simple-card.c
151838 +++ b/sound/soc/generic/simple-card.c
151839 @@ -258,7 +258,7 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
151840         struct device_node *plat = NULL;
151841         char prop[128];
151842         char *prefix = "";
151843 -       int ret, single_cpu;
151844 +       int ret, single_cpu = 0;
151846         /*
151847          *       |CPU   |Codec   : turn
151848 diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
151849 index 4e0248d2accc..7c5038803be7 100644
151850 --- a/sound/soc/intel/Makefile
151851 +++ b/sound/soc/intel/Makefile
151852 @@ -5,7 +5,7 @@ obj-$(CONFIG_SND_SOC) += common/
151853  # Platform Support
151854  obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += atom/
151855  obj-$(CONFIG_SND_SOC_INTEL_CATPT) += catpt/
151856 -obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
151857 +obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += skylake/
151858  obj-$(CONFIG_SND_SOC_INTEL_KEEMBAY) += keembay/
151860  # Machine support
151861 diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
151862 index 5d48cc359c3d..22912cab5e63 100644
151863 --- a/sound/soc/intel/boards/bytcr_rt5640.c
151864 +++ b/sound/soc/intel/boards/bytcr_rt5640.c
151865 @@ -482,6 +482,9 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
151866                         DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
151867                 },
151868                 .driver_data = (void *)(BYT_RT5640_IN1_MAP |
151869 +                                       BYT_RT5640_JD_SRC_JD2_IN4N |
151870 +                                       BYT_RT5640_OVCD_TH_2000UA |
151871 +                                       BYT_RT5640_OVCD_SF_0P75 |
151872                                         BYT_RT5640_MONO_SPEAKER |
151873                                         BYT_RT5640_DIFF_MIC |
151874                                         BYT_RT5640_SSP0_AIF2 |
151875 @@ -515,6 +518,23 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
151876                                         BYT_RT5640_SSP0_AIF1 |
151877                                         BYT_RT5640_MCLK_EN),
151878         },
151879 +       {
151880 +               /* Chuwi Hi8 (CWI509) */
151881 +               .matches = {
151882 +                       DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
151883 +                       DMI_MATCH(DMI_BOARD_NAME, "BYT-PA03C"),
151884 +                       DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
151885 +                       DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
151886 +               },
151887 +               .driver_data = (void *)(BYT_RT5640_IN1_MAP |
151888 +                                       BYT_RT5640_JD_SRC_JD2_IN4N |
151889 +                                       BYT_RT5640_OVCD_TH_2000UA |
151890 +                                       BYT_RT5640_OVCD_SF_0P75 |
151891 +                                       BYT_RT5640_MONO_SPEAKER |
151892 +                                       BYT_RT5640_DIFF_MIC |
151893 +                                       BYT_RT5640_SSP0_AIF1 |
151894 +                                       BYT_RT5640_MCLK_EN),
151895 +       },
151896         {
151897                 .matches = {
151898                         DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
151899 diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
151900 index cc9a2509ace2..e0149cf6127d 100644
151901 --- a/sound/soc/intel/boards/kbl_da7219_max98927.c
151902 +++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
151903 @@ -282,11 +282,33 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
151904         struct snd_interval *chan = hw_param_interval(params,
151905                         SNDRV_PCM_HW_PARAM_CHANNELS);
151906         struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
151907 -       struct snd_soc_dpcm *dpcm = container_of(
151908 -                       params, struct snd_soc_dpcm, hw_params);
151909 -       struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
151910 -       struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
151911 +       struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL;
151913 +       /*
151914 +        * The following loop will be called only for playback stream
151915 +        * In this platform, there is only one playback device on every SSP
151916 +        */
151917 +       for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
151918 +               rtd_dpcm = dpcm;
151919 +               break;
151920 +       }
151922 +       /*
151923 +        * This following loop will be called only for capture stream
151924 +        * In this platform, there is only one capture device on every SSP
151925 +        */
151926 +       for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) {
151927 +               rtd_dpcm = dpcm;
151928 +               break;
151929 +       }
151931 +       if (!rtd_dpcm)
151932 +               return -EINVAL;
151934 +       /*
151935 +        * The above 2 loops are mutually exclusive based on the stream direction,
151936 +        * thus rtd_dpcm variable will never be overwritten
151937 +        */
151938         /*
151939          * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE,
151940          * where as kblda7219m98927 & kblmax98927 supports S16_LE by default.
151941 @@ -309,9 +331,9 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
151942         /*
151943          * The ADSP will convert the FE rate to 48k, stereo, 24 bit
151944          */
151945 -       if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
151946 -           !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
151947 -           !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
151948 +       if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") ||
151949 +           !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") ||
151950 +           !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) {
151951                 rate->min = rate->max = 48000;
151952                 chan->min = chan->max = 2;
151953                 snd_mask_none(fmt);
151954 @@ -322,7 +344,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
151955          * The speaker on the SSP0 supports S16_LE and not S24_LE.
151956          * thus changing the mask here
151957          */
151958 -       if (!strcmp(be_dai_link->name, "SSP0-Codec"))
151959 +       if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec"))
151960                 snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
151962         return 0;
151963 diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
151964 index 8adce6417b02..ecd3f90f4bbe 100644
151965 --- a/sound/soc/intel/boards/sof_sdw.c
151966 +++ b/sound/soc/intel/boards/sof_sdw.c
151967 @@ -187,6 +187,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
151968                                         SOF_RT715_DAI_ID_FIX |
151969                                         SOF_SDW_FOUR_SPK),
151970         },
151971 +       /* AlderLake devices */
151972 +       {
151973 +               .callback = sof_sdw_quirk_cb,
151974 +               .matches = {
151975 +                       DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
151976 +                       DMI_MATCH(DMI_PRODUCT_NAME, "Alder Lake Client Platform"),
151977 +               },
151978 +               .driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
151979 +                                       SOF_SDW_TGL_HDMI |
151980 +                                       SOF_SDW_PCH_DMIC),
151981 +       },
151982         {}
151985 diff --git a/sound/soc/intel/boards/sof_wm8804.c b/sound/soc/intel/boards/sof_wm8804.c
151986 index a46ba13e8eb0..6a181e45143d 100644
151987 --- a/sound/soc/intel/boards/sof_wm8804.c
151988 +++ b/sound/soc/intel/boards/sof_wm8804.c
151989 @@ -124,7 +124,11 @@ static int sof_wm8804_hw_params(struct snd_pcm_substream *substream,
151990         }
151992         snd_soc_dai_set_clkdiv(codec_dai, WM8804_MCLK_DIV, mclk_div);
151993 -       snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
151994 +       ret = snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
151995 +       if (ret < 0) {
151996 +               dev_err(rtd->card->dev, "Failed to set WM8804 PLL\n");
151997 +               return ret;
151998 +       }
152000         ret = snd_soc_dai_set_sysclk(codec_dai, WM8804_TX_CLKSRC_PLL,
152001                                      sysclk, SND_SOC_CLOCK_OUT);
152002 diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
152003 index dd39149b89b1..1c4649bccec5 100644
152004 --- a/sound/soc/intel/skylake/Makefile
152005 +++ b/sound/soc/intel/skylake/Makefile
152006 @@ -7,7 +7,7 @@ ifdef CONFIG_DEBUG_FS
152007    snd-soc-skl-objs += skl-debug.o
152008  endif
152010 -obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
152011 +obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += snd-soc-skl.o
152013  #Skylake Clock device support
152014  snd-soc-skl-ssp-clk-objs := skl-ssp-clk.o
152015 diff --git a/sound/soc/qcom/qdsp6/q6afe-clocks.c b/sound/soc/qcom/qdsp6/q6afe-clocks.c
152016 index f0362f061652..9431656283cd 100644
152017 --- a/sound/soc/qcom/qdsp6/q6afe-clocks.c
152018 +++ b/sound/soc/qcom/qdsp6/q6afe-clocks.c
152019 @@ -11,33 +11,29 @@
152020  #include <linux/slab.h>
152021  #include "q6afe.h"
152023 -#define Q6AFE_CLK(id) &(struct q6afe_clk) {            \
152024 +#define Q6AFE_CLK(id) {                                        \
152025                 .clk_id = id,                           \
152026                 .afe_clk_id     = Q6AFE_##id,           \
152027                 .name = #id,                            \
152028 -               .attributes = LPASS_CLK_ATTRIBUTE_COUPLE_NO, \
152029                 .rate = 19200000,                       \
152030 -               .hw.init = &(struct clk_init_data) {    \
152031 -                       .ops = &clk_q6afe_ops,          \
152032 -                       .name = #id,                    \
152033 -               },                                      \
152034         }
152036 -#define Q6AFE_VOTE_CLK(id, blkid, n) &(struct q6afe_clk) { \
152037 +#define Q6AFE_VOTE_CLK(id, blkid, n) {                 \
152038                 .clk_id = id,                           \
152039                 .afe_clk_id = blkid,                    \
152040 -               .name = #n,                             \
152041 -               .hw.init = &(struct clk_init_data) {    \
152042 -                       .ops = &clk_vote_q6afe_ops,     \
152043 -                       .name = #id,                    \
152044 -               },                                      \
152045 +               .name = n,                              \
152046         }
152048 -struct q6afe_clk {
152049 -       struct device *dev;
152050 +struct q6afe_clk_init {
152051         int clk_id;
152052         int afe_clk_id;
152053         char *name;
152054 +       int rate;
152057 +struct q6afe_clk {
152058 +       struct device *dev;
152059 +       int afe_clk_id;
152060         int attributes;
152061         int rate;
152062         uint32_t handle;
152063 @@ -48,8 +44,7 @@ struct q6afe_clk {
152065  struct q6afe_cc {
152066         struct device *dev;
152067 -       struct q6afe_clk **clks;
152068 -       int num_clks;
152069 +       struct q6afe_clk *clks[Q6AFE_MAX_CLK_ID];
152072  static int clk_q6afe_prepare(struct clk_hw *hw)
152073 @@ -105,7 +100,7 @@ static int clk_vote_q6afe_block(struct clk_hw *hw)
152074         struct q6afe_clk *clk = to_q6afe_clk(hw);
152076         return q6afe_vote_lpass_core_hw(clk->dev, clk->afe_clk_id,
152077 -                                       clk->name, &clk->handle);
152078 +                                       clk_hw_get_name(&clk->hw), &clk->handle);
152081  static void clk_unvote_q6afe_block(struct clk_hw *hw)
152082 @@ -120,84 +115,76 @@ static const struct clk_ops clk_vote_q6afe_ops = {
152083         .unprepare      = clk_unvote_q6afe_block,
152086 -static struct q6afe_clk *q6afe_clks[Q6AFE_MAX_CLK_ID] = {
152087 -       [LPASS_CLK_ID_PRI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
152088 -       [LPASS_CLK_ID_PRI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
152089 -       [LPASS_CLK_ID_SEC_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
152090 -       [LPASS_CLK_ID_SEC_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
152091 -       [LPASS_CLK_ID_TER_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
152092 -       [LPASS_CLK_ID_TER_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
152093 -       [LPASS_CLK_ID_QUAD_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
152094 -       [LPASS_CLK_ID_QUAD_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
152095 -       [LPASS_CLK_ID_SPEAKER_I2S_IBIT] =
152096 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
152097 -       [LPASS_CLK_ID_SPEAKER_I2S_EBIT] =
152098 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
152099 -       [LPASS_CLK_ID_SPEAKER_I2S_OSR] =
152100 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
152101 -       [LPASS_CLK_ID_QUI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
152102 -       [LPASS_CLK_ID_QUI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
152103 -       [LPASS_CLK_ID_SEN_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
152104 -       [LPASS_CLK_ID_SEN_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
152105 -       [LPASS_CLK_ID_INT0_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
152106 -       [LPASS_CLK_ID_INT1_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
152107 -       [LPASS_CLK_ID_INT2_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
152108 -       [LPASS_CLK_ID_INT3_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
152109 -       [LPASS_CLK_ID_INT4_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
152110 -       [LPASS_CLK_ID_INT5_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
152111 -       [LPASS_CLK_ID_INT6_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
152112 -       [LPASS_CLK_ID_QUI_MI2S_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
152113 -       [LPASS_CLK_ID_PRI_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
152114 -       [LPASS_CLK_ID_PRI_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
152115 -       [LPASS_CLK_ID_SEC_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
152116 -       [LPASS_CLK_ID_SEC_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
152117 -       [LPASS_CLK_ID_TER_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
152118 -       [LPASS_CLK_ID_TER_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
152119 -       [LPASS_CLK_ID_QUAD_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
152120 -       [LPASS_CLK_ID_QUAD_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
152121 -       [LPASS_CLK_ID_QUIN_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
152122 -       [LPASS_CLK_ID_QUIN_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
152123 -       [LPASS_CLK_ID_QUI_PCM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
152124 -       [LPASS_CLK_ID_PRI_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
152125 -       [LPASS_CLK_ID_PRI_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
152126 -       [LPASS_CLK_ID_SEC_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
152127 -       [LPASS_CLK_ID_SEC_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
152128 -       [LPASS_CLK_ID_TER_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
152129 -       [LPASS_CLK_ID_TER_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
152130 -       [LPASS_CLK_ID_QUAD_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
152131 -       [LPASS_CLK_ID_QUAD_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
152132 -       [LPASS_CLK_ID_QUIN_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
152133 -       [LPASS_CLK_ID_QUIN_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
152134 -       [LPASS_CLK_ID_QUIN_TDM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
152135 -       [LPASS_CLK_ID_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
152136 -       [LPASS_CLK_ID_MCLK_2] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
152137 -       [LPASS_CLK_ID_MCLK_3] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
152138 -       [LPASS_CLK_ID_MCLK_4] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
152139 -       [LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE] =
152140 -               Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
152141 -       [LPASS_CLK_ID_INT_MCLK_0] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
152142 -       [LPASS_CLK_ID_INT_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
152143 -       [LPASS_CLK_ID_WSA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
152144 -       [LPASS_CLK_ID_WSA_CORE_NPL_MCLK] =
152145 -                               Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
152146 -       [LPASS_CLK_ID_VA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
152147 -       [LPASS_CLK_ID_TX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
152148 -       [LPASS_CLK_ID_TX_CORE_NPL_MCLK] =
152149 -                       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
152150 -       [LPASS_CLK_ID_RX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
152151 -       [LPASS_CLK_ID_RX_CORE_NPL_MCLK] =
152152 -                               Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
152153 -       [LPASS_CLK_ID_VA_CORE_2X_MCLK] =
152154 -                               Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
152155 -       [LPASS_HW_AVTIMER_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
152156 -                                                Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
152157 -                                                "LPASS_AVTIMER_MACRO"),
152158 -       [LPASS_HW_MACRO_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
152159 -                                               Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
152160 -                                               "LPASS_HW_MACRO"),
152161 -       [LPASS_HW_DCODEC_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
152162 -                                       Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
152163 -                                       "LPASS_HW_DCODEC"),
152164 +static const struct q6afe_clk_init q6afe_clks[] = {
152165 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
152166 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
152167 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
152168 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
152169 +       Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
152170 +       Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
152171 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
152172 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
152173 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
152174 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
152175 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
152176 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
152177 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
152178 +       Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
152179 +       Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
152180 +       Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
152181 +       Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
152182 +       Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
152183 +       Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
152184 +       Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
152185 +       Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
152186 +       Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
152187 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
152188 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
152189 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
152190 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
152191 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
152192 +       Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
152193 +       Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
152194 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
152195 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
152196 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
152197 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
152198 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
152199 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
152200 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
152201 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
152202 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
152203 +       Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
152204 +       Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
152205 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
152206 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
152207 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
152208 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
152209 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
152210 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
152211 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
152212 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
152213 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
152214 +       Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
152215 +       Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
152216 +       Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
152217 +       Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
152218 +       Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
152219 +       Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
152220 +       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
152221 +       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
152222 +       Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
152223 +       Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
152224 +       Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
152225 +       Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
152226 +                      Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
152227 +                      "LPASS_AVTIMER_MACRO"),
152228 +       Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
152229 +                      Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
152230 +                      "LPASS_HW_MACRO"),
152231 +       Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
152232 +                      Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
152233 +                      "LPASS_HW_DCODEC"),
152236  static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
152237 @@ -207,7 +194,7 @@ static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
152238         unsigned int idx = clkspec->args[0];
152239         unsigned int attr = clkspec->args[1];
152241 -       if (idx >= cc->num_clks || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
152242 +       if (idx >= Q6AFE_MAX_CLK_ID || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
152243                 dev_err(cc->dev, "Invalid clk specifier (%d, %d)\n", idx, attr);
152244                 return ERR_PTR(-EINVAL);
152245         }
152246 @@ -230,20 +217,36 @@ static int q6afe_clock_dev_probe(struct platform_device *pdev)
152247         if (!cc)
152248                 return -ENOMEM;
152250 -       cc->clks = &q6afe_clks[0];
152251 -       cc->num_clks = ARRAY_SIZE(q6afe_clks);
152252 +       cc->dev = dev;
152253         for (i = 0; i < ARRAY_SIZE(q6afe_clks); i++) {
152254 -               if (!q6afe_clks[i])
152255 -                       continue;
152256 +               unsigned int id = q6afe_clks[i].clk_id;
152257 +               struct clk_init_data init = {
152258 +                       .name =  q6afe_clks[i].name,
152259 +               };
152260 +               struct q6afe_clk *clk;
152262 +               clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
152263 +               if (!clk)
152264 +                       return -ENOMEM;
152266 +               clk->dev = dev;
152267 +               clk->afe_clk_id = q6afe_clks[i].afe_clk_id;
152268 +               clk->rate = q6afe_clks[i].rate;
152269 +               clk->hw.init = &init;
152271 +               if (clk->rate)
152272 +                       init.ops = &clk_q6afe_ops;
152273 +               else
152274 +                       init.ops = &clk_vote_q6afe_ops;
152276 -               q6afe_clks[i]->dev = dev;
152277 +               cc->clks[id] = clk;
152279 -               ret = devm_clk_hw_register(dev, &q6afe_clks[i]->hw);
152280 +               ret = devm_clk_hw_register(dev, &clk->hw);
152281                 if (ret)
152282                         return ret;
152283         }
152285 -       ret = of_clk_add_hw_provider(dev->of_node, q6afe_of_clk_hw_get, cc);
152286 +       ret = devm_of_clk_add_hw_provider(dev, q6afe_of_clk_hw_get, cc);
152287         if (ret)
152288                 return ret;
152290 diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
152291 index cad1cd1bfdf0..4327b72162ec 100644
152292 --- a/sound/soc/qcom/qdsp6/q6afe.c
152293 +++ b/sound/soc/qcom/qdsp6/q6afe.c
152294 @@ -1681,7 +1681,7 @@ int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
152295  EXPORT_SYMBOL(q6afe_unvote_lpass_core_hw);
152297  int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
152298 -                            char *client_name, uint32_t *client_handle)
152299 +                            const char *client_name, uint32_t *client_handle)
152301         struct q6afe *afe = dev_get_drvdata(dev->parent);
152302         struct afe_cmd_remote_lpass_core_hw_vote_request *vote_cfg;
152303 diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h
152304 index 22e10269aa10..3845b56c0ed3 100644
152305 --- a/sound/soc/qcom/qdsp6/q6afe.h
152306 +++ b/sound/soc/qcom/qdsp6/q6afe.h
152307 @@ -236,7 +236,7 @@ int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
152308  int q6afe_set_lpass_clock(struct device *dev, int clk_id, int clk_src,
152309                           int clk_root, unsigned int freq);
152310  int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
152311 -                            char *client_name, uint32_t *client_handle);
152312 +                            const char *client_name, uint32_t *client_handle);
152313  int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
152314                                uint32_t client_handle);
152315  #endif /* __Q6AFE_H__ */
152316 diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
152317 index 9300fef9bf26..125e07f65d2b 100644
152318 --- a/sound/soc/samsung/tm2_wm5110.c
152319 +++ b/sound/soc/samsung/tm2_wm5110.c
152320 @@ -553,7 +553,7 @@ static int tm2_probe(struct platform_device *pdev)
152322                 ret = of_parse_phandle_with_args(dev->of_node, "i2s-controller",
152323                                                  cells_name, i, &args);
152324 -               if (!args.np) {
152325 +               if (ret) {
152326                         dev_err(dev, "i2s-controller property parse error: %d\n", i);
152327                         ret = -EINVAL;
152328                         goto dai_node_put;
152329 diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
152330 index 1029d8d9d800..d2b4632d9c2a 100644
152331 --- a/sound/soc/sh/rcar/core.c
152332 +++ b/sound/soc/sh/rcar/core.c
152333 @@ -1428,8 +1428,75 @@ static int rsnd_hw_params(struct snd_soc_component *component,
152334                 }
152335                 if (io->converted_chan)
152336                         dev_dbg(dev, "convert channels = %d\n", io->converted_chan);
152337 -               if (io->converted_rate)
152338 +               if (io->converted_rate) {
152339 +                       /*
152340 +                        * SRC supports convert rates from params_rate(hw_params)/k_down
152341 +                        * to params_rate(hw_params)*k_up, where k_up is always 6, and
152342 +                        * k_down depends on number of channels and SRC unit.
152343 +                        * So all SRC units can upsample audio up to 6 times regardless
152344 +                        * its number of channels. And all SRC units can downsample
152345 +                        * 2 channel audio up to 6 times too.
152346 +                        */
152347 +                       int k_up = 6;
152348 +                       int k_down = 6;
152349 +                       int channel;
152350 +                       struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
152352                         dev_dbg(dev, "convert rate     = %d\n", io->converted_rate);
152354 +                       channel = io->converted_chan ? io->converted_chan :
152355 +                                 params_channels(hw_params);
152357 +                       switch (rsnd_mod_id(src_mod)) {
152358 +                       /*
152359 +                        * SRC0 can downsample 4, 6 and 8 channel audio up to 4 times.
152360 +                        * SRC1, SRC3 and SRC4 can downsample 4 channel audio
152361 +                        * up to 4 times.
152362 +                        * SRC1, SRC3 and SRC4 can downsample 6 and 8 channel audio
152363 +                        * no more than twice.
152364 +                        */
152365 +                       case 1:
152366 +                       case 3:
152367 +                       case 4:
152368 +                               if (channel > 4) {
152369 +                                       k_down = 2;
152370 +                                       break;
152371 +                               }
152372 +                               fallthrough;
152373 +                       case 0:
152374 +                               if (channel > 2)
152375 +                                       k_down = 4;
152376 +                               break;
152378 +                       /* Other SRC units do not support more than 2 channels */
152379 +                       default:
152380 +                               if (channel > 2)
152381 +                                       return -EINVAL;
152382 +                       }
152384 +                       if (params_rate(hw_params) > io->converted_rate * k_down) {
152385 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
152386 +                                       io->converted_rate * k_down;
152387 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
152388 +                                       io->converted_rate * k_down;
152389 +                               hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
152390 +                       } else if (params_rate(hw_params) * k_up < io->converted_rate) {
152391 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
152392 +                                       (io->converted_rate + k_up - 1) / k_up;
152393 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
152394 +                                       (io->converted_rate + k_up - 1) / k_up;
152395 +                               hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
152396 +                       }
152398 +                       /*
152399 +                        * TBD: Max SRC input and output rates also depend on number
152400 +                        * of channels and SRC unit:
152401 +                        * SRC1, SRC3 and SRC4 do not support more than 128kHz
152402 +                        * for 6 channel and 96kHz for 8 channel audio.
152403 +                        * Perhaps this function should return EINVAL if the input or
152404 +                        * the output rate exceeds the limitation.
152405 +                        */
152406 +               }
152407         }
152409         return rsnd_dai_call(hw_params, io, substream, hw_params);
152410 diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
152411 index d0ded427a836..042207c11651 100644
152412 --- a/sound/soc/sh/rcar/ssi.c
152413 +++ b/sound/soc/sh/rcar/ssi.c
152414 @@ -507,10 +507,15 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
152415                          struct rsnd_priv *priv)
152417         struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
152418 +       int ret;
152420         if (!rsnd_ssi_is_run_mods(mod, io))
152421                 return 0;
152423 +       ret = rsnd_ssi_master_clk_start(mod, io);
152424 +       if (ret < 0)
152425 +               return ret;
152427         ssi->usrcnt++;
152429         rsnd_mod_power_on(mod);
152430 @@ -792,7 +797,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
152431                                                        SSI_SYS_STATUS(i * 2),
152432                                                        0xf << (id * 4));
152433                                         stop = true;
152434 -                                       break;
152435                                 }
152436                         }
152437                         break;
152438 @@ -810,7 +814,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
152439                                                 SSI_SYS_STATUS((i * 2) + 1),
152440                                                 0xf << 4);
152441                                         stop = true;
152442 -                                       break;
152443                                 }
152444                         }
152445                         break;
152446 @@ -1060,13 +1063,6 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
152447         return 0;
152450 -static int rsnd_ssi_prepare(struct rsnd_mod *mod,
152451 -                           struct rsnd_dai_stream *io,
152452 -                           struct rsnd_priv *priv)
152454 -       return rsnd_ssi_master_clk_start(mod, io);
152457  static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
152458         .name           = SSI_NAME,
152459         .probe          = rsnd_ssi_common_probe,
152460 @@ -1079,7 +1075,6 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
152461         .pointer        = rsnd_ssi_pio_pointer,
152462         .pcm_new        = rsnd_ssi_pcm_new,
152463         .hw_params      = rsnd_ssi_hw_params,
152464 -       .prepare        = rsnd_ssi_prepare,
152465         .get_status     = rsnd_ssi_get_status,
152468 @@ -1166,7 +1161,6 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
152469         .pcm_new        = rsnd_ssi_pcm_new,
152470         .fallback       = rsnd_ssi_fallback,
152471         .hw_params      = rsnd_ssi_hw_params,
152472 -       .prepare        = rsnd_ssi_prepare,
152473         .get_status     = rsnd_ssi_get_status,
152476 diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
152477 index 246a5e32e22a..b4810266f5e5 100644
152478 --- a/sound/soc/soc-compress.c
152479 +++ b/sound/soc/soc-compress.c
152480 @@ -153,7 +153,9 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
152481         fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
152482         fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
152484 +       mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
152485         snd_soc_runtime_activate(fe, stream);
152486 +       mutex_unlock(&fe->card->pcm_mutex);
152488         mutex_unlock(&fe->card->mutex);
152490 @@ -181,7 +183,9 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
152492         mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
152494 +       mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
152495         snd_soc_runtime_deactivate(fe, stream);
152496 +       mutex_unlock(&fe->card->pcm_mutex);
152498         fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
152500 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
152501 index b005f9eadd71..2f75a449c45c 100644
152502 --- a/sound/soc/soc-dapm.c
152503 +++ b/sound/soc/soc-dapm.c
152504 @@ -154,7 +154,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
152505  static void pop_wait(u32 pop_time)
152507         if (pop_time)
152508 -               schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
152509 +               schedule_msec_hrtimeout_uninterruptible((pop_time));
152512  __printf(3, 4)
152513 diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
152514 index 6740df541508..3d22c1be6f3d 100644
152515 --- a/sound/soc/tegra/tegra30_i2s.c
152516 +++ b/sound/soc/tegra/tegra30_i2s.c
152517 @@ -58,8 +58,18 @@ static int tegra30_i2s_runtime_resume(struct device *dev)
152518         }
152520         regcache_cache_only(i2s->regmap, false);
152521 +       regcache_mark_dirty(i2s->regmap);
152523 +       ret = regcache_sync(i2s->regmap);
152524 +       if (ret)
152525 +               goto disable_clocks;
152527         return 0;
152529 +disable_clocks:
152530 +       clk_disable_unprepare(i2s->clk_i2s);
152532 +       return ret;
152535  static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
152536 @@ -551,37 +561,11 @@ static int tegra30_i2s_platform_remove(struct platform_device *pdev)
152537         return 0;
152540 -#ifdef CONFIG_PM_SLEEP
152541 -static int tegra30_i2s_suspend(struct device *dev)
152543 -       struct tegra30_i2s *i2s = dev_get_drvdata(dev);
152545 -       regcache_mark_dirty(i2s->regmap);
152547 -       return 0;
152550 -static int tegra30_i2s_resume(struct device *dev)
152552 -       struct tegra30_i2s *i2s = dev_get_drvdata(dev);
152553 -       int ret;
152555 -       ret = pm_runtime_get_sync(dev);
152556 -       if (ret < 0) {
152557 -               pm_runtime_put(dev);
152558 -               return ret;
152559 -       }
152560 -       ret = regcache_sync(i2s->regmap);
152561 -       pm_runtime_put(dev);
152563 -       return ret;
152565 -#endif
152567  static const struct dev_pm_ops tegra30_i2s_pm_ops = {
152568         SET_RUNTIME_PM_OPS(tegra30_i2s_runtime_suspend,
152569                            tegra30_i2s_runtime_resume, NULL)
152570 -       SET_SYSTEM_SLEEP_PM_OPS(tegra30_i2s_suspend, tegra30_i2s_resume)
152571 +       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
152572 +                               pm_runtime_force_resume)
152575  static struct platform_driver tegra30_i2s_driver = {
152576 diff --git a/sound/usb/card.c b/sound/usb/card.c
152577 index 0826a437f8fc..7b7526d3a56e 100644
152578 --- a/sound/usb/card.c
152579 +++ b/sound/usb/card.c
152580 @@ -181,9 +181,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
152581                                 ctrlif, interface);
152582                         return -EINVAL;
152583                 }
152584 -               usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
152586 -               return 0;
152587 +               return usb_driver_claim_interface(&usb_audio_driver, iface,
152588 +                                                 USB_AUDIO_IFACE_UNUSED);
152589         }
152591         if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
152592 @@ -203,7 +202,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
152594         if (! snd_usb_parse_audio_interface(chip, interface)) {
152595                 usb_set_interface(dev, interface, 0); /* reset the current interface */
152596 -               usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
152597 +               return usb_driver_claim_interface(&usb_audio_driver, iface,
152598 +                                                 USB_AUDIO_IFACE_UNUSED);
152599         }
152601         return 0;
152602 @@ -862,7 +862,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
152603         struct snd_card *card;
152604         struct list_head *p;
152606 -       if (chip == (void *)-1L)
152607 +       if (chip == USB_AUDIO_IFACE_UNUSED)
152608                 return;
152610         card = chip->card;
152611 @@ -992,7 +992,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
152612         struct usb_mixer_interface *mixer;
152613         struct list_head *p;
152615 -       if (chip == (void *)-1L)
152616 +       if (chip == USB_AUDIO_IFACE_UNUSED)
152617                 return 0;
152619         if (!chip->num_suspended_intf++) {
152620 @@ -1022,7 +1022,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
152621         struct list_head *p;
152622         int err = 0;
152624 -       if (chip == (void *)-1L)
152625 +       if (chip == USB_AUDIO_IFACE_UNUSED)
152626                 return 0;
152628         atomic_inc(&chip->active); /* avoid autopm */
152629 diff --git a/sound/usb/clock.c b/sound/usb/clock.c
152630 index a746802d0ac3..17bbde73d4d1 100644
152631 --- a/sound/usb/clock.c
152632 +++ b/sound/usb/clock.c
152633 @@ -296,7 +296,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
152635         selector = snd_usb_find_clock_selector(chip->ctrl_intf, entity_id);
152636         if (selector) {
152637 -               int ret, i, cur;
152638 +               int ret, i, cur, err;
152640                 if (selector->bNrInPins == 1) {
152641                         ret = 1;
152642 @@ -324,13 +324,17 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
152643                 ret = __uac_clock_find_source(chip, fmt,
152644                                               selector->baCSourceID[ret - 1],
152645                                               visited, validate);
152646 +               if (ret > 0) {
152647 +                       err = uac_clock_selector_set_val(chip, entity_id, cur);
152648 +                       if (err < 0)
152649 +                               return err;
152650 +               }
152652                 if (!validate || ret > 0 || !chip->autoclock)
152653                         return ret;
152655                 /* The current clock source is invalid, try others. */
152656                 for (i = 1; i <= selector->bNrInPins; i++) {
152657 -                       int err;
152659                         if (i == cur)
152660                                 continue;
152662 @@ -396,7 +400,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
152664         selector = snd_usb_find_clock_selector_v3(chip->ctrl_intf, entity_id);
152665         if (selector) {
152666 -               int ret, i, cur;
152667 +               int ret, i, cur, err;
152669                 /* the entity ID we are looking for is a selector.
152670                  * find out what it currently selects */
152671 @@ -418,6 +422,12 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
152672                 ret = __uac3_clock_find_source(chip, fmt,
152673                                                selector->baCSourceID[ret - 1],
152674                                                visited, validate);
152675 +               if (ret > 0) {
152676 +                       err = uac_clock_selector_set_val(chip, entity_id, cur);
152677 +                       if (err < 0)
152678 +                               return err;
152679 +               }
152681                 if (!validate || ret > 0 || !chip->autoclock)
152682                         return ret;
152684 diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
152685 index 102d53515a76..933586a895e7 100644
152686 --- a/sound/usb/endpoint.c
152687 +++ b/sound/usb/endpoint.c
152688 @@ -1442,11 +1442,11 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
152689         if (snd_BUG_ON(!atomic_read(&ep->running)))
152690                 return;
152692 -       if (ep->sync_source)
152693 -               WRITE_ONCE(ep->sync_source->sync_sink, NULL);
152695 -       if (!atomic_dec_return(&ep->running))
152696 +       if (!atomic_dec_return(&ep->running)) {
152697 +               if (ep->sync_source)
152698 +                       WRITE_ONCE(ep->sync_source->sync_sink, NULL);
152699                 stop_urbs(ep, false);
152700 +       }
152703  /**
152704 diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
152705 index fdbdfb7bce92..fa8e8faf3eb3 100644
152706 --- a/sound/usb/line6/pcm.c
152707 +++ b/sound/usb/line6/pcm.c
152708 @@ -127,7 +127,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
152709                 if (!alive)
152710                         break;
152711                 set_current_state(TASK_UNINTERRUPTIBLE);
152712 -               schedule_timeout(1);
152713 +               schedule_min_hrtimeout();
152714         } while (--timeout > 0);
152715         if (alive)
152716                 dev_err(line6pcm->line6->ifcdev,
152717 diff --git a/sound/usb/midi.c b/sound/usb/midi.c
152718 index 0c23fa6d8525..cd46ca7cd28d 100644
152719 --- a/sound/usb/midi.c
152720 +++ b/sound/usb/midi.c
152721 @@ -1332,7 +1332,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
152723   error:
152724         snd_usbmidi_in_endpoint_delete(ep);
152725 -       return -ENOMEM;
152726 +       return err;
152730 diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
152731 index 646deb6244b1..c5794e83fd80 100644
152732 --- a/sound/usb/mixer_maps.c
152733 +++ b/sound/usb/mixer_maps.c
152734 @@ -337,6 +337,13 @@ static const struct usbmix_name_map bose_companion5_map[] = {
152735         { 0 }   /* terminator */
152738 +/* Sennheiser Communications Headset [PC 8], the dB value is reported as -6 negative maximum  */
152739 +static const struct usbmix_dB_map sennheiser_pc8_dB = {-9500, 0};
152740 +static const struct usbmix_name_map sennheiser_pc8_map[] = {
152741 +       { 9, NULL, .dB = &sennheiser_pc8_dB },
152742 +       { 0 }   /* terminator */
152746   * Dell usb dock with ALC4020 codec had a firmware problem where it got
152747   * screwed up when zero volume is passed; just skip it as a workaround
152748 @@ -593,6 +600,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
152749                 .id = USB_ID(0x17aa, 0x1046),
152750                 .map = lenovo_p620_rear_map,
152751         },
152752 +       {
152753 +               /* Sennheiser Communications Headset [PC 8] */
152754 +               .id = USB_ID(0x1395, 0x0025),
152755 +               .map = sennheiser_pc8_map,
152756 +       },
152757         { 0 } /* terminator */
152760 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
152761 index 1165a5ac60f2..8a8fe2b980a1 100644
152762 --- a/sound/usb/quirks-table.h
152763 +++ b/sound/usb/quirks-table.h
152764 @@ -2376,6 +2376,16 @@ YAMAHA_DEVICE(0x7010, "UB99"),
152765         }
152769 +       USB_DEVICE_VENDOR_SPEC(0x0944, 0x0204),
152770 +       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
152771 +               .vendor_name = "KORG, Inc.",
152772 +               /* .product_name = "ToneLab EX", */
152773 +               .ifnum = 3,
152774 +               .type = QUIRK_MIDI_STANDARD_INTERFACE,
152775 +       }
152778  /* AKAI devices */
152780         USB_DEVICE(0x09e8, 0x0062),
152781 @@ -3817,6 +3827,69 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
152782                 }
152783         }
152786 +       /*
152787 +        * Pioneer DJ DJM-850
152788 +        * 8 channels playback and 8 channels capture @ 44.1/48/96kHz S24LE
152789 +        * Playback on EP 0x05
152790 +        * Capture on EP 0x86
152791 +        */
152792 +       USB_DEVICE_VENDOR_SPEC(0x08e4, 0x0163),
152793 +       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
152794 +               .ifnum = QUIRK_ANY_INTERFACE,
152795 +               .type = QUIRK_COMPOSITE,
152796 +               .data = (const struct snd_usb_audio_quirk[]) {
152797 +                       {
152798 +                               .ifnum = 0,
152799 +                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
152800 +                               .data = &(const struct audioformat) {
152801 +                                       .formats = SNDRV_PCM_FMTBIT_S24_3LE,
152802 +                                       .channels = 8,
152803 +                                       .iface = 0,
152804 +                                       .altsetting = 1,
152805 +                                       .altset_idx = 1,
152806 +                                       .endpoint = 0x05,
152807 +                                       .ep_attr = USB_ENDPOINT_XFER_ISOC|
152808 +                                           USB_ENDPOINT_SYNC_ASYNC|
152809 +                                               USB_ENDPOINT_USAGE_DATA,
152810 +                                       .rates = SNDRV_PCM_RATE_44100|
152811 +                                               SNDRV_PCM_RATE_48000|
152812 +                                               SNDRV_PCM_RATE_96000,
152813 +                                       .rate_min = 44100,
152814 +                                       .rate_max = 96000,
152815 +                                       .nr_rates = 3,
152816 +                                       .rate_table = (unsigned int[]) { 44100, 48000, 96000 }
152817 +                               }
152818 +                       },
152819 +                       {
152820 +                               .ifnum = 0,
152821 +                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
152822 +                               .data = &(const struct audioformat) {
152823 +                                       .formats = SNDRV_PCM_FMTBIT_S24_3LE,
152824 +                                       .channels = 8,
152825 +                                       .iface = 0,
152826 +                                       .altsetting = 1,
152827 +                                       .altset_idx = 1,
152828 +                                       .endpoint = 0x86,
152829 +                                       .ep_idx = 1,
152830 +                                       .ep_attr = USB_ENDPOINT_XFER_ISOC|
152831 +                                               USB_ENDPOINT_SYNC_ASYNC|
152832 +                                               USB_ENDPOINT_USAGE_DATA,
152833 +                                       .rates = SNDRV_PCM_RATE_44100|
152834 +                                               SNDRV_PCM_RATE_48000|
152835 +                                               SNDRV_PCM_RATE_96000,
152836 +                                       .rate_min = 44100,
152837 +                                       .rate_max = 96000,
152838 +                                       .nr_rates = 3,
152839 +                                       .rate_table = (unsigned int[]) { 44100, 48000, 96000 }
152840 +                               }
152841 +                       },
152842 +                       {
152843 +                               .ifnum = -1
152844 +                       }
152845 +               }
152846 +       }
152849         /*
152850          * Pioneer DJ DJM-450
152851 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
152852 index 176437a441e6..7c6e83eee71d 100644
152853 --- a/sound/usb/quirks.c
152854 +++ b/sound/usb/quirks.c
152855 @@ -55,8 +55,12 @@ static int create_composite_quirk(struct snd_usb_audio *chip,
152856                 if (!iface)
152857                         continue;
152858                 if (quirk->ifnum != probed_ifnum &&
152859 -                   !usb_interface_claimed(iface))
152860 -                       usb_driver_claim_interface(driver, iface, (void *)-1L);
152861 +                   !usb_interface_claimed(iface)) {
152862 +                       err = usb_driver_claim_interface(driver, iface,
152863 +                                                        USB_AUDIO_IFACE_UNUSED);
152864 +                       if (err < 0)
152865 +                               return err;
152866 +               }
152867         }
152869         return 0;
152870 @@ -426,8 +430,12 @@ static int create_autodetect_quirks(struct snd_usb_audio *chip,
152871                         continue;
152873                 err = create_autodetect_quirk(chip, iface, driver);
152874 -               if (err >= 0)
152875 -                       usb_driver_claim_interface(driver, iface, (void *)-1L);
152876 +               if (err >= 0) {
152877 +                       err = usb_driver_claim_interface(driver, iface,
152878 +                                                        USB_AUDIO_IFACE_UNUSED);
152879 +                       if (err < 0)
152880 +                               return err;
152881 +               }
152882         }
152884         return 0;
152885 diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
152886 index 60b9dd7df6bb..8794c8658ab9 100644
152887 --- a/sound/usb/usbaudio.h
152888 +++ b/sound/usb/usbaudio.h
152889 @@ -61,6 +61,8 @@ struct snd_usb_audio {
152890         struct media_intf_devnode *ctl_intf_media_devnode;
152893 +#define USB_AUDIO_IFACE_UNUSED ((void *)-1L)
152895  #define usb_audio_err(chip, fmt, args...) \
152896         dev_err(&(chip)->dev->dev, fmt, ##args)
152897  #define usb_audio_warn(chip, fmt, args...) \
152898 diff --git a/tools/arch/x86/include/asm/unistd_64.h b/tools/arch/x86/include/asm/unistd_64.h
152899 index 4205ed4158bf..b65c51e8d675 100644
152900 --- a/tools/arch/x86/include/asm/unistd_64.h
152901 +++ b/tools/arch/x86/include/asm/unistd_64.h
152902 @@ -17,3 +17,15 @@
152903  #ifndef __NR_setns
152904  #define __NR_setns 308
152905  #endif
152907 +#ifndef __NR_futex_wait
152908 +# define __NR_futex_wait 443
152909 +#endif
152911 +#ifndef __NR_futex_wake
152912 +# define __NR_futex_wake 444
152913 +#endif
152915 +#ifndef __NR_futex_requeue
152916 +# define __NR_futex_requeue 446
152917 +#endif
152918 diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
152919 index fe9e7b3a4b50..1326fff3629b 100644
152920 --- a/tools/bpf/bpftool/btf.c
152921 +++ b/tools/bpf/bpftool/btf.c
152922 @@ -538,6 +538,7 @@ static int do_dump(int argc, char **argv)
152923                         NEXT_ARG();
152924                         if (argc < 1) {
152925                                 p_err("expecting value for 'format' option\n");
152926 +                               err = -EINVAL;
152927                                 goto done;
152928                         }
152929                         if (strcmp(*argv, "c") == 0) {
152930 @@ -547,11 +548,13 @@ static int do_dump(int argc, char **argv)
152931                         } else {
152932                                 p_err("unrecognized format specifier: '%s', possible values: raw, c",
152933                                       *argv);
152934 +                               err = -EINVAL;
152935                                 goto done;
152936                         }
152937                         NEXT_ARG();
152938                 } else {
152939                         p_err("unrecognized option: '%s'", *argv);
152940 +                       err = -EINVAL;
152941                         goto done;
152942                 }
152943         }
152944 diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
152945 index b86f450e6fce..d9afb730136a 100644
152946 --- a/tools/bpf/bpftool/main.c
152947 +++ b/tools/bpf/bpftool/main.c
152948 @@ -276,7 +276,7 @@ static int do_batch(int argc, char **argv)
152949         int n_argc;
152950         FILE *fp;
152951         char *cp;
152952 -       int err;
152953 +       int err = 0;
152954         int i;
152956         if (argc < 2) {
152957 @@ -370,7 +370,6 @@ static int do_batch(int argc, char **argv)
152958         } else {
152959                 if (!json_output)
152960                         printf("processed %d commands\n", lines);
152961 -               err = 0;
152962         }
152963  err_close:
152964         if (fp != stdin)
152965 diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
152966 index b400364ee054..09ae0381205b 100644
152967 --- a/tools/bpf/bpftool/map.c
152968 +++ b/tools/bpf/bpftool/map.c
152969 @@ -100,7 +100,7 @@ static int do_dump_btf(const struct btf_dumper *d,
152970                        void *value)
152972         __u32 value_id;
152973 -       int ret;
152974 +       int ret = 0;
152976         /* start of key-value pair */
152977         jsonw_start_object(d->jw);
152978 diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
152979 index ce58cff99b66..2a6adca37fe9 100644
152980 --- a/tools/include/uapi/asm-generic/unistd.h
152981 +++ b/tools/include/uapi/asm-generic/unistd.h
152982 @@ -864,8 +864,17 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
152983  #define __NR_mount_setattr 442
152984  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
152986 +#define __NR_futex_wait 443
152987 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
152989 +#define __NR_futex_wake 444
152990 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
152992 +#define __NR_futex_waitv 445
152993 +__SC_COMP(__NR_futex_waitv, sys_futex_waitv, compat_sys_futex_waitv)
152995  #undef __NR_syscalls
152996 -#define __NR_syscalls 443
152997 +#define __NR_syscalls 446
153000   * 32 bit systems traditionally used different
153001 diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
153002 index 53b3e199fb25..09ebe3db5f2f 100644
153003 --- a/tools/lib/bpf/bpf_core_read.h
153004 +++ b/tools/lib/bpf/bpf_core_read.h
153005 @@ -88,11 +88,19 @@ enum bpf_enum_value_kind {
153006         const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
153007         unsigned long long val;                                               \
153008                                                                               \
153009 +       /* This is a so-called barrier_var() operation that makes specified   \
153010 +        * variable "a black box" for optimizing compiler.                    \
153011 +        * It forces compiler to perform BYTE_OFFSET relocation on p and use  \
153012 +        * its calculated value in the switch below, instead of applying      \
153013 +        * the same relocation 4 times for each individual memory load.       \
153014 +        */                                                                   \
153015 +       asm volatile("" : "=r"(p) : "0"(p));                                  \
153016 +                                                                             \
153017         switch (__CORE_RELO(s, field, BYTE_SIZE)) {                           \
153018 -       case 1: val = *(const unsigned char *)p;                              \
153019 -       case 2: val = *(const unsigned short *)p;                             \
153020 -       case 4: val = *(const unsigned int *)p;                               \
153021 -       case 8: val = *(const unsigned long long *)p;                         \
153022 +       case 1: val = *(const unsigned char *)p; break;                       \
153023 +       case 2: val = *(const unsigned short *)p; break;                      \
153024 +       case 4: val = *(const unsigned int *)p; break;                        \
153025 +       case 8: val = *(const unsigned long long *)p; break;                  \
153026         }                                                                     \
153027         val <<= __CORE_RELO(s, field, LSHIFT_U64);                            \
153028         if (__CORE_RELO(s, field, SIGNED))                                    \
153029 diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
153030 index f9ef37707888..1c2e91ee041d 100644
153031 --- a/tools/lib/bpf/bpf_tracing.h
153032 +++ b/tools/lib/bpf/bpf_tracing.h
153033 @@ -413,20 +413,38 @@ typeof(name(0)) name(struct pt_regs *ctx)                             \
153034  }                                                                          \
153035  static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
153037 +#define ___bpf_fill0(arr, p, x) do {} while (0)
153038 +#define ___bpf_fill1(arr, p, x) arr[p] = x
153039 +#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
153040 +#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
153041 +#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
153042 +#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
153043 +#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
153044 +#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
153045 +#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
153046 +#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
153047 +#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
153048 +#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
153049 +#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
153050 +#define ___bpf_fill(arr, args...) \
153051 +       ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
153054   * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
153055   * in a structure.
153056   */
153057 -#define BPF_SEQ_PRINTF(seq, fmt, args...)                                  \
153058 -       ({                                                                  \
153059 -               _Pragma("GCC diagnostic push")                              \
153060 -               _Pragma("GCC diagnostic ignored \"-Wint-conversion\"")      \
153061 -               static const char ___fmt[] = fmt;                           \
153062 -               unsigned long long ___param[] = { args };                   \
153063 -               _Pragma("GCC diagnostic pop")                               \
153064 -               int ___ret = bpf_seq_printf(seq, ___fmt, sizeof(___fmt),    \
153065 -                                           ___param, sizeof(___param));    \
153066 -               ___ret;                                                     \
153067 -       })
153068 +#define BPF_SEQ_PRINTF(seq, fmt, args...)                      \
153069 +({                                                             \
153070 +       static const char ___fmt[] = fmt;                       \
153071 +       unsigned long long ___param[___bpf_narg(args)];         \
153072 +                                                               \
153073 +       _Pragma("GCC diagnostic push")                          \
153074 +       _Pragma("GCC diagnostic ignored \"-Wint-conversion\"")  \
153075 +       ___bpf_fill(___param, args);                            \
153076 +       _Pragma("GCC diagnostic pop")                           \
153077 +                                                               \
153078 +       bpf_seq_printf(seq, ___fmt, sizeof(___fmt),             \
153079 +                      ___param, sizeof(___param));             \
153082  #endif
153083 diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
153084 index 1237bcd1dd17..5b8a6ea44b38 100644
153085 --- a/tools/lib/bpf/btf.h
153086 +++ b/tools/lib/bpf/btf.h
153087 @@ -173,6 +173,7 @@ struct btf_dump_emit_type_decl_opts {
153088         int indent_level;
153089         /* strip all the const/volatile/restrict mods */
153090         bool strip_mods;
153091 +       size_t :0;
153093  #define btf_dump_emit_type_decl_opts__last_field strip_mods
153095 diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
153096 index 3c35eb401931..3d690d4e785c 100644
153097 --- a/tools/lib/bpf/libbpf.h
153098 +++ b/tools/lib/bpf/libbpf.h
153099 @@ -507,6 +507,7 @@ struct xdp_link_info {
153100  struct bpf_xdp_set_link_opts {
153101         size_t sz;
153102         int old_fd;
153103 +       size_t :0;
153105  #define bpf_xdp_set_link_opts__last_field old_fd
153107 diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
153108 index e7a8d847161f..1d80ad4e0de8 100644
153109 --- a/tools/lib/bpf/ringbuf.c
153110 +++ b/tools/lib/bpf/ringbuf.c
153111 @@ -202,9 +202,11 @@ static inline int roundup_len(__u32 len)
153112         return (len + 7) / 8 * 8;
153115 -static int ringbuf_process_ring(struct ring* r)
153116 +static int64_t ringbuf_process_ring(struct ring* r)
153118 -       int *len_ptr, len, err, cnt = 0;
153119 +       int *len_ptr, len, err;
153120 +       /* 64-bit to avoid overflow in case of extreme application behavior */
153121 +       int64_t cnt = 0;
153122         unsigned long cons_pos, prod_pos;
153123         bool got_new_data;
153124         void *sample;
153125 @@ -244,12 +246,14 @@ static int ringbuf_process_ring(struct ring* r)
153128  /* Consume available ring buffer(s) data without event polling.
153129 - * Returns number of records consumed across all registered ring buffers, or
153130 - * negative number if any of the callbacks return error.
153131 + * Returns number of records consumed across all registered ring buffers (or
153132 + * INT_MAX, whichever is less), or negative number if any of the callbacks
153133 + * return error.
153134   */
153135  int ring_buffer__consume(struct ring_buffer *rb)
153137 -       int i, err, res = 0;
153138 +       int64_t err, res = 0;
153139 +       int i;
153141         for (i = 0; i < rb->ring_cnt; i++) {
153142                 struct ring *ring = &rb->rings[i];
153143 @@ -259,18 +263,24 @@ int ring_buffer__consume(struct ring_buffer *rb)
153144                         return err;
153145                 res += err;
153146         }
153147 +       if (res > INT_MAX)
153148 +               return INT_MAX;
153149         return res;
153152  /* Poll for available data and consume records, if any are available.
153153 - * Returns number of records consumed, or negative number, if any of the
153154 - * registered callbacks returned error.
153155 + * Returns number of records consumed (or INT_MAX, whichever is less), or
153156 + * negative number, if any of the registered callbacks returned error.
153157   */
153158  int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
153160 -       int i, cnt, err, res = 0;
153161 +       int i, cnt;
153162 +       int64_t err, res = 0;
153164         cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
153165 +       if (cnt < 0)
153166 +               return -errno;
153168         for (i = 0; i < cnt; i++) {
153169                 __u32 ring_id = rb->events[i].data.fd;
153170                 struct ring *ring = &rb->rings[ring_id];
153171 @@ -280,7 +290,9 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
153172                         return err;
153173                 res += err;
153174         }
153175 -       return cnt < 0 ? -errno : res;
153176 +       if (res > INT_MAX)
153177 +               return INT_MAX;
153178 +       return res;
153181  /* Get an fd that can be used to sleep until data is available in the ring(s) */
153182 diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
153183 index d82054225fcc..4d0c02ba3f7d 100644
153184 --- a/tools/lib/perf/include/perf/event.h
153185 +++ b/tools/lib/perf/include/perf/event.h
153186 @@ -8,6 +8,8 @@
153187  #include <linux/bpf.h>
153188  #include <sys/types.h> /* pid_t */
153190 +#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
153192  struct perf_record_mmap {
153193         struct perf_event_header header;
153194         __u32                    pid, tid;
153195 @@ -346,8 +348,9 @@ struct perf_record_time_conv {
153196         __u64                    time_zero;
153197         __u64                    time_cycles;
153198         __u64                    time_mask;
153199 -       bool                     cap_user_time_zero;
153200 -       bool                     cap_user_time_short;
153201 +       __u8                     cap_user_time_zero;
153202 +       __u8                     cap_user_time_short;
153203 +       __u8                     reserved[6];   /* For alignment */
153206  struct perf_record_header_feature {
153207 diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
153208 index d8e59d31399a..c955cd683e22 100644
153209 --- a/tools/perf/Makefile.config
153210 +++ b/tools/perf/Makefile.config
153211 @@ -530,6 +530,7 @@ ifndef NO_LIBELF
153212        ifdef LIBBPF_DYNAMIC
153213          ifeq ($(feature-libbpf), 1)
153214            EXTLIBS += -lbpf
153215 +          $(call detected,CONFIG_LIBBPF_DYNAMIC)
153216          else
153217            dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
153218          endif
153219 diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
153220 index 7bf01cbe582f..86d1b0fae558 100644
153221 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
153222 +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
153223 @@ -364,6 +364,10 @@
153224  440    common  process_madvise         sys_process_madvise
153225  441    common  epoll_pwait2            sys_epoll_pwait2
153226  442    common  mount_setattr           sys_mount_setattr
153227 +443    common  futex_wait              sys_futex_wait
153228 +444    common  futex_wake              sys_futex_wake
153229 +445    common  futex_waitv             sys_futex_waitv
153230 +446    common  futex_requeue           sys_futex_requeue
153233  # Due to a historical design error, certain syscalls are numbered differently
153234 diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
153235 index eac36afab2b3..12346844b354 100644
153236 --- a/tools/perf/bench/bench.h
153237 +++ b/tools/perf/bench/bench.h
153238 @@ -38,9 +38,13 @@ int bench_mem_memcpy(int argc, const char **argv);
153239  int bench_mem_memset(int argc, const char **argv);
153240  int bench_mem_find_bit(int argc, const char **argv);
153241  int bench_futex_hash(int argc, const char **argv);
153242 +int bench_futex2_hash(int argc, const char **argv);
153243  int bench_futex_wake(int argc, const char **argv);
153244 +int bench_futex2_wake(int argc, const char **argv);
153245  int bench_futex_wake_parallel(int argc, const char **argv);
153246 +int bench_futex2_wake_parallel(int argc, const char **argv);
153247  int bench_futex_requeue(int argc, const char **argv);
153248 +int bench_futex2_requeue(int argc, const char **argv);
153249  /* pi futexes */
153250  int bench_futex_lock_pi(int argc, const char **argv);
153251  int bench_epoll_wait(int argc, const char **argv);
153252 diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
153253 index b65373ce5c4f..1068749af40c 100644
153254 --- a/tools/perf/bench/futex-hash.c
153255 +++ b/tools/perf/bench/futex-hash.c
153256 @@ -33,7 +33,7 @@ static unsigned int nthreads = 0;
153257  static unsigned int nsecs    = 10;
153258  /* amount of futexes per thread */
153259  static unsigned int nfutexes = 1024;
153260 -static bool fshared = false, done = false, silent = false;
153261 +static bool fshared = false, done = false, silent = false, futex2 = false;
153262  static int futex_flag = 0;
153264  struct timeval bench__start, bench__end, bench__runtime;
153265 @@ -85,7 +85,10 @@ static void *workerfn(void *arg)
153266                          * such as internal waitqueue handling, thus enlarging
153267                          * the critical region protected by hb->lock.
153268                          */
153269 -                       ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
153270 +                       if (!futex2)
153271 +                               ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
153272 +                       else
153273 +                               ret = futex2_wait(&w->futex[i], 1234, futex_flag, NULL);
153274                         if (!silent &&
153275                             (!ret || errno != EAGAIN || errno != EWOULDBLOCK))
153276                                 warn("Non-expected futex return call");
153277 @@ -116,7 +119,7 @@ static void print_summary(void)
153278                (int)bench__runtime.tv_sec);
153281 -int bench_futex_hash(int argc, const char **argv)
153282 +static int __bench_futex_hash(int argc, const char **argv)
153284         int ret = 0;
153285         cpu_set_t cpuset;
153286 @@ -148,7 +151,9 @@ int bench_futex_hash(int argc, const char **argv)
153287         if (!worker)
153288                 goto errmem;
153290 -       if (!fshared)
153291 +       if (futex2)
153292 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
153293 +       else if (!fshared)
153294                 futex_flag = FUTEX_PRIVATE_FLAG;
153296         printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
153297 @@ -228,3 +233,14 @@ int bench_futex_hash(int argc, const char **argv)
153298  errmem:
153299         err(EXIT_FAILURE, "calloc");
153302 +int bench_futex_hash(int argc, const char **argv)
153304 +       return __bench_futex_hash(argc, argv);
153307 +int bench_futex2_hash(int argc, const char **argv)
153309 +       futex2 = true;
153310 +       return __bench_futex_hash(argc, argv);
153312 diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
153313 index 5fa23295ee5f..6cdd649b54f4 100644
153314 --- a/tools/perf/bench/futex-requeue.c
153315 +++ b/tools/perf/bench/futex-requeue.c
153316 @@ -2,8 +2,8 @@
153318   * Copyright (C) 2013  Davidlohr Bueso <davidlohr@hp.com>
153319   *
153320 - * futex-requeue: Block a bunch of threads on futex1 and requeue them
153321 - *                on futex2, N at a time.
153322 + * futex-requeue: Block a bunch of threads on addr1 and requeue them
153323 + *                on addr2, N at a time.
153324   *
153325   * This program is particularly useful to measure the latency of nthread
153326   * requeues without waking up any tasks -- thus mimicking a regular futex_wait.
153327 @@ -28,7 +28,10 @@
153328  #include <stdlib.h>
153329  #include <sys/time.h>
153331 -static u_int32_t futex1 = 0, futex2 = 0;
153332 +static u_int32_t addr1 = 0, addr2 = 0;
153334 +static struct futex_requeue rq1 = { .uaddr = &addr1, .flags = FUTEX_32 };
153335 +static struct futex_requeue rq2 = { .uaddr = &addr2, .flags = FUTEX_32 };
153338   * How many tasks to requeue at a time.
153339 @@ -37,7 +40,7 @@ static u_int32_t futex1 = 0, futex2 = 0;
153340  static unsigned int nrequeue = 1;
153342  static pthread_t *worker;
153343 -static bool done = false, silent = false, fshared = false;
153344 +static bool done = false, silent = false, fshared = false, futex2 = false;
153345  static pthread_mutex_t thread_lock;
153346  static pthread_cond_t thread_parent, thread_worker;
153347  static struct stats requeuetime_stats, requeued_stats;
153348 @@ -79,7 +82,11 @@ static void *workerfn(void *arg __maybe_unused)
153349         pthread_cond_wait(&thread_worker, &thread_lock);
153350         pthread_mutex_unlock(&thread_lock);
153352 -       futex_wait(&futex1, 0, NULL, futex_flag);
153353 +       if (!futex2)
153354 +               futex_wait(&addr1, 0, NULL, futex_flag);
153355 +       else
153356 +               futex2_wait(&addr1, 0, futex_flag, NULL);
153358         return NULL;
153361 @@ -111,7 +118,7 @@ static void toggle_done(int sig __maybe_unused,
153362         done = true;
153365 -int bench_futex_requeue(int argc, const char **argv)
153366 +static int __bench_futex_requeue(int argc, const char **argv)
153368         int ret = 0;
153369         unsigned int i, j;
153370 @@ -139,15 +146,20 @@ int bench_futex_requeue(int argc, const char **argv)
153371         if (!worker)
153372                 err(EXIT_FAILURE, "calloc");
153374 -       if (!fshared)
153375 +       if (futex2) {
153376 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
153377 +               rq1.flags |= FUTEX_SHARED_FLAG * fshared;
153378 +               rq2.flags |= FUTEX_SHARED_FLAG * fshared;
153379 +       } else if (!fshared) {
153380                 futex_flag = FUTEX_PRIVATE_FLAG;
153381 +       }
153383         if (nrequeue > nthreads)
153384                 nrequeue = nthreads;
153386         printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), "
153387                "%d at a time.\n\n",  getpid(), nthreads,
153388 -              fshared ? "shared":"private", &futex1, &futex2, nrequeue);
153389 +              fshared ? "shared":"private", &addr1, &addr2, nrequeue);
153391         init_stats(&requeued_stats);
153392         init_stats(&requeuetime_stats);
153393 @@ -176,11 +188,15 @@ int bench_futex_requeue(int argc, const char **argv)
153394                 gettimeofday(&start, NULL);
153395                 while (nrequeued < nthreads) {
153396                         /*
153397 -                        * Do not wakeup any tasks blocked on futex1, allowing
153398 +                        * Do not wakeup any tasks blocked on addr1, allowing
153399                          * us to really measure futex_wait functionality.
153400                          */
153401 -                       nrequeued += futex_cmp_requeue(&futex1, 0, &futex2, 0,
153402 -                                                      nrequeue, futex_flag);
153403 +                       if (!futex2)
153404 +                               nrequeued += futex_cmp_requeue(&addr1, 0, &addr2,
153405 +                                                       0, nrequeue, futex_flag);
153406 +                       else
153407 +                               nrequeued += futex2_requeue(&rq1, &rq2,
153408 +                                                       0, nrequeue, 0, 0);
153409                 }
153411                 gettimeofday(&end, NULL);
153412 @@ -194,8 +210,12 @@ int bench_futex_requeue(int argc, const char **argv)
153413                                j + 1, nrequeued, nthreads, runtime.tv_usec / (double)USEC_PER_MSEC);
153414                 }
153416 -               /* everybody should be blocked on futex2, wake'em up */
153417 -               nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
153418 +               /* everybody should be blocked on addr2, wake'em up */
153419 +               if (!futex2)
153420 +                       nrequeued = futex_wake(&addr2, nrequeued, futex_flag);
153421 +               else
153422 +                       nrequeued = futex2_wake(&addr2, nrequeued, futex_flag);
153424                 if (nthreads != nrequeued)
153425                         warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
153427 @@ -220,3 +240,14 @@ int bench_futex_requeue(int argc, const char **argv)
153428         usage_with_options(bench_futex_requeue_usage, options);
153429         exit(EXIT_FAILURE);
153432 +int bench_futex_requeue(int argc, const char **argv)
153434 +       return __bench_futex_requeue(argc, argv);
153437 +int bench_futex2_requeue(int argc, const char **argv)
153439 +       futex2 = true;
153440 +       return __bench_futex_requeue(argc, argv);
153442 diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
153443 index 6e6f5247e1fe..cac90fc0bfb3 100644
153444 --- a/tools/perf/bench/futex-wake-parallel.c
153445 +++ b/tools/perf/bench/futex-wake-parallel.c
153446 @@ -17,6 +17,12 @@ int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe
153447         pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
153448         return 0;
153451 +int bench_futex2_wake_parallel(int argc __maybe_unused, const char **argv __maybe_unused)
153453 +       pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
153454 +       return 0;
153456  #else /* HAVE_PTHREAD_BARRIER */
153457  /* For the CLR_() macros */
153458  #include <string.h>
153459 @@ -47,7 +53,7 @@ static unsigned int nwakes = 1;
153460  static u_int32_t futex = 0;
153462  static pthread_t *blocked_worker;
153463 -static bool done = false, silent = false, fshared = false;
153464 +static bool done = false, silent = false, fshared = false, futex2 = false;
153465  static unsigned int nblocked_threads = 0, nwaking_threads = 0;
153466  static pthread_mutex_t thread_lock;
153467  static pthread_cond_t thread_parent, thread_worker;
153468 @@ -78,7 +84,11 @@ static void *waking_workerfn(void *arg)
153470         gettimeofday(&start, NULL);
153472 -       waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
153473 +       if (!futex2)
153474 +               waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
153475 +       else
153476 +               waker->nwoken = futex2_wake(&futex, nwakes, futex_flag);
153478         if (waker->nwoken != nwakes)
153479                 warnx("couldn't wakeup all tasks (%d/%d)",
153480                       waker->nwoken, nwakes);
153481 @@ -129,8 +139,13 @@ static void *blocked_workerfn(void *arg __maybe_unused)
153482         pthread_mutex_unlock(&thread_lock);
153484         while (1) { /* handle spurious wakeups */
153485 -               if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
153486 -                       break;
153487 +               if (!futex2) {
153488 +                       if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
153489 +                               break;
153490 +               } else {
153491 +                       if (futex2_wait(&futex, 0, futex_flag, NULL) != EINTR)
153492 +                               break;
153493 +               }
153494         }
153496         pthread_exit(NULL);
153497 @@ -217,7 +232,7 @@ static void toggle_done(int sig __maybe_unused,
153498         done = true;
153501 -int bench_futex_wake_parallel(int argc, const char **argv)
153502 +static int __bench_futex_wake_parallel(int argc, const char **argv)
153504         int ret = 0;
153505         unsigned int i, j;
153506 @@ -261,7 +276,9 @@ int bench_futex_wake_parallel(int argc, const char **argv)
153507         if (!blocked_worker)
153508                 err(EXIT_FAILURE, "calloc");
153510 -       if (!fshared)
153511 +       if (futex2)
153512 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
153513 +       else if (!fshared)
153514                 futex_flag = FUTEX_PRIVATE_FLAG;
153516         printf("Run summary [PID %d]: blocking on %d threads (at [%s] "
153517 @@ -321,4 +338,16 @@ int bench_futex_wake_parallel(int argc, const char **argv)
153518         free(blocked_worker);
153519         return ret;
153522 +int bench_futex_wake_parallel(int argc, const char **argv)
153524 +       return __bench_futex_wake_parallel(argc, argv);
153527 +int bench_futex2_wake_parallel(int argc, const char **argv)
153529 +       futex2 = true;
153530 +       return __bench_futex_wake_parallel(argc, argv);
153533  #endif /* HAVE_PTHREAD_BARRIER */
153534 diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
153535 index 6d217868f53c..546d2818eed8 100644
153536 --- a/tools/perf/bench/futex-wake.c
153537 +++ b/tools/perf/bench/futex-wake.c
153538 @@ -38,7 +38,7 @@ static u_int32_t futex1 = 0;
153539  static unsigned int nwakes = 1;
153541  pthread_t *worker;
153542 -static bool done = false, silent = false, fshared = false;
153543 +static bool done = false, silent = false, fshared = false, futex2 = false;
153544  static pthread_mutex_t thread_lock;
153545  static pthread_cond_t thread_parent, thread_worker;
153546  static struct stats waketime_stats, wakeup_stats;
153547 @@ -68,8 +68,13 @@ static void *workerfn(void *arg __maybe_unused)
153548         pthread_mutex_unlock(&thread_lock);
153550         while (1) {
153551 -               if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
153552 -                       break;
153553 +               if (!futex2) {
153554 +                       if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
153555 +                               break;
153556 +               } else {
153557 +                       if (futex2_wait(&futex1, 0, futex_flag, NULL) != EINTR)
153558 +                               break;
153559 +               }
153560         }
153562         pthread_exit(NULL);
153563 @@ -117,7 +122,7 @@ static void toggle_done(int sig __maybe_unused,
153564         done = true;
153567 -int bench_futex_wake(int argc, const char **argv)
153568 +static int __bench_futex_wake(int argc, const char **argv)
153570         int ret = 0;
153571         unsigned int i, j;
153572 @@ -147,7 +152,9 @@ int bench_futex_wake(int argc, const char **argv)
153573         if (!worker)
153574                 err(EXIT_FAILURE, "calloc");
153576 -       if (!fshared)
153577 +       if (futex2)
153578 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
153579 +       else if (!fshared)
153580                 futex_flag = FUTEX_PRIVATE_FLAG;
153582         printf("Run summary [PID %d]: blocking on %d threads (at [%s] futex %p), "
153583 @@ -179,9 +186,14 @@ int bench_futex_wake(int argc, const char **argv)
153585                 /* Ok, all threads are patiently blocked, start waking folks up */
153586                 gettimeofday(&start, NULL);
153587 -               while (nwoken != nthreads)
153588 -                       nwoken += futex_wake(&futex1, nwakes, futex_flag);
153589 +               while (nwoken != nthreads) {
153590 +                       if (!futex2)
153591 +                               nwoken += futex_wake(&futex1, nwakes, futex_flag);
153592 +                       else
153593 +                               nwoken += futex2_wake(&futex1, nwakes, futex_flag);
153594 +               }
153595                 gettimeofday(&end, NULL);
153597                 timersub(&end, &start, &runtime);
153599                 update_stats(&wakeup_stats, nwoken);
153600 @@ -211,3 +223,14 @@ int bench_futex_wake(int argc, const char **argv)
153601         free(worker);
153602         return ret;
153605 +int bench_futex_wake(int argc, const char **argv)
153607 +       return __bench_futex_wake(argc, argv);
153610 +int bench_futex2_wake(int argc, const char **argv)
153612 +       futex2 = true;
153613 +       return __bench_futex_wake(argc, argv);
153615 diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
153616 index 31b53cc7d5bc..6b2213cf3f64 100644
153617 --- a/tools/perf/bench/futex.h
153618 +++ b/tools/perf/bench/futex.h
153619 @@ -86,4 +86,51 @@ futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wak
153620         return futex(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2,
153621                  val, opflags);
153625 + * futex2_wait - Wait at uaddr if *uaddr == val, until timo.
153626 + * @uaddr: User address to wait for
153627 + * @val:   Expected value at uaddr
153628 + * @flags: Operation options
153629 + * @timo:  Optional timeout
153631 + * Return: 0 on success, error code otherwise
153632 + */
153633 +static inline int futex2_wait(volatile void *uaddr, unsigned long val,
153634 +                             unsigned long flags, struct timespec *timo)
153636 +       return syscall(__NR_futex_wait, uaddr, val, flags, timo);
153640 + * futex2_wake - Wake a number of waiters waiting at uaddr
153641 + * @uaddr: Address to wake
153642 + * @nr:    Number of waiters to wake
153643 + * @flags: Operation options
153645 + * Return: number of waked futexes
153646 + */
153647 +static inline int futex2_wake(volatile void *uaddr, unsigned int nr, unsigned long flags)
153649 +       return syscall(__NR_futex_wake, uaddr, nr, flags);
153653 + * futex2_requeue - Requeue waiters from an address to another one
153654 + * @uaddr1:     Address where waiters are currently waiting on
153655 + * @uaddr2:     New address to wait
153656 + * @nr_wake:    Number of waiters at uaddr1 to be wake
153657 + * @nr_requeue: After waking nr_wake, number of waiters to be requeued
153658 + * @cmpval:     Expected value at uaddr1
153659 + * @flags: Operation options
153661 + * Return: waked futexes + requeued futexes at uaddr1
153662 + */
153663 +static inline int futex2_requeue(volatile struct futex_requeue *uaddr1,
153664 +                                volatile struct futex_requeue *uaddr2,
153665 +                                unsigned int nr_wake, unsigned int nr_requeue,
153666 +                                unsigned int cmpval, unsigned long flags)
153668 +       return syscall(__NR_futex_requeue, uaddr1, uaddr2, nr_wake, nr_requeue, cmpval, flags);
153670  #endif /* _FUTEX_H */
153671 diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
153672 index 62a7b7420a44..e41a95ad2db6 100644
153673 --- a/tools/perf/builtin-bench.c
153674 +++ b/tools/perf/builtin-bench.c
153675 @@ -12,10 +12,11 @@
153676   *
153677   *  sched ... scheduler and IPC performance
153678   *  syscall ... System call performance
153679 - *  mem   ... memory access performance
153680 - *  numa  ... NUMA scheduling and MM performance
153681 - *  futex ... Futex performance
153682 - *  epoll ... Event poll performance
153683 + *  mem    ... memory access performance
153684 + *  numa   ... NUMA scheduling and MM performance
153685 + *  futex  ... Futex performance
153686 + *  futex2 ... Futex2 performance
153687 + *  epoll  ... Event poll performance
153688   */
153689  #include <subcmd/parse-options.h>
153690  #include "builtin.h"
153691 @@ -75,6 +76,14 @@ static struct bench futex_benchmarks[] = {
153692         { NULL,         NULL,                                           NULL                    }
153695 +static struct bench futex2_benchmarks[] = {
153696 +       { "hash",          "Benchmark for futex2 hash table",            bench_futex2_hash      },
153697 +       { "wake",          "Benchmark for futex2 wake calls",            bench_futex2_wake      },
153698 +       { "wake-parallel", "Benchmark for parallel futex2 wake calls",   bench_futex2_wake_parallel },
153699 +       { "requeue",       "Benchmark for futex2 requeue calls",         bench_futex2_requeue   },
153700 +       { NULL,         NULL,                                           NULL                    }
153703  #ifdef HAVE_EVENTFD_SUPPORT
153704  static struct bench epoll_benchmarks[] = {
153705         { "wait",       "Benchmark epoll concurrent epoll_waits",       bench_epoll_wait        },
153706 @@ -105,6 +114,7 @@ static struct collection collections[] = {
153707         { "numa",       "NUMA scheduling and MM benchmarks",            numa_benchmarks         },
153708  #endif
153709         {"futex",       "Futex stressing benchmarks",                   futex_benchmarks        },
153710 +       {"futex2",      "Futex2 stressing benchmarks",                  futex2_benchmarks        },
153711  #ifdef HAVE_EVENTFD_SUPPORT
153712         {"epoll",       "Epoll stressing benchmarks",                   epoll_benchmarks        },
153713  #endif
153714 diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
153715 index 4ea7ec4f496e..008f1683e540 100644
153716 --- a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
153717 +++ b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
153718 @@ -275,7 +275,7 @@
153719    {
153720      "EventName": "l2_pf_hit_l2",
153721      "EventCode": "0x70",
153722 -    "BriefDescription": "L2 prefetch hit in L2.",
153723 +    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
153724      "UMask": "0xff"
153725    },
153726    {
153727 diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
153728 index 2cfe2d2f3bfd..3c954543d1ae 100644
153729 --- a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
153730 +++ b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
153731 @@ -79,10 +79,10 @@
153732      "UMask": "0x70"
153733    },
153734    {
153735 -    "MetricName": "l2_cache_hits_from_l2_hwpf",
153736 +    "EventName": "l2_cache_hits_from_l2_hwpf",
153737 +    "EventCode": "0x70",
153738      "BriefDescription": "L2 Cache Hits from L2 HWPF",
153739 -    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
153740 -    "MetricGroup": "l2_cache"
153741 +    "UMask": "0xff"
153742    },
153743    {
153744      "EventName": "l3_accesses",
153745 diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
153746 index f61b982f83ca..8ba84a48188d 100644
153747 --- a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
153748 +++ b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
153749 @@ -205,7 +205,7 @@
153750    {
153751      "EventName": "l2_pf_hit_l2",
153752      "EventCode": "0x70",
153753 -    "BriefDescription": "L2 prefetch hit in L2.",
153754 +    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
153755      "UMask": "0xff"
153756    },
153757    {
153758 diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
153759 index 2ef91e25e661..1c624cee9ef4 100644
153760 --- a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
153761 +++ b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
153762 @@ -79,10 +79,10 @@
153763      "UMask": "0x70"
153764    },
153765    {
153766 -    "MetricName": "l2_cache_hits_from_l2_hwpf",
153767 +    "EventName": "l2_cache_hits_from_l2_hwpf",
153768 +    "EventCode": "0x70",
153769      "BriefDescription": "L2 Cache Hits from L2 HWPF",
153770 -    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
153771 -    "MetricGroup": "l2_cache"
153772 +    "UMask": "0xff"
153773    },
153774    {
153775      "EventName": "l3_accesses",
153776 diff --git a/tools/perf/trace/beauty/fsconfig.sh b/tools/perf/trace/beauty/fsconfig.sh
153777 index 83fb24df05c9..bc6ef7bb7a5f 100755
153778 --- a/tools/perf/trace/beauty/fsconfig.sh
153779 +++ b/tools/perf/trace/beauty/fsconfig.sh
153780 @@ -10,8 +10,7 @@ fi
153781  linux_mount=${linux_header_dir}/mount.h
153783  printf "static const char *fsconfig_cmds[] = {\n"
153784 -regex='^[[:space:]]*+FSCONFIG_([[:alnum:]_]+)[[:space:]]*=[[:space:]]*([[:digit:]]+)[[:space:]]*,[[:space:]]*.*'
153785 -egrep $regex ${linux_mount} | \
153786 -       sed -r "s/$regex/\2 \1/g"       | \
153787 -       xargs printf "\t[%s] = \"%s\",\n"
153788 +ms='[[:space:]]*'
153789 +sed -nr "s/^${ms}FSCONFIG_([[:alnum:]_]+)${ms}=${ms}([[:digit:]]+)${ms},.*/\t[\2] = \"\1\",/p" \
153790 +       ${linux_mount}
153791  printf "};\n"
153792 diff --git a/tools/perf/util/Build b/tools/perf/util/Build
153793 index e3e12f9d4733..5a296ac69415 100644
153794 --- a/tools/perf/util/Build
153795 +++ b/tools/perf/util/Build
153796 @@ -141,7 +141,14 @@ perf-$(CONFIG_LIBELF) += symbol-elf.o
153797  perf-$(CONFIG_LIBELF) += probe-file.o
153798  perf-$(CONFIG_LIBELF) += probe-event.o
153800 +ifdef CONFIG_LIBBPF_DYNAMIC
153801 +  hashmap := 1
153802 +endif
153803  ifndef CONFIG_LIBBPF
153804 +  hashmap := 1
153805 +endif
153807 +ifdef hashmap
153808  perf-y += hashmap.o
153809  endif
153811 diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
153812 index 9760d8e7b386..917a9c707371 100644
153813 --- a/tools/perf/util/jitdump.c
153814 +++ b/tools/perf/util/jitdump.c
153815 @@ -396,21 +396,31 @@ static pid_t jr_entry_tid(struct jit_buf_desc *jd, union jr_entry *jr)
153817  static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
153819 -       struct perf_tsc_conversion tc;
153820 +       struct perf_tsc_conversion tc = { .time_shift = 0, };
153821 +       struct perf_record_time_conv *time_conv = &jd->session->time_conv;
153823         if (!jd->use_arch_timestamp)
153824                 return timestamp;
153826 -       tc.time_shift          = jd->session->time_conv.time_shift;
153827 -       tc.time_mult           = jd->session->time_conv.time_mult;
153828 -       tc.time_zero           = jd->session->time_conv.time_zero;
153829 -       tc.time_cycles         = jd->session->time_conv.time_cycles;
153830 -       tc.time_mask           = jd->session->time_conv.time_mask;
153831 -       tc.cap_user_time_zero  = jd->session->time_conv.cap_user_time_zero;
153832 -       tc.cap_user_time_short = jd->session->time_conv.cap_user_time_short;
153833 +       tc.time_shift = time_conv->time_shift;
153834 +       tc.time_mult  = time_conv->time_mult;
153835 +       tc.time_zero  = time_conv->time_zero;
153837 -       if (!tc.cap_user_time_zero)
153838 -               return 0;
153839 +       /*
153840 +        * The event TIME_CONV was extended for the fields from "time_cycles"
153841 +        * when supported cap_user_time_short, for backward compatibility,
153842 +        * checks the event size and assigns these extended fields if these
153843 +        * fields are contained in the event.
153844 +        */
153845 +       if (event_contains(*time_conv, time_cycles)) {
153846 +               tc.time_cycles         = time_conv->time_cycles;
153847 +               tc.time_mask           = time_conv->time_mask;
153848 +               tc.cap_user_time_zero  = time_conv->cap_user_time_zero;
153849 +               tc.cap_user_time_short = time_conv->cap_user_time_short;
153851 +               if (!tc.cap_user_time_zero)
153852 +                       return 0;
153853 +       }
153855         return tsc_to_perf_time(timestamp, &tc);
153857 diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
153858 index 859832a82496..e9d4e6f4bdf3 100644
153859 --- a/tools/perf/util/session.c
153860 +++ b/tools/perf/util/session.c
153861 @@ -949,6 +949,19 @@ static void perf_event__stat_round_swap(union perf_event *event,
153862         event->stat_round.time = bswap_64(event->stat_round.time);
153865 +static void perf_event__time_conv_swap(union perf_event *event,
153866 +                                      bool sample_id_all __maybe_unused)
153868 +       event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
153869 +       event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
153870 +       event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);
153872 +       if (event_contains(event->time_conv, time_cycles)) {
153873 +               event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
153874 +               event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
153875 +       }
153878  typedef void (*perf_event__swap_op)(union perf_event *event,
153879                                     bool sample_id_all);
153881 @@ -985,7 +998,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
153882         [PERF_RECORD_STAT]                = perf_event__stat_swap,
153883         [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
153884         [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
153885 -       [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
153886 +       [PERF_RECORD_TIME_CONV]           = perf_event__time_conv_swap,
153887         [PERF_RECORD_HEADER_MAX]          = NULL,
153890 diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
153891 index 35c936ce33ef..2664fb65e47a 100644
153892 --- a/tools/perf/util/symbol_fprintf.c
153893 +++ b/tools/perf/util/symbol_fprintf.c
153894 @@ -68,7 +68,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
153896         for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
153897                 pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
153898 -               fprintf(fp, "%s\n", pos->sym.name);
153899 +               ret += fprintf(fp, "%s\n", pos->sym.name);
153900         }
153902         return ret;
153903 diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
153904 index 8e54ce47648e..3bf1820c0da1 100644
153905 --- a/tools/power/x86/intel-speed-select/isst-display.c
153906 +++ b/tools/power/x86/intel-speed-select/isst-display.c
153907 @@ -25,10 +25,14 @@ static void printcpulist(int str_len, char *str, int mask_size,
153908                         index = snprintf(&str[curr_index],
153909                                          str_len - curr_index, ",");
153910                         curr_index += index;
153911 +                       if (curr_index >= str_len)
153912 +                               break;
153913                 }
153914                 index = snprintf(&str[curr_index], str_len - curr_index, "%d",
153915                                  i);
153916                 curr_index += index;
153917 +               if (curr_index >= str_len)
153918 +                       break;
153919                 first = 0;
153920         }
153922 @@ -64,10 +68,14 @@ static void printcpumask(int str_len, char *str, int mask_size,
153923                 index = snprintf(&str[curr_index], str_len - curr_index, "%08x",
153924                                  mask[i]);
153925                 curr_index += index;
153926 +               if (curr_index >= str_len)
153927 +                       break;
153928                 if (i) {
153929                         strncat(&str[curr_index], ",", str_len - curr_index);
153930                         curr_index++;
153931                 }
153932 +               if (curr_index >= str_len)
153933 +                       break;
153934         }
153936         free(mask);
153937 @@ -185,7 +193,7 @@ static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
153938                                           int disp_level)
153940         char header[256];
153941 -       char value[256];
153942 +       char value[512];
153944         snprintf(header, sizeof(header), "speed-select-base-freq-properties");
153945         format_and_print(outf, disp_level, header, NULL);
153946 @@ -349,7 +357,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
153947                                    struct isst_pkg_ctdp *pkg_dev)
153949         char header[256];
153950 -       char value[256];
153951 +       char value[512];
153952         static int level;
153953         int i;
153955 diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
153956 index a7c4f0772e53..002697021474 100644
153957 --- a/tools/power/x86/turbostat/turbostat.c
153958 +++ b/tools/power/x86/turbostat/turbostat.c
153959 @@ -291,13 +291,16 @@ struct msr_sum_array {
153960  /* The percpu MSR sum array.*/
153961  struct msr_sum_array *per_cpu_msr_sum;
153963 -int idx_to_offset(int idx)
153964 +off_t idx_to_offset(int idx)
153966 -       int offset;
153967 +       off_t offset;
153969         switch (idx) {
153970         case IDX_PKG_ENERGY:
153971 -               offset = MSR_PKG_ENERGY_STATUS;
153972 +               if (do_rapl & RAPL_AMD_F17H)
153973 +                       offset = MSR_PKG_ENERGY_STAT;
153974 +               else
153975 +                       offset = MSR_PKG_ENERGY_STATUS;
153976                 break;
153977         case IDX_DRAM_ENERGY:
153978                 offset = MSR_DRAM_ENERGY_STATUS;
153979 @@ -320,12 +323,13 @@ int idx_to_offset(int idx)
153980         return offset;
153983 -int offset_to_idx(int offset)
153984 +int offset_to_idx(off_t offset)
153986         int idx;
153988         switch (offset) {
153989         case MSR_PKG_ENERGY_STATUS:
153990 +       case MSR_PKG_ENERGY_STAT:
153991                 idx = IDX_PKG_ENERGY;
153992                 break;
153993         case MSR_DRAM_ENERGY_STATUS:
153994 @@ -353,7 +357,7 @@ int idx_valid(int idx)
153996         switch (idx) {
153997         case IDX_PKG_ENERGY:
153998 -               return do_rapl & RAPL_PKG;
153999 +               return do_rapl & (RAPL_PKG | RAPL_AMD_F17H);
154000         case IDX_DRAM_ENERGY:
154001                 return do_rapl & RAPL_DRAM;
154002         case IDX_PP0_ENERGY:
154003 @@ -3272,7 +3276,7 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
154005         for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) {
154006                 unsigned long long msr_cur, msr_last;
154007 -               int offset;
154008 +               off_t offset;
154010                 if (!idx_valid(i))
154011                         continue;
154012 @@ -3281,7 +3285,8 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
154013                         continue;
154014                 ret = get_msr(cpu, offset, &msr_cur);
154015                 if (ret) {
154016 -                       fprintf(outf, "Can not update msr(0x%x)\n", offset);
154017 +                       fprintf(outf, "Can not update msr(0x%llx)\n",
154018 +                               (unsigned long long)offset);
154019                         continue;
154020                 }
154022 @@ -4817,33 +4822,12 @@ double discover_bclk(unsigned int family, unsigned int model)
154023   * below this value, including the Digital Thermal Sensor (DTS),
154024   * Package Thermal Management Sensor (PTM), and thermal event thresholds.
154025   */
154026 -int read_tcc_activation_temp()
154027 +int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
154029         unsigned long long msr;
154030 -       unsigned int tcc, target_c, offset_c;
154032 -       /* Temperature Target MSR is Nehalem and newer only */
154033 -       if (!do_nhm_platform_info)
154034 -               return 0;
154036 -       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
154037 -               return 0;
154039 -       target_c = (msr >> 16) & 0xFF;
154041 -       offset_c = (msr >> 24) & 0xF;
154043 -       tcc = target_c - offset_c;
154045 -       if (!quiet)
154046 -               fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n",
154047 -                       base_cpu, msr, tcc, target_c, offset_c);
154049 -       return tcc;
154051 +       unsigned int target_c_local;
154052 +       int cpu;
154054 -int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
154056         /* tcc_activation_temp is used only for dts or ptm */
154057         if (!(do_dts || do_ptm))
154058                 return 0;
154059 @@ -4852,18 +4836,43 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
154060         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
154061                 return 0;
154063 +       cpu = t->cpu_id;
154064 +       if (cpu_migrate(cpu)) {
154065 +               fprintf(outf, "Could not migrate to CPU %d\n", cpu);
154066 +               return -1;
154067 +       }
154069         if (tcc_activation_temp_override != 0) {
154070                 tcc_activation_temp = tcc_activation_temp_override;
154071 -               fprintf(outf, "Using cmdline TCC Target (%d C)\n", tcc_activation_temp);
154072 +               fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
154073 +                       cpu, tcc_activation_temp);
154074                 return 0;
154075         }
154077 -       tcc_activation_temp = read_tcc_activation_temp();
154078 -       if (tcc_activation_temp)
154079 -               return 0;
154080 +       /* Temperature Target MSR is Nehalem and newer only */
154081 +       if (!do_nhm_platform_info)
154082 +               goto guess;
154084 +       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
154085 +               goto guess;
154087 +       target_c_local = (msr >> 16) & 0xFF;
154089 +       if (!quiet)
154090 +               fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
154091 +                       cpu, msr, target_c_local);
154093 +       if (!target_c_local)
154094 +               goto guess;
154096 +       tcc_activation_temp = target_c_local;
154098 +       return 0;
154100 +guess:
154101         tcc_activation_temp = TJMAX_DEFAULT;
154102 -       fprintf(outf, "Guessing tjMax %d C, Please use -T to specify\n", tcc_activation_temp);
154103 +       fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
154104 +               cpu, tcc_activation_temp);
154106         return 0;
154108 diff --git a/tools/spi/Makefile b/tools/spi/Makefile
154109 index ada881afb489..0aa6dbd31fb8 100644
154110 --- a/tools/spi/Makefile
154111 +++ b/tools/spi/Makefile
154112 @@ -25,11 +25,12 @@ include $(srctree)/tools/build/Makefile.include
154114  # We need the following to be outside of kernel tree
154116 -$(OUTPUT)include/linux/spi/spidev.h: ../../include/uapi/linux/spi/spidev.h
154117 +$(OUTPUT)include/linux/spi: ../../include/uapi/linux/spi
154118         mkdir -p $(OUTPUT)include/linux/spi 2>&1 || true
154119         ln -sf $(CURDIR)/../../include/uapi/linux/spi/spidev.h $@
154120 +       ln -sf $(CURDIR)/../../include/uapi/linux/spi/spi.h $@
154122 -prepare: $(OUTPUT)include/linux/spi/spidev.h
154123 +prepare: $(OUTPUT)include/linux/spi
154126  # spidev_test
154127 diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile
154128 index 0b3af552632a..df15d44aeb8d 100644
154129 --- a/tools/testing/selftests/arm64/mte/Makefile
154130 +++ b/tools/testing/selftests/arm64/mte/Makefile
154131 @@ -6,9 +6,7 @@ SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
154132  PROGS := $(patsubst %.c,%,$(SRCS))
154134  #Add mte compiler option
154135 -ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep gcc),)
154136  CFLAGS += -march=armv8.5-a+memtag
154137 -endif
154139  #check if the compiler works well
154140  mte_cc_support := $(shell if ($(CC) $(CFLAGS) -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
154141 diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
154142 index 39f8908988ea..70665ba88cbb 100644
154143 --- a/tools/testing/selftests/arm64/mte/mte_common_util.c
154144 +++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
154145 @@ -278,22 +278,13 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
154146         return 0;
154149 -#define ID_AA64PFR1_MTE_SHIFT          8
154150 -#define ID_AA64PFR1_MTE                        2
154152  int mte_default_setup(void)
154154 -       unsigned long hwcaps = getauxval(AT_HWCAP);
154155 +       unsigned long hwcaps2 = getauxval(AT_HWCAP2);
154156         unsigned long en = 0;
154157         int ret;
154159 -       if (!(hwcaps & HWCAP_CPUID)) {
154160 -               ksft_print_msg("FAIL: CPUID registers unavailable\n");
154161 -               return KSFT_FAIL;
154162 -       }
154163 -       /* Read ID_AA64PFR1_EL1 register */
154164 -       asm volatile("mrs %0, id_aa64pfr1_el1" : "=r"(hwcaps) : : "memory");
154165 -       if (((hwcaps >> ID_AA64PFR1_MTE_SHIFT) & MT_TAG_MASK) != ID_AA64PFR1_MTE) {
154166 +       if (!(hwcaps2 & HWCAP2_MTE)) {
154167                 ksft_print_msg("FAIL: MTE features unavailable\n");
154168                 return KSFT_SKIP;
154169         }
154170 diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
154171 index 044bfdcf5b74..76a325862119 100644
154172 --- a/tools/testing/selftests/bpf/Makefile
154173 +++ b/tools/testing/selftests/bpf/Makefile
154174 @@ -221,7 +221,7 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile)                \
154175                     DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
154176  endif
154178 -$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
154179 +$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
154180  ifeq ($(VMLINUX_H),)
154181         $(call msg,GEN,,$@)
154182         $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
154183 @@ -346,7 +346,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o:                         \
154185  $(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h:                      \
154186                       $(TRUNNER_OUTPUT)/%.o                             \
154187 -                     | $(BPFTOOL) $(TRUNNER_OUTPUT)
154188 +                     $(BPFTOOL)                                        \
154189 +                     | $(TRUNNER_OUTPUT)
154190         $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
154191         $(Q)$$(BPFTOOL) gen skeleton $$< > $$@
154192  endif
154193 diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
154194 index 06eb956ff7bb..4b517d76257d 100644
154195 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
154196 +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
154197 @@ -210,11 +210,6 @@ static int duration = 0;
154198         .bpf_obj_file = "test_core_reloc_existence.o",                  \
154199         .btf_src_file = "btf__core_reloc_" #name ".o"                   \
154201 -#define FIELD_EXISTS_ERR_CASE(name) {                                  \
154202 -       FIELD_EXISTS_CASE_COMMON(name),                                 \
154203 -       .fails = true,                                                  \
154206  #define BITFIELDS_CASE_COMMON(objfile, test_name_prefix,  name)                \
154207         .case_name = test_name_prefix#name,                             \
154208         .bpf_obj_file = objfile,                                        \
154209 @@ -222,7 +217,7 @@ static int duration = 0;
154211  #define BITFIELDS_CASE(name, ...) {                                    \
154212         BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o",     \
154213 -                             "direct:", name),                         \
154214 +                             "probed:", name),                         \
154215         .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,     \
154216         .input_len = sizeof(struct core_reloc_##name),                  \
154217         .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)       \
154218 @@ -230,7 +225,7 @@ static int duration = 0;
154219         .output_len = sizeof(struct core_reloc_bitfields_output),       \
154220  }, {                                                                   \
154221         BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o",     \
154222 -                             "probed:", name),                         \
154223 +                             "direct:", name),                         \
154224         .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,     \
154225         .input_len = sizeof(struct core_reloc_##name),                  \
154226         .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)       \
154227 @@ -550,8 +545,7 @@ static struct core_reloc_test_case test_cases[] = {
154228         ARRAYS_ERR_CASE(arrays___err_too_small),
154229         ARRAYS_ERR_CASE(arrays___err_too_shallow),
154230         ARRAYS_ERR_CASE(arrays___err_non_array),
154231 -       ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
154232 -       ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
154233 +       ARRAYS_ERR_CASE(arrays___err_wrong_val_type),
154234         ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
154236         /* enum/ptr/int handling scenarios */
154237 @@ -642,13 +636,25 @@ static struct core_reloc_test_case test_cases[] = {
154238                 },
154239                 .output_len = sizeof(struct core_reloc_existence_output),
154240         },
154242 -       FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
154243 -       FIELD_EXISTS_ERR_CASE(existence__err_int_type),
154244 -       FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
154245 -       FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
154246 -       FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
154247 -       FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
154248 +       {
154249 +               FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs),
154250 +               .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) {
154251 +               },
154252 +               .input_len = sizeof(struct core_reloc_existence___wrong_field_defs),
154253 +               .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
154254 +                       .a_exists = 0,
154255 +                       .b_exists = 0,
154256 +                       .c_exists = 0,
154257 +                       .arr_exists = 0,
154258 +                       .s_exists = 0,
154259 +                       .a_value = 0xff000001u,
154260 +                       .b_value = 0xff000002u,
154261 +                       .c_value = 0xff000003u,
154262 +                       .arr_value = 0xff000004u,
154263 +                       .s_value = 0xff000005u,
154264 +               },
154265 +               .output_len = sizeof(struct core_reloc_existence_output),
154266 +       },
154268         /* bitfield relocation checks */
154269         BITFIELDS_CASE(bitfields, {
154270 @@ -857,13 +863,20 @@ void test_core_reloc(void)
154271                           "prog '%s' not found\n", probe_name))
154272                         goto cleanup;
154275 +               if (test_case->btf_src_file) {
154276 +                       err = access(test_case->btf_src_file, R_OK);
154277 +                       if (!ASSERT_OK(err, "btf_src_file"))
154278 +                               goto cleanup;
154279 +               }
154281                 load_attr.obj = obj;
154282                 load_attr.log_level = 0;
154283                 load_attr.target_btf_path = test_case->btf_src_file;
154284                 err = bpf_object__load_xattr(&load_attr);
154285                 if (err) {
154286                         if (!test_case->fails)
154287 -                               CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
154288 +                               ASSERT_OK(err, "obj_load");
154289                         goto cleanup;
154290                 }
154292 @@ -902,10 +915,8 @@ void test_core_reloc(void)
154293                         goto cleanup;
154294                 }
154296 -               if (test_case->fails) {
154297 -                       CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
154298 +               if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail"))
154299                         goto cleanup;
154300 -               }
154302                 equal = memcmp(data->out, test_case->output,
154303                                test_case->output_len) == 0;
154304 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
154305 deleted file mode 100644
154306 index dd0ffa518f36..000000000000
154307 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
154308 +++ /dev/null
154309 @@ -1,3 +0,0 @@
154310 -#include "core_reloc_types.h"
154312 -void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
154313 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
154314 deleted file mode 100644
154315 index bc83372088ad..000000000000
154316 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
154317 +++ /dev/null
154318 @@ -1,3 +0,0 @@
154319 -#include "core_reloc_types.h"
154321 -void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
154322 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
154323 deleted file mode 100644
154324 index 917bec41be08..000000000000
154325 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
154326 +++ /dev/null
154327 @@ -1,3 +0,0 @@
154328 -#include "core_reloc_types.h"
154330 -void f(struct core_reloc_existence___err_wrong_int_kind x) {}
154331 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
154332 deleted file mode 100644
154333 index 6ec7e6ec1c91..000000000000
154334 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
154335 +++ /dev/null
154336 @@ -1,3 +0,0 @@
154337 -#include "core_reloc_types.h"
154339 -void f(struct core_reloc_existence___err_wrong_int_sz x) {}
154340 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
154341 deleted file mode 100644
154342 index 7bbcacf2b0d1..000000000000
154343 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
154344 +++ /dev/null
154345 @@ -1,3 +0,0 @@
154346 -#include "core_reloc_types.h"
154348 -void f(struct core_reloc_existence___err_wrong_int_type x) {}
154349 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
154350 deleted file mode 100644
154351 index f384dd38ec70..000000000000
154352 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
154353 +++ /dev/null
154354 @@ -1,3 +0,0 @@
154355 -#include "core_reloc_types.h"
154357 -void f(struct core_reloc_existence___err_wrong_struct_type x) {}
154358 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
154359 new file mode 100644
154360 index 000000000000..d14b496190c3
154361 --- /dev/null
154362 +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
154363 @@ -0,0 +1,3 @@
154364 +#include "core_reloc_types.h"
154366 +void f(struct core_reloc_existence___wrong_field_defs x) {}
154367 diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
154368 index 9a2850850121..664eea1013aa 100644
154369 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
154370 +++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
154371 @@ -700,27 +700,11 @@ struct core_reloc_existence___minimal {
154372         int a;
154375 -struct core_reloc_existence___err_wrong_int_sz {
154376 -       short a;
154379 -struct core_reloc_existence___err_wrong_int_type {
154380 +struct core_reloc_existence___wrong_field_defs {
154381 +       void *a;
154382         int b[1];
154385 -struct core_reloc_existence___err_wrong_int_kind {
154386         struct{ int x; } c;
154389 -struct core_reloc_existence___err_wrong_arr_kind {
154390         int arr;
154393 -struct core_reloc_existence___err_wrong_arr_value_type {
154394 -       short arr[1];
154397 -struct core_reloc_existence___err_wrong_struct_type {
154398         int s;
154401 diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
154402 index 1b138cd2b187..1b1c798e9248 100644
154403 --- a/tools/testing/selftests/bpf/verifier/array_access.c
154404 +++ b/tools/testing/selftests/bpf/verifier/array_access.c
154405 @@ -186,7 +186,7 @@
154406         },
154407         .fixup_map_hash_48b = { 3 },
154408         .errstr_unpriv = "R0 leaks addr",
154409 -       .errstr = "invalid access to map value, value_size=48 off=44 size=8",
154410 +       .errstr = "R0 unbounded memory access",
154411         .result_unpriv = REJECT,
154412         .result = REJECT,
154413         .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
154414 diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
154415 index 6f3a70df63bc..e00435753008 100644
154416 --- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
154417 +++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
154418 @@ -120,12 +120,13 @@ __mirror_gre_test()
154419         sleep 5
154421         for ((i = 0; i < count; ++i)); do
154422 +               local sip=$(mirror_gre_ipv6_addr 1 $i)::1
154423                 local dip=$(mirror_gre_ipv6_addr 1 $i)::2
154424                 local htun=h3-gt6-$i
154425                 local message
154427                 icmp6_capture_install $htun
154428 -               mirror_test v$h1 "" $dip $htun 100 10
154429 +               mirror_test v$h1 $sip $dip $htun 100 10
154430                 icmp6_capture_uninstall $htun
154431         done
154433 diff --git a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
154434 index f813ffefc07e..65f43a7ce9c9 100644
154435 --- a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
154436 +++ b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
154437 @@ -55,10 +55,6 @@ port_test()
154438               | jq '.[][][] | select(.name=="physical_ports") |.["occ"]')
154440         [[ $occ -eq $max_ports ]]
154441 -       if [[ $should_fail -eq 0 ]]; then
154442 -               check_err $? "Mismatch ports number: Expected $max_ports, got $occ."
154443 -       else
154444 -               check_err_fail $should_fail $? "Reached more ports than expected"
154445 -       fi
154446 +       check_err_fail $should_fail $? "Attempt to create $max_ports ports (actual result $occ)"
154449 diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
154450 index b0cb1aaffdda..33ddd01689be 100644
154451 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
154452 +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
154453 @@ -507,8 +507,8 @@ do_red_test()
154454         check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
154455         local diff=$((limit - backlog))
154456         pct=$((100 * diff / limit))
154457 -       ((0 <= pct && pct <= 5))
154458 -       check_err $? "backlog $backlog / $limit expected <= 5% distance"
154459 +       ((0 <= pct && pct <= 10))
154460 +       check_err $? "backlog $backlog / $limit expected <= 10% distance"
154461         log_test "TC $((vlan - 10)): RED backlog > limit"
154463         stop_traffic
154464 diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
154465 index cc0f07e72cf2..aa74be9f47c8 100644
154466 --- a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
154467 +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
154468 @@ -98,11 +98,7 @@ __tc_flower_test()
154469                         jq -r '[ .[] | select(.kind == "flower") |
154470                         .options | .in_hw ]' | jq .[] | wc -l)
154471         [[ $((offload_count - 1)) -eq $count ]]
154472 -       if [[ $should_fail -eq 0 ]]; then
154473 -               check_err $? "Offload mismatch"
154474 -       else
154475 -               check_err_fail $should_fail $? "Offload more than expacted"
154476 -       fi
154477 +       check_err_fail $should_fail $? "Attempt to offload $count rules (actual result $((offload_count - 1)))"
154480  tc_flower_test()
154481 diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore
154482 index 0efcd494daab..af7557e821da 100644
154483 --- a/tools/testing/selftests/futex/functional/.gitignore
154484 +++ b/tools/testing/selftests/futex/functional/.gitignore
154485 @@ -6,3 +6,6 @@ futex_wait_private_mapped_file
154486  futex_wait_timeout
154487  futex_wait_uninitialized_heap
154488  futex_wait_wouldblock
154489 +futex2_wait
154490 +futex2_waitv
154491 +futex2_requeue
154492 diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
154493 index 23207829ec75..3ccb9ea58ddd 100644
154494 --- a/tools/testing/selftests/futex/functional/Makefile
154495 +++ b/tools/testing/selftests/futex/functional/Makefile
154496 @@ -1,10 +1,11 @@
154497  # SPDX-License-Identifier: GPL-2.0
154498 -INCLUDES := -I../include -I../../
154499 +INCLUDES := -I../include -I../../ -I../../../../../usr/include/
154500  CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES)
154501  LDLIBS := -lpthread -lrt
154503  HEADERS := \
154504         ../include/futextest.h \
154505 +       ../include/futex2test.h \
154506         ../include/atomic.h \
154507         ../include/logging.h
154508  TEST_GEN_FILES := \
154509 @@ -14,7 +15,10 @@ TEST_GEN_FILES := \
154510         futex_requeue_pi_signal_restart \
154511         futex_requeue_pi_mismatched_ops \
154512         futex_wait_uninitialized_heap \
154513 -       futex_wait_private_mapped_file
154514 +       futex_wait_private_mapped_file \
154515 +       futex2_wait \
154516 +       futex2_waitv \
154517 +       futex2_requeue
154519  TEST_PROGS := run.sh
154521 diff --git a/tools/testing/selftests/futex/functional/futex2_requeue.c b/tools/testing/selftests/futex/functional/futex2_requeue.c
154522 new file mode 100644
154523 index 000000000000..1bc3704dc8c2
154524 --- /dev/null
154525 +++ b/tools/testing/selftests/futex/functional/futex2_requeue.c
154526 @@ -0,0 +1,164 @@
154527 +// SPDX-License-Identifier: GPL-2.0-or-later
154528 +/******************************************************************************
154530 + *   Copyright Collabora Ltd., 2021
154532 + * DESCRIPTION
154533 + *     Test requeue mechanism of futex2, using 32bit sized futexes.
154535 + * AUTHOR
154536 + *     André Almeida <andrealmeid@collabora.com>
154538 + * HISTORY
154539 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
154541 + *****************************************************************************/
154543 +#include <errno.h>
154544 +#include <error.h>
154545 +#include <getopt.h>
154546 +#include <stdio.h>
154547 +#include <stdlib.h>
154548 +#include <string.h>
154549 +#include <time.h>
154550 +#include <pthread.h>
154551 +#include <sys/shm.h>
154552 +#include <limits.h>
154553 +#include "futex2test.h"
154554 +#include "logging.h"
154556 +#define TEST_NAME "futex2-wait"
154557 +#define timeout_ns  30000000
154558 +#define WAKE_WAIT_US 10000
154559 +volatile futex_t *f1;
154561 +void usage(char *prog)
154563 +       printf("Usage: %s\n", prog);
154564 +       printf("  -c    Use color\n");
154565 +       printf("  -h    Display this help message\n");
154566 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
154567 +              VQUIET, VCRITICAL, VINFO);
154570 +void *waiterfn(void *arg)
154572 +       struct timespec64 to64;
154574 +       /* setting absolute timeout for futex2 */
154575 +       if (gettime64(CLOCK_MONOTONIC, &to64))
154576 +               error("gettime64 failed\n", errno);
154578 +       to64.tv_nsec += timeout_ns;
154580 +       if (to64.tv_nsec >= 1000000000) {
154581 +               to64.tv_sec++;
154582 +               to64.tv_nsec -= 1000000000;
154583 +       }
154585 +       if (futex2_wait(f1, *f1, FUTEX_32, &to64))
154586 +               printf("waiter failed errno %d\n", errno);
154588 +       return NULL;
154591 +int main(int argc, char *argv[])
154593 +       pthread_t waiter[10];
154594 +       int res, ret = RET_PASS;
154595 +       int c, i;
154596 +       volatile futex_t _f1 = 0;
154597 +       volatile futex_t f2 = 0;
154598 +       struct futex_requeue r1, r2;
154600 +       f1 = &_f1;
154602 +       r1.flags = FUTEX_32;
154603 +       r2.flags = FUTEX_32;
154605 +       r1.uaddr = f1;
154606 +       r2.uaddr = &f2;
154608 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
154609 +               switch (c) {
154610 +               case 'c':
154611 +                       log_color(1);
154612 +                       break;
154613 +               case 'h':
154614 +                       usage(basename(argv[0]));
154615 +                       exit(0);
154616 +               case 'v':
154617 +                       log_verbosity(atoi(optarg));
154618 +                       break;
154619 +               default:
154620 +                       usage(basename(argv[0]));
154621 +                       exit(1);
154622 +               }
154623 +       }
154625 +       ksft_print_header();
154626 +       ksft_set_plan(2);
154627 +       ksft_print_msg("%s: Test FUTEX2_REQUEUE\n",
154628 +                      basename(argv[0]));
154630 +       /*
154631 +        * Requeue a waiter from f1 to f2, and wake f2.
154632 +        */
154633 +       if (pthread_create(&waiter[0], NULL, waiterfn, NULL))
154634 +               error("pthread_create failed\n", errno);
154636 +       usleep(WAKE_WAIT_US);
154638 +       res = futex2_requeue(&r1, &r2, 0, 1, 0, 0);
154639 +       if (res != 1) {
154640 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
154641 +                                     res ? errno : res,
154642 +                                     res ? strerror(errno) : "");
154643 +               ret = RET_FAIL;
154644 +       }
154647 +       info("Calling private futex2_wake on f2: %u @ %p with val=%u\n", f2, &f2, f2);
154648 +       res = futex2_wake(&f2, 1, FUTEX_32);
154649 +       if (res != 1) {
154650 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
154651 +                                     res ? errno : res,
154652 +                                     res ? strerror(errno) : "");
154653 +               ret = RET_FAIL;
154654 +       } else {
154655 +               ksft_test_result_pass("futex2_requeue simple succeeds\n");
154656 +       }
154659 +       /*
154660 +        * Create 10 waiters at f1. At futex_requeue, wake 3 and requeue 7.
154661 +        * At futex_wake, wake INT_MAX (should be exaclty 7).
154662 +        */
154663 +       for (i = 0; i < 10; i++) {
154664 +               if (pthread_create(&waiter[i], NULL, waiterfn, NULL))
154665 +                       error("pthread_create failed\n", errno);
154666 +       }
154668 +       usleep(WAKE_WAIT_US);
154670 +       res = futex2_requeue(&r1, &r2, 3, 7, 0, 0);
154671 +       if (res != 10) {
154672 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
154673 +                                     res ? errno : res,
154674 +                                     res ? strerror(errno) : "");
154675 +               ret = RET_FAIL;
154676 +       }
154678 +       res = futex2_wake(&f2, INT_MAX, FUTEX_32);
154679 +       if (res != 7) {
154680 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
154681 +                                     res ? errno : res,
154682 +                                     res ? strerror(errno) : "");
154683 +               ret = RET_FAIL;
154684 +       } else {
154685 +               ksft_test_result_pass("futex2_requeue succeeds\n");
154686 +       }
154688 +       ksft_print_cnts();
154689 +       return ret;
154691 diff --git a/tools/testing/selftests/futex/functional/futex2_wait.c b/tools/testing/selftests/futex/functional/futex2_wait.c
154692 new file mode 100644
154693 index 000000000000..4b5416585c79
154694 --- /dev/null
154695 +++ b/tools/testing/selftests/futex/functional/futex2_wait.c
154696 @@ -0,0 +1,209 @@
154697 +// SPDX-License-Identifier: GPL-2.0-or-later
154698 +/******************************************************************************
154700 + *   Copyright Collabora Ltd., 2021
154702 + * DESCRIPTION
154703 + *     Test wait/wake mechanism of futex2, using 32bit sized futexes.
154705 + * AUTHOR
154706 + *     André Almeida <andrealmeid@collabora.com>
154708 + * HISTORY
154709 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
154711 + *****************************************************************************/
154713 +#include <errno.h>
154714 +#include <error.h>
154715 +#include <getopt.h>
154716 +#include <stdio.h>
154717 +#include <stdlib.h>
154718 +#include <string.h>
154719 +#include <time.h>
154720 +#include <pthread.h>
154721 +#include <sys/shm.h>
154722 +#include <sys/mman.h>
154723 +#include <fcntl.h>
154724 +#include <string.h>
154725 +#include "futex2test.h"
154726 +#include "logging.h"
154728 +#define TEST_NAME "futex2-wait"
154729 +#define timeout_ns  30000000
154730 +#define WAKE_WAIT_US 10000
154731 +#define SHM_PATH "futex2_shm_file"
154732 +futex_t *f1;
154734 +void usage(char *prog)
154736 +       printf("Usage: %s\n", prog);
154737 +       printf("  -c    Use color\n");
154738 +       printf("  -h    Display this help message\n");
154739 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
154740 +              VQUIET, VCRITICAL, VINFO);
154743 +void *waiterfn(void *arg)
154745 +       struct timespec64 to64;
154746 +       unsigned int flags = 0;
154748 +       if (arg)
154749 +               flags = *((unsigned int *) arg);
154751 +       /* setting absolute timeout for futex2 */
154752 +       if (gettime64(CLOCK_MONOTONIC, &to64))
154753 +               error("gettime64 failed\n", errno);
154755 +       to64.tv_nsec += timeout_ns;
154757 +       if (to64.tv_nsec >= 1000000000) {
154758 +               to64.tv_sec++;
154759 +               to64.tv_nsec -= 1000000000;
154760 +       }
154762 +       if (futex2_wait(f1, *f1, FUTEX_32 | flags, &to64))
154763 +               printf("waiter failed errno %d\n", errno);
154765 +       return NULL;
154768 +void *waitershm(void *arg)
154770 +       futex2_wait(arg, 0, FUTEX_32 | FUTEX_SHARED_FLAG, NULL);
154772 +       return NULL;
154775 +int main(int argc, char *argv[])
154777 +       pthread_t waiter;
154778 +       unsigned int flags = FUTEX_SHARED_FLAG;
154779 +       int res, ret = RET_PASS;
154780 +       int c;
154781 +       futex_t f_private = 0;
154783 +       f1 = &f_private;
154785 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
154786 +               switch (c) {
154787 +               case 'c':
154788 +                       log_color(1);
154789 +                       break;
154790 +               case 'h':
154791 +                       usage(basename(argv[0]));
154792 +                       exit(0);
154793 +               case 'v':
154794 +                       log_verbosity(atoi(optarg));
154795 +                       break;
154796 +               default:
154797 +                       usage(basename(argv[0]));
154798 +                       exit(1);
154799 +               }
154800 +       }
154802 +       ksft_print_header();
154803 +       ksft_set_plan(3);
154804 +       ksft_print_msg("%s: Test FUTEX2_WAIT\n",
154805 +                      basename(argv[0]));
154807 +       /* Testing a private futex */
154808 +       info("Calling private futex2_wait on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
154810 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
154811 +               error("pthread_create failed\n", errno);
154813 +       usleep(WAKE_WAIT_US);
154815 +       info("Calling private futex2_wake on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
154816 +       res = futex2_wake(f1, 1, FUTEX_32);
154817 +       if (res != 1) {
154818 +               ksft_test_result_fail("futex2_wake private returned: %d %s\n",
154819 +                                     res ? errno : res,
154820 +                                     res ? strerror(errno) : "");
154821 +               ret = RET_FAIL;
154822 +       } else {
154823 +               ksft_test_result_pass("futex2_wake private succeeds\n");
154824 +       }
154826 +       int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
154828 +       if (shm_id < 0) {
154829 +               perror("shmget");
154830 +               exit(1);
154831 +       }
154833 +       /* Testing an anon page shared memory */
154834 +       unsigned int *shared_data = shmat(shm_id, NULL, 0);
154836 +       *shared_data = 0;
154837 +       f1 = shared_data;
154839 +       info("Calling shared futex2_wait on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
154841 +       if (pthread_create(&waiter, NULL, waiterfn, &flags))
154842 +               error("pthread_create failed\n", errno);
154844 +       usleep(WAKE_WAIT_US);
154846 +       info("Calling shared futex2_wake on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
154847 +       res = futex2_wake(f1, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
154848 +       if (res != 1) {
154849 +               ksft_test_result_fail("futex2_wake shared (shmget) returned: %d %s\n",
154850 +                                     res ? errno : res,
154851 +                                     res ? strerror(errno) : "");
154852 +               ret = RET_FAIL;
154853 +       } else {
154854 +               ksft_test_result_pass("futex2_wake shared (shmget) succeeds\n");
154855 +       }
154857 +       shmdt(shared_data);
154859 +       /* Testing a file backed shared memory */
154860 +       void *shm;
154861 +       int fd, pid;
154863 +       f_private = 0;
154865 +       fd = open(SHM_PATH, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
154866 +       if (fd < 0) {
154867 +               perror("open");
154868 +               exit(1);
154869 +       }
154871 +       res = ftruncate(fd, sizeof(f_private));
154872 +       if (res) {
154873 +               perror("ftruncate");
154874 +               exit(1);
154875 +       }
154877 +       shm = mmap(NULL, sizeof(f_private), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
154878 +       if (shm == MAP_FAILED) {
154879 +               perror("mmap");
154880 +               exit(1);
154881 +       }
154883 +       memcpy(shm, &f_private, sizeof(f_private));
154885 +       pthread_create(&waiter, NULL, waitershm, shm);
154887 +       usleep(WAKE_WAIT_US);
154889 +       res = futex2_wake(shm, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
154890 +       if (res != 1) {
154891 +               ksft_test_result_fail("futex2_wake shared (mmap) returned: %d %s\n",
154892 +                                     res ? errno : res,
154893 +                                     res ? strerror(errno) : "");
154894 +               ret = RET_FAIL;
154895 +       } else {
154896 +               ksft_test_result_pass("futex2_wake shared (mmap) succeeds\n");
154897 +       }
154899 +       munmap(shm, sizeof(f_private));
154901 +       remove(SHM_PATH);
154903 +       ksft_print_cnts();
154904 +       return ret;
154906 diff --git a/tools/testing/selftests/futex/functional/futex2_waitv.c b/tools/testing/selftests/futex/functional/futex2_waitv.c
154907 new file mode 100644
154908 index 000000000000..2f81d296d95d
154909 --- /dev/null
154910 +++ b/tools/testing/selftests/futex/functional/futex2_waitv.c
154911 @@ -0,0 +1,157 @@
154912 +// SPDX-License-Identifier: GPL-2.0-or-later
154913 +/******************************************************************************
154915 + *   Copyright Collabora Ltd., 2021
154917 + * DESCRIPTION
154918 + *     Test waitv/wake mechanism of futex2, using 32bit sized futexes.
154920 + * AUTHOR
154921 + *     André Almeida <andrealmeid@collabora.com>
154923 + * HISTORY
154924 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
154926 + *****************************************************************************/
154928 +#include <errno.h>
154929 +#include <error.h>
154930 +#include <getopt.h>
154931 +#include <stdio.h>
154932 +#include <stdlib.h>
154933 +#include <string.h>
154934 +#include <time.h>
154935 +#include <pthread.h>
154936 +#include <sys/shm.h>
154937 +#include "futex2test.h"
154938 +#include "logging.h"
154940 +#define TEST_NAME "futex2-wait"
154941 +#define timeout_ns  1000000000
154942 +#define WAKE_WAIT_US 10000
154943 +#define NR_FUTEXES 30
154944 +struct futex_waitv waitv[NR_FUTEXES];
154945 +u_int32_t futexes[NR_FUTEXES] = {0};
154947 +void usage(char *prog)
154949 +       printf("Usage: %s\n", prog);
154950 +       printf("  -c    Use color\n");
154951 +       printf("  -h    Display this help message\n");
154952 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
154953 +              VQUIET, VCRITICAL, VINFO);
154956 +void *waiterfn(void *arg)
154958 +       struct timespec64 to64;
154959 +       int res;
154961 +       /* setting absolute timeout for futex2 */
154962 +       if (gettime64(CLOCK_MONOTONIC, &to64))
154963 +               error("gettime64 failed\n", errno);
154965 +       to64.tv_sec++;
154967 +       res = futex2_waitv(waitv, NR_FUTEXES, 0, &to64);
154968 +       if (res < 0) {
154969 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
154970 +                                     res ? errno : res,
154971 +                                     res ? strerror(errno) : "");
154972 +       } else if (res != NR_FUTEXES - 1) {
154973 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
154974 +                                     res ? errno : res,
154975 +                                     res ? strerror(errno) : "");
154976 +       }
154978 +       return NULL;
154981 +int main(int argc, char *argv[])
154983 +       pthread_t waiter;
154984 +       int res, ret = RET_PASS;
154985 +       int c, i;
154987 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
154988 +               switch (c) {
154989 +               case 'c':
154990 +                       log_color(1);
154991 +                       break;
154992 +               case 'h':
154993 +                       usage(basename(argv[0]));
154994 +                       exit(0);
154995 +               case 'v':
154996 +                       log_verbosity(atoi(optarg));
154997 +                       break;
154998 +               default:
154999 +                       usage(basename(argv[0]));
155000 +                       exit(1);
155001 +               }
155002 +       }
155004 +       ksft_print_header();
155005 +       ksft_set_plan(2);
155006 +       ksft_print_msg("%s: Test FUTEX2_WAITV\n",
155007 +                      basename(argv[0]));
155009 +       for (i = 0; i < NR_FUTEXES; i++) {
155010 +               waitv[i].uaddr = &futexes[i];
155011 +               waitv[i].flags = FUTEX_32;
155012 +               waitv[i].val = 0;
155013 +       }
155015 +       /* Private waitv */
155016 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
155017 +               error("pthread_create failed\n", errno);
155019 +       usleep(WAKE_WAIT_US);
155021 +       res = futex2_wake(waitv[NR_FUTEXES - 1].uaddr, 1, FUTEX_32);
155022 +       if (res != 1) {
155023 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
155024 +                                     res ? errno : res,
155025 +                                     res ? strerror(errno) : "");
155026 +               ret = RET_FAIL;
155027 +       } else {
155028 +               ksft_test_result_pass("futex2_waitv private succeeds\n");
155029 +       }
155031 +       /* Shared waitv */
155032 +       for (i = 0; i < NR_FUTEXES; i++) {
155033 +               int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
155035 +               if (shm_id < 0) {
155036 +                       perror("shmget");
155037 +                       exit(1);
155038 +               }
155040 +               unsigned int *shared_data = shmat(shm_id, NULL, 0);
155042 +               *shared_data = 0;
155043 +               waitv[i].uaddr = shared_data;
155044 +               waitv[i].flags = FUTEX_32 | FUTEX_SHARED_FLAG;
155045 +               waitv[i].val = 0;
155046 +       }
155048 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
155049 +               error("pthread_create failed\n", errno);
155051 +       usleep(WAKE_WAIT_US);
155053 +       res = futex2_wake(waitv[NR_FUTEXES - 1].uaddr, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
155054 +       if (res != 1) {
155055 +               ksft_test_result_fail("futex2_waitv shared returned: %d %s\n",
155056 +                                     res ? errno : res,
155057 +                                     res ? strerror(errno) : "");
155058 +               ret = RET_FAIL;
155059 +       } else {
155060 +               ksft_test_result_pass("futex2_waitv shared succeeds\n");
155061 +       }
155063 +       for (i = 0; i < NR_FUTEXES; i++)
155064 +               shmdt(waitv[i].uaddr);
155066 +       ksft_print_cnts();
155067 +       return ret;
155069 diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
155070 index ee55e6d389a3..b4dffe9e3b44 100644
155071 --- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
155072 +++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
155073 @@ -11,6 +11,7 @@
155074   *
155075   * HISTORY
155076   *      2009-Nov-6: Initial version by Darren Hart <dvhart@linux.intel.com>
155077 + *      2021-Feb-5: Add futex2 test by André <andrealmeid@collabora.com>
155078   *
155079   *****************************************************************************/
155081 @@ -20,7 +21,7 @@
155082  #include <stdlib.h>
155083  #include <string.h>
155084  #include <time.h>
155085 -#include "futextest.h"
155086 +#include "futex2test.h"
155087  #include "logging.h"
155089  #define TEST_NAME "futex-wait-timeout"
155090 @@ -40,7 +41,8 @@ void usage(char *prog)
155091  int main(int argc, char *argv[])
155093         futex_t f1 = FUTEX_INITIALIZER;
155094 -       struct timespec to;
155095 +       struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
155096 +       struct timespec64 to64;
155097         int res, ret = RET_PASS;
155098         int c;
155100 @@ -65,22 +67,60 @@ int main(int argc, char *argv[])
155101         }
155103         ksft_print_header();
155104 -       ksft_set_plan(1);
155105 +       ksft_set_plan(3);
155106         ksft_print_msg("%s: Block on a futex and wait for timeout\n",
155107                basename(argv[0]));
155108         ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
155110 -       /* initialize timeout */
155111 -       to.tv_sec = 0;
155112 -       to.tv_nsec = timeout_ns;
155114         info("Calling futex_wait on f1: %u @ %p\n", f1, &f1);
155115         res = futex_wait(&f1, f1, &to, FUTEX_PRIVATE_FLAG);
155116         if (!res || errno != ETIMEDOUT) {
155117 -               fail("futex_wait returned %d\n", ret < 0 ? errno : ret);
155118 +               ksft_test_result_fail("futex_wait returned %d\n", ret < 0 ? errno : ret);
155119 +               ret = RET_FAIL;
155120 +       } else {
155121 +               ksft_test_result_pass("futex_wait timeout succeeds\n");
155122 +       }
155124 +       /* setting absolute monotonic timeout for futex2 */
155125 +       if (gettime64(CLOCK_MONOTONIC, &to64))
155126 +               error("gettime64 failed\n", errno);
155128 +       to64.tv_nsec += timeout_ns;
155130 +       if (to64.tv_nsec >= 1000000000) {
155131 +               to64.tv_sec++;
155132 +               to64.tv_nsec -= 1000000000;
155133 +       }
155135 +       info("Calling futex2_wait on f1: %u @ %p\n", f1, &f1);
155136 +       res = futex2_wait(&f1, f1, FUTEX_32, &to64);
155137 +       if (!res || errno != ETIMEDOUT) {
155138 +               ksft_test_result_fail("futex2_wait monotonic returned %d\n", ret < 0 ? errno : ret);
155139 +               ret = RET_FAIL;
155140 +       } else {
155141 +               ksft_test_result_pass("futex2_wait monotonic timeout succeeds\n");
155142 +       }
155144 +       /* setting absolute realtime timeout for futex2 */
155145 +       if (gettime64(CLOCK_REALTIME, &to64))
155146 +               error("gettime64 failed\n", errno);
155148 +       to64.tv_nsec += timeout_ns;
155150 +       if (to64.tv_nsec >= 1000000000) {
155151 +               to64.tv_sec++;
155152 +               to64.tv_nsec -= 1000000000;
155153 +       }
155155 +       info("Calling futex2_wait on f1: %u @ %p\n", f1, &f1);
155156 +       res = futex2_wait(&f1, f1, FUTEX_32 | FUTEX_CLOCK_REALTIME, &to64);
155157 +       if (!res || errno != ETIMEDOUT) {
155158 +               ksft_test_result_fail("futex2_wait realtime returned %d\n", ret < 0 ? errno : ret);
155159                 ret = RET_FAIL;
155160 +       } else {
155161 +               ksft_test_result_pass("futex2_wait realtime timeout succeeds\n");
155162         }
155164 -       print_result(TEST_NAME, ret);
155165 +       ksft_print_cnts();
155166         return ret;
155168 diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
155169 index 0ae390ff8164..ed3660090907 100644
155170 --- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
155171 +++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
155172 @@ -12,6 +12,7 @@
155173   *
155174   * HISTORY
155175   *      2009-Nov-14: Initial version by Gowrishankar <gowrishankar.m@in.ibm.com>
155176 + *      2021-Feb-5: Add futex2 test by André <andrealmeid@collabora.com>
155177   *
155178   *****************************************************************************/
155180 @@ -21,7 +22,7 @@
155181  #include <stdlib.h>
155182  #include <string.h>
155183  #include <time.h>
155184 -#include "futextest.h"
155185 +#include "futex2test.h"
155186  #include "logging.h"
155188  #define TEST_NAME "futex-wait-wouldblock"
155189 @@ -39,6 +40,7 @@ void usage(char *prog)
155190  int main(int argc, char *argv[])
155192         struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
155193 +       struct timespec64 to64;
155194         futex_t f1 = FUTEX_INITIALIZER;
155195         int res, ret = RET_PASS;
155196         int c;
155197 @@ -61,18 +63,41 @@ int main(int argc, char *argv[])
155198         }
155200         ksft_print_header();
155201 -       ksft_set_plan(1);
155202 +       ksft_set_plan(2);
155203         ksft_print_msg("%s: Test the unexpected futex value in FUTEX_WAIT\n",
155204                basename(argv[0]));
155206         info("Calling futex_wait on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
155207         res = futex_wait(&f1, f1+1, &to, FUTEX_PRIVATE_FLAG);
155208         if (!res || errno != EWOULDBLOCK) {
155209 -               fail("futex_wait returned: %d %s\n",
155210 +               ksft_test_result_fail("futex_wait returned: %d %s\n",
155211                      res ? errno : res, res ? strerror(errno) : "");
155212                 ret = RET_FAIL;
155213 +       } else {
155214 +               ksft_test_result_pass("futex_wait wouldblock succeeds\n");
155215         }
155217 -       print_result(TEST_NAME, ret);
155218 +       /* setting absolute timeout for futex2 */
155219 +       if (gettime64(CLOCK_MONOTONIC, &to64))
155220 +               error("gettime64 failed\n", errno);
155222 +       to64.tv_nsec += timeout_ns;
155224 +       if (to64.tv_nsec >= 1000000000) {
155225 +               to64.tv_sec++;
155226 +               to64.tv_nsec -= 1000000000;
155227 +       }
155229 +       info("Calling futex2_wait on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
155230 +       res = futex2_wait(&f1, f1+1, FUTEX_32, &to64);
155231 +       if (!res || errno != EWOULDBLOCK) {
155232 +               ksft_test_result_fail("futex2_wait returned: %d %s\n",
155233 +                    res ? errno : res, res ? strerror(errno) : "");
155234 +               ret = RET_FAIL;
155235 +       } else {
155236 +               ksft_test_result_pass("futex2_wait wouldblock succeeds\n");
155237 +       }
155239 +       ksft_print_cnts();
155240         return ret;
155242 diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
155243 index 1acb6ace1680..18b3883d7236 100755
155244 --- a/tools/testing/selftests/futex/functional/run.sh
155245 +++ b/tools/testing/selftests/futex/functional/run.sh
155246 @@ -73,3 +73,9 @@ echo
155247  echo
155248  ./futex_wait_uninitialized_heap $COLOR
155249  ./futex_wait_private_mapped_file $COLOR
155251 +echo
155252 +./futex2_wait $COLOR
155254 +echo
155255 +./futex2_waitv $COLOR
155256 diff --git a/tools/testing/selftests/futex/include/futex2test.h b/tools/testing/selftests/futex/include/futex2test.h
155257 new file mode 100644
155258 index 000000000000..e2635006b1a9
155259 --- /dev/null
155260 +++ b/tools/testing/selftests/futex/include/futex2test.h
155261 @@ -0,0 +1,121 @@
155262 +/* SPDX-License-Identifier: GPL-2.0-or-later */
155263 +/******************************************************************************
155265 + *   Copyright Collabora Ltd., 2021
155267 + * DESCRIPTION
155268 + *     Futex2 library addons for old futex library
155270 + * AUTHOR
155271 + *     André Almeida <andrealmeid@collabora.com>
155273 + * HISTORY
155274 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
155276 + *****************************************************************************/
155277 +#include "futextest.h"
155278 +#include <stdio.h>
155280 +#define NSEC_PER_SEC   1000000000L
155282 +#ifndef FUTEX_8
155283 +# define FUTEX_8       0
155284 +#endif
155285 +#ifndef FUTEX_16
155286 +# define FUTEX_16      1
155287 +#endif
155288 +#ifndef FUTEX_32
155289 +# define FUTEX_32      2
155290 +#endif
155292 +#ifndef FUTEX_SHARED_FLAG
155293 +#define FUTEX_SHARED_FLAG 8
155294 +#endif
155296 +#ifndef FUTEX_WAITV_MAX
155297 +#define FUTEX_WAITV_MAX 128
155298 +struct futex_waitv {
155299 +       void *uaddr;
155300 +       unsigned int val;
155301 +       unsigned int flags;
155303 +#endif
155306 + * - Y2038 section for 32-bit applications -
155308 + * Remove this when glibc is ready for y2038. Then, always compile with
155309 + * `-DTIME_BITS=64` or `-D__USE_TIME_BITS64`. glibc will provide both
155310 + * timespec64 and clock_gettime64 so we won't need to define here.
155311 + */
155312 +#if defined(__i386__) || __TIMESIZE == 32
155313 +# define NR_gettime __NR_clock_gettime64
155314 +#else
155315 +# define NR_gettime __NR_clock_gettime
155316 +#endif
155318 +struct timespec64 {
155319 +       long long tv_sec;       /* seconds */
155320 +       long long tv_nsec;      /* nanoseconds */
155323 +int gettime64(clock_t clockid, struct timespec64 *tv)
155325 +       return syscall(NR_gettime, clockid, tv);
155328 + * - End of Y2038 section -
155329 + */
155332 + * futex2_wait - If (*uaddr == val), wait at uaddr until timo
155333 + * @uaddr: User address to wait on
155334 + * @val:   Expected value at uaddr, return if is not equal
155335 + * @flags: Operation flags
155336 + * @timo:  Optional timeout for operation
155337 + */
155338 +static inline int futex2_wait(volatile void *uaddr, unsigned long val,
155339 +                             unsigned long flags, struct timespec64 *timo)
155341 +       return syscall(__NR_futex_wait, uaddr, val, flags, timo);
155345 + * futex2_wake - Wake a number of waiters at uaddr
155346 + * @uaddr: Address to wake
155347 + * @nr:    Number of waiters to wake
155348 + * @flags: Operation flags
155349 + */
155350 +static inline int futex2_wake(volatile void *uaddr, unsigned int nr, unsigned long flags)
155352 +       return syscall(__NR_futex_wake, uaddr, nr, flags);
155356 + * futex2_waitv - Wait at multiple futexes, wake on any
155357 + * @waiters:    Array of waiters
155358 + * @nr_waiters: Length of waiters array
155359 + * @flags: Operation flags
155360 + * @timo:  Optional timeout for operation
155361 + */
155362 +static inline int futex2_waitv(volatile struct futex_waitv *waiters, unsigned long nr_waiters,
155363 +                             unsigned long flags, struct timespec64 *timo)
155365 +       return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, timo);
155369 + * futex2_requeue - Wake futexes at uaddr1 and requeue from uaddr1 to uaddr2
155370 + * @uaddr1:     Original address to wake and requeue from
155371 + * @uaddr2:     Address to requeue to
155372 + * @nr_wake:    Number of futexes to wake at uaddr1 before requeuing
155373 + * @nr_requeue: Number of futexes to requeue from uaddr1 to uaddr2
155374 + * @cmpval:     If (uaddr1->uaddr != cmpval), return immediatally
155375 + * @flgas:      Operation flags
155376 + */
155377 +static inline int futex2_requeue(struct futex_requeue *uaddr1, struct futex_requeue *uaddr2,
155378 +                                unsigned int nr_wake, unsigned int nr_requeue,
155379 +                                unsigned int cmpval, unsigned long flags)
155381 +       return syscall(__NR_futex_requeue, uaddr1, uaddr2, nr_wake, nr_requeue, cmpval, flags);
155383 diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
155384 index bb2752d78fe3..81edbd23d371 100644
155385 --- a/tools/testing/selftests/kvm/dirty_log_test.c
155386 +++ b/tools/testing/selftests/kvm/dirty_log_test.c
155387 @@ -17,6 +17,7 @@
155388  #include <linux/bitmap.h>
155389  #include <linux/bitops.h>
155390  #include <asm/barrier.h>
155391 +#include <linux/atomic.h>
155393  #include "kvm_util.h"
155394  #include "test_util.h"
155395 @@ -137,12 +138,20 @@ static uint64_t host_clear_count;
155396  static uint64_t host_track_next_count;
155398  /* Whether dirty ring reset is requested, or finished */
155399 -static sem_t dirty_ring_vcpu_stop;
155400 -static sem_t dirty_ring_vcpu_cont;
155401 +static sem_t sem_vcpu_stop;
155402 +static sem_t sem_vcpu_cont;
155404 + * This is only set by main thread, and only cleared by vcpu thread.  It is
155405 + * used to request vcpu thread to stop at the next GUEST_SYNC, since GUEST_SYNC
155406 + * is the only place that we'll guarantee both "dirty bit" and "dirty data"
155407 + * will match.  E.g., SIG_IPI won't guarantee that if the vcpu is interrupted
155408 + * after setting dirty bit but before the data is written.
155409 + */
155410 +static atomic_t vcpu_sync_stop_requested;
155412   * This is updated by the vcpu thread to tell the host whether it's a
155413   * ring-full event.  It should only be read until a sem_wait() of
155414 - * dirty_ring_vcpu_stop and before vcpu continues to run.
155415 + * sem_vcpu_stop and before vcpu continues to run.
155416   */
155417  static bool dirty_ring_vcpu_ring_full;
155419 @@ -234,6 +243,17 @@ static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
155420         kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
155423 +/* Should only be called after a GUEST_SYNC */
155424 +static void vcpu_handle_sync_stop(void)
155426 +       if (atomic_read(&vcpu_sync_stop_requested)) {
155427 +               /* It means main thread is sleeping waiting */
155428 +               atomic_set(&vcpu_sync_stop_requested, false);
155429 +               sem_post(&sem_vcpu_stop);
155430 +               sem_wait_until(&sem_vcpu_cont);
155431 +       }
155434  static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
155436         struct kvm_run *run = vcpu_state(vm, VCPU_ID);
155437 @@ -244,6 +264,8 @@ static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
155438         TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
155439                     "Invalid guest sync status: exit_reason=%s\n",
155440                     exit_reason_str(run->exit_reason));
155442 +       vcpu_handle_sync_stop();
155445  static bool dirty_ring_supported(void)
155446 @@ -301,13 +323,13 @@ static void dirty_ring_wait_vcpu(void)
155448         /* This makes sure that hardware PML cache flushed */
155449         vcpu_kick();
155450 -       sem_wait_until(&dirty_ring_vcpu_stop);
155451 +       sem_wait_until(&sem_vcpu_stop);
155454  static void dirty_ring_continue_vcpu(void)
155456         pr_info("Notifying vcpu to continue\n");
155457 -       sem_post(&dirty_ring_vcpu_cont);
155458 +       sem_post(&sem_vcpu_cont);
155461  static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
155462 @@ -361,11 +383,11 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
155463                 /* Update the flag first before pause */
155464                 WRITE_ONCE(dirty_ring_vcpu_ring_full,
155465                            run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
155466 -               sem_post(&dirty_ring_vcpu_stop);
155467 +               sem_post(&sem_vcpu_stop);
155468                 pr_info("vcpu stops because %s...\n",
155469                         dirty_ring_vcpu_ring_full ?
155470                         "dirty ring is full" : "vcpu is kicked out");
155471 -               sem_wait_until(&dirty_ring_vcpu_cont);
155472 +               sem_wait_until(&sem_vcpu_cont);
155473                 pr_info("vcpu continues now.\n");
155474         } else {
155475                 TEST_ASSERT(false, "Invalid guest sync status: "
155476 @@ -377,7 +399,7 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
155477  static void dirty_ring_before_vcpu_join(void)
155479         /* Kick another round of vcpu just to make sure it will quit */
155480 -       sem_post(&dirty_ring_vcpu_cont);
155481 +       sem_post(&sem_vcpu_cont);
155484  struct log_mode {
155485 @@ -505,9 +527,8 @@ static void *vcpu_worker(void *data)
155486          */
155487         sigmask->len = 8;
155488         pthread_sigmask(0, NULL, sigset);
155489 +       sigdelset(sigset, SIG_IPI);
155490         vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
155491 -       sigaddset(sigset, SIG_IPI);
155492 -       pthread_sigmask(SIG_BLOCK, sigset, NULL);
155494         sigemptyset(sigset);
155495         sigaddset(sigset, SIG_IPI);
155496 @@ -768,7 +789,25 @@ static void run_test(enum vm_guest_mode mode, void *arg)
155497                 usleep(p->interval * 1000);
155498                 log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
155499                                              bmap, host_num_pages);
155501 +               /*
155502 +                * See vcpu_sync_stop_requested definition for details on why
155503 +                * we need to stop vcpu when verify data.
155504 +                */
155505 +               atomic_set(&vcpu_sync_stop_requested, true);
155506 +               sem_wait_until(&sem_vcpu_stop);
155507 +               /*
155508 +                * NOTE: for dirty ring, it's possible that we didn't stop at
155509 +                * GUEST_SYNC but instead we stopped because ring is full;
155510 +                * that's okay too because ring full means we're only missing
155511 +                * the flush of the last page, and since we handle the last
155512 +                * page specially verification will succeed anyway.
155513 +                */
155514 +               assert(host_log_mode == LOG_MODE_DIRTY_RING ||
155515 +                      atomic_read(&vcpu_sync_stop_requested) == false);
155516                 vm_dirty_log_verify(mode, bmap);
155517 +               sem_post(&sem_vcpu_cont);
155519                 iteration++;
155520                 sync_global_to_guest(vm, iteration);
155521         }
155522 @@ -818,9 +857,10 @@ int main(int argc, char *argv[])
155523                 .interval = TEST_HOST_LOOP_INTERVAL,
155524         };
155525         int opt, i;
155526 +       sigset_t sigset;
155528 -       sem_init(&dirty_ring_vcpu_stop, 0, 0);
155529 -       sem_init(&dirty_ring_vcpu_cont, 0, 0);
155530 +       sem_init(&sem_vcpu_stop, 0, 0);
155531 +       sem_init(&sem_vcpu_cont, 0, 0);
155533         guest_modes_append_default();
155535 @@ -876,6 +916,11 @@ int main(int argc, char *argv[])
155537         srandom(time(0));
155539 +       /* Ensure that vCPU threads start with SIG_IPI blocked.  */
155540 +       sigemptyset(&sigset);
155541 +       sigaddset(&sigset, SIG_IPI);
155542 +       pthread_sigmask(SIG_BLOCK, &sigset, NULL);
155544         if (host_log_mode_option == LOG_MODE_ALL) {
155545                 /* Run each log mode */
155546                 for (i = 0; i < LOG_MODE_NUM; i++) {
155547 diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
155548 index a5ce26d548e4..0af84ad48aa7 100644
155549 --- a/tools/testing/selftests/lib.mk
155550 +++ b/tools/testing/selftests/lib.mk
155551 @@ -1,6 +1,10 @@
155552  # This mimics the top-level Makefile. We do it explicitly here so that this
155553  # Makefile can operate with or without the kbuild infrastructure.
155554 +ifneq ($(LLVM),)
155555 +CC := clang
155556 +else
155557  CC := $(CROSS_COMPILE)gcc
155558 +endif
155560  ifeq (0,$(MAKELEVEL))
155561      ifeq ($(OUTPUT),)
155562 @@ -74,7 +78,8 @@ ifdef building_out_of_srctree
155563                 rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
155564         fi
155565         @if [ "X$(TEST_PROGS)" != "X" ]; then \
155566 -               $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(OUTPUT)/$(TEST_PROGS)) ; \
155567 +               $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
155568 +                                 $(addprefix $(OUTPUT)/,$(TEST_PROGS))) ; \
155569         else \
155570                 $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS)); \
155571         fi
155572 diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
155573 index c02291e9841e..880e3ab9d088 100755
155574 --- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
155575 +++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
155576 @@ -271,7 +271,7 @@ test_span_gre_fdb_roaming()
155578         while ((RET == 0)); do
155579                 bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
155580 -               bridge fdb add dev $swp2 $h3mac vlan 555 master
155581 +               bridge fdb add dev $swp2 $h3mac vlan 555 master static
155582                 sleep 1
155583                 fail_test_span_gre_dir $tundev ingress
155585 diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
155586 index 13db1cb50e57..6406cd76a19d 100644
155587 --- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
155588 +++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
155589 @@ -20,6 +20,13 @@ mirror_uninstall()
155590         tc filter del dev $swp1 $direction pref 1000
155593 +is_ipv6()
155595 +       local addr=$1; shift
155597 +       [[ -z ${addr//[0-9a-fA-F:]/} ]]
155600  mirror_test()
155602         local vrf_name=$1; shift
155603 @@ -29,9 +36,17 @@ mirror_test()
155604         local pref=$1; shift
155605         local expect=$1; shift
155607 +       if is_ipv6 $dip; then
155608 +               local proto=-6
155609 +               local type="icmp6 type=128" # Echo request.
155610 +       else
155611 +               local proto=
155612 +               local type="icmp echoreq"
155613 +       fi
155615         local t0=$(tc_rule_stats_get $dev $pref)
155616 -       $MZ $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
155617 -           -c 10 -d 100msec -t icmp type=8
155618 +       $MZ $proto $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
155619 +           -c 10 -d 100msec -t $type
155620         sleep 0.5
155621         local t1=$(tc_rule_stats_get $dev $pref)
155622         local delta=$((t1 - t0))
155623 diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
155624 index 39edce4f541c..2674ba20d524 100755
155625 --- a/tools/testing/selftests/net/mptcp/diag.sh
155626 +++ b/tools/testing/selftests/net/mptcp/diag.sh
155627 @@ -5,8 +5,9 @@ rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
155628  ns="ns1-$rndh"
155629  ksft_skip=4
155630  test_cnt=1
155631 +timeout_poll=100
155632 +timeout_test=$((timeout_poll * 2 + 1))
155633  ret=0
155634 -pids=()
155636  flush_pids()
155638 @@ -14,18 +15,14 @@ flush_pids()
155639         # give it some time
155640         sleep 1.1
155642 -       for pid in ${pids[@]}; do
155643 -               [ -d /proc/$pid ] && kill -SIGUSR1 $pid >/dev/null 2>&1
155644 -       done
155645 -       pids=()
155646 +       ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGUSR1 &>/dev/null
155649  cleanup()
155651 +       ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGKILL &>/dev/null
155653         ip netns del $ns
155654 -       for pid in ${pids[@]}; do
155655 -               [ -d /proc/$pid ] && kill -9 $pid >/dev/null 2>&1
155656 -       done
155659  ip -Version > /dev/null 2>&1
155660 @@ -79,39 +76,57 @@ trap cleanup EXIT
155661  ip netns add $ns
155662  ip -n $ns link set dev lo up
155664 -echo "a" | ip netns exec $ns ./mptcp_connect -p 10000 -l 0.0.0.0 -t 100 >/dev/null &
155665 +echo "a" | \
155666 +       timeout ${timeout_test} \
155667 +               ip netns exec $ns \
155668 +                       ./mptcp_connect -p 10000 -l -t ${timeout_poll} \
155669 +                               0.0.0.0 >/dev/null &
155670  sleep 0.1
155671 -pids[0]=$!
155672  chk_msk_nr 0 "no msk on netns creation"
155674 -echo "b" | ip netns exec $ns ./mptcp_connect -p 10000 127.0.0.1 -j -t 100 >/dev/null &
155675 +echo "b" | \
155676 +       timeout ${timeout_test} \
155677 +               ip netns exec $ns \
155678 +                       ./mptcp_connect -p 10000 -j -t ${timeout_poll} \
155679 +                               127.0.0.1 >/dev/null &
155680  sleep 0.1
155681 -pids[1]=$!
155682  chk_msk_nr 2 "after MPC handshake "
155683  chk_msk_remote_key_nr 2 "....chk remote_key"
155684  chk_msk_fallback_nr 0 "....chk no fallback"
155685  flush_pids
155688 -echo "a" | ip netns exec $ns ./mptcp_connect -p 10001 -s TCP -l 0.0.0.0 -t 100 >/dev/null &
155689 -pids[0]=$!
155690 +echo "a" | \
155691 +       timeout ${timeout_test} \
155692 +               ip netns exec $ns \
155693 +                       ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
155694 +                               0.0.0.0 >/dev/null &
155695  sleep 0.1
155696 -echo "b" | ip netns exec $ns ./mptcp_connect -p 10001 127.0.0.1 -j -t 100 >/dev/null &
155697 -pids[1]=$!
155698 +echo "b" | \
155699 +       timeout ${timeout_test} \
155700 +               ip netns exec $ns \
155701 +                       ./mptcp_connect -p 10001 -j -t ${timeout_poll} \
155702 +                               127.0.0.1 >/dev/null &
155703  sleep 0.1
155704  chk_msk_fallback_nr 1 "check fallback"
155705  flush_pids
155707  NR_CLIENTS=100
155708  for I in `seq 1 $NR_CLIENTS`; do
155709 -       echo "a" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) -l 0.0.0.0 -t 100 -w 10 >/dev/null  &
155710 -       pids[$((I*2))]=$!
155711 +       echo "a" | \
155712 +               timeout ${timeout_test} \
155713 +                       ip netns exec $ns \
155714 +                               ./mptcp_connect -p $((I+10001)) -l -w 10 \
155715 +                                       -t ${timeout_poll} 0.0.0.0 >/dev/null &
155716  done
155717  sleep 0.1
155719  for I in `seq 1 $NR_CLIENTS`; do
155720 -       echo "b" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) 127.0.0.1 -t 100 -w 10 >/dev/null &
155721 -       pids[$((I*2 + 1))]=$!
155722 +       echo "b" | \
155723 +               timeout ${timeout_test} \
155724 +                       ip netns exec $ns \
155725 +                               ./mptcp_connect -p $((I+10001)) -w 10 \
155726 +                                       -t ${timeout_poll} 127.0.0.1 >/dev/null &
155727  done
155728  sleep 1.5
155730 diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
155731 index 10a030b53b23..65b3b983efc2 100755
155732 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
155733 +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
155734 @@ -11,7 +11,8 @@ cin=""
155735  cout=""
155736  ksft_skip=4
155737  capture=false
155738 -timeout=30
155739 +timeout_poll=30
155740 +timeout_test=$((timeout_poll * 2 + 1))
155741  ipv6=true
155742  ethtool_random_on=true
155743  tc_delay="$((RANDOM%50))"
155744 @@ -273,7 +274,7 @@ check_mptcp_disabled()
155745         ip netns exec ${disabled_ns} sysctl -q net.mptcp.enabled=0
155747         local err=0
155748 -       LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -t $timeout -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
155749 +       LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
155750                 grep -q "^socket: Protocol not available$" && err=1
155751         ip netns delete ${disabled_ns}
155753 @@ -430,14 +431,20 @@ do_transfer()
155754         local stat_cookietx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
155755         local stat_cookierx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
155757 -       ip netns exec ${listener_ns} ./mptcp_connect -t $timeout -l -p $port -s ${srv_proto} $extra_args $local_addr < "$sin" > "$sout" &
155758 +       timeout ${timeout_test} \
155759 +               ip netns exec ${listener_ns} \
155760 +                       ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
155761 +                               $extra_args $local_addr < "$sin" > "$sout" &
155762         local spid=$!
155764         wait_local_port_listen "${listener_ns}" "${port}"
155766         local start
155767         start=$(date +%s%3N)
155768 -       ip netns exec ${connector_ns} ./mptcp_connect -t $timeout -p $port -s ${cl_proto} $extra_args $connect_addr < "$cin" > "$cout" &
155769 +       timeout ${timeout_test} \
155770 +               ip netns exec ${connector_ns} \
155771 +                       ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
155772 +                               $extra_args $connect_addr < "$cin" > "$cout" &
155773         local cpid=$!
155775         wait $cpid
155776 diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
155777 index ad32240fbfda..43ed99de7734 100755
155778 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
155779 +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
155780 @@ -8,7 +8,8 @@ cin=""
155781  cinsent=""
155782  cout=""
155783  ksft_skip=4
155784 -timeout=30
155785 +timeout_poll=30
155786 +timeout_test=$((timeout_poll * 2 + 1))
155787  mptcp_connect=""
155788  capture=0
155789  do_all_tests=1
155790 @@ -245,17 +246,26 @@ do_transfer()
155791                 local_addr="0.0.0.0"
155792         fi
155794 -       ip netns exec ${listener_ns} $mptcp_connect -t $timeout -l -p $port \
155795 -               -s ${srv_proto} ${local_addr} < "$sin" > "$sout" &
155796 +       timeout ${timeout_test} \
155797 +               ip netns exec ${listener_ns} \
155798 +                       $mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
155799 +                               ${local_addr} < "$sin" > "$sout" &
155800         spid=$!
155802         sleep 1
155804         if [ "$test_link_fail" -eq 0 ];then
155805 -               ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
155806 +               timeout ${timeout_test} \
155807 +                       ip netns exec ${connector_ns} \
155808 +                               $mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
155809 +                                       $connect_addr < "$cin" > "$cout" &
155810         else
155811 -               ( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | tee "$cinsent" | \
155812 -               ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr > "$cout" &
155813 +               ( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | \
155814 +                       tee "$cinsent" | \
155815 +                       timeout ${timeout_test} \
155816 +                               ip netns exec ${connector_ns} \
155817 +                                       $mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
155818 +                                               $connect_addr > "$cout" &
155819         fi
155820         cpid=$!
155822 diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
155823 index f039ee57eb3c..3aeef3bcb101 100755
155824 --- a/tools/testing/selftests/net/mptcp/simult_flows.sh
155825 +++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
155826 @@ -7,7 +7,8 @@ ns2="ns2-$rndh"
155827  ns3="ns3-$rndh"
155828  capture=false
155829  ksft_skip=4
155830 -timeout=30
155831 +timeout_poll=30
155832 +timeout_test=$((timeout_poll * 2 + 1))
155833  test_cnt=1
155834  ret=0
155835  bail=0
155836 @@ -157,14 +158,20 @@ do_transfer()
155837                 sleep 1
155838         fi
155840 -       ip netns exec ${ns3} ./mptcp_connect -jt $timeout -l -p $port 0.0.0.0 < "$sin" > "$sout" &
155841 +       timeout ${timeout_test} \
155842 +               ip netns exec ${ns3} \
155843 +                       ./mptcp_connect -jt ${timeout_poll} -l -p $port \
155844 +                               0.0.0.0 < "$sin" > "$sout" &
155845         local spid=$!
155847         wait_local_port_listen "${ns3}" "${port}"
155849         local start
155850         start=$(date +%s%3N)
155851 -       ip netns exec ${ns1} ./mptcp_connect -jt $timeout -p $port 10.0.3.3 < "$cin" > "$cout" &
155852 +       timeout ${timeout_test} \
155853 +               ip netns exec ${ns1} \
155854 +                       ./mptcp_connect -jt ${timeout_poll} -p $port \
155855 +                               10.0.3.3 < "$cin" > "$cout" &
155856         local cpid=$!
155858         wait $cpid
155859 diff --git a/tools/testing/selftests/powerpc/security/entry_flush.c b/tools/testing/selftests/powerpc/security/entry_flush.c
155860 index 78cf914fa321..68ce377b205e 100644
155861 --- a/tools/testing/selftests/powerpc/security/entry_flush.c
155862 +++ b/tools/testing/selftests/powerpc/security/entry_flush.c
155863 @@ -53,7 +53,7 @@ int entry_flush_test(void)
155865         entry_flush = entry_flush_orig;
155867 -       fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
155868 +       fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
155869         FAIL_IF(fd < 0);
155871         p = (char *)memalign(zero_size, CACHELINE_SIZE);
155872 diff --git a/tools/testing/selftests/powerpc/security/flush_utils.h b/tools/testing/selftests/powerpc/security/flush_utils.h
155873 index 07a5eb301466..7a3d60292916 100644
155874 --- a/tools/testing/selftests/powerpc/security/flush_utils.h
155875 +++ b/tools/testing/selftests/powerpc/security/flush_utils.h
155876 @@ -9,6 +9,10 @@
155878  #define CACHELINE_SIZE 128
155880 +#define PERF_L1D_READ_MISS_CONFIG      ((PERF_COUNT_HW_CACHE_L1D) |            \
155881 +                                       (PERF_COUNT_HW_CACHE_OP_READ << 8) |    \
155882 +                                       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16))
155884  void syscall_loop(char *p, unsigned long iterations,
155885                   unsigned long zero_size);
155887 diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c
155888 index 7565fd786640..f73484a6470f 100644
155889 --- a/tools/testing/selftests/powerpc/security/rfi_flush.c
155890 +++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
155891 @@ -54,7 +54,7 @@ int rfi_flush_test(void)
155893         rfi_flush = rfi_flush_orig;
155895 -       fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
155896 +       fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
155897         FAIL_IF(fd < 0);
155899         p = (char *)memalign(zero_size, CACHELINE_SIZE);
155900 diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
155901 index d585cc1948cc..6bcee2ec91a9 100644
155902 --- a/tools/testing/selftests/resctrl/Makefile
155903 +++ b/tools/testing/selftests/resctrl/Makefile
155904 @@ -1,5 +1,5 @@
155905  CC = $(CROSS_COMPILE)gcc
155906 -CFLAGS = -g -Wall
155907 +CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
155908  SRCS=$(wildcard *.c)
155909  OBJS=$(SRCS:.c=.o)
155911 diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
155912 index 38dbf4962e33..5922cc1b0386 100644
155913 --- a/tools/testing/selftests/resctrl/cache.c
155914 +++ b/tools/testing/selftests/resctrl/cache.c
155915 @@ -182,7 +182,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
155916         /*
155917          * Measure cache miss from perf.
155918          */
155919 -       if (!strcmp(param->resctrl_val, "cat")) {
155920 +       if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) {
155921                 ret = get_llc_perf(&llc_perf_miss);
155922                 if (ret < 0)
155923                         return ret;
155924 @@ -192,7 +192,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
155925         /*
155926          * Measure llc occupancy from resctrl.
155927          */
155928 -       if (!strcmp(param->resctrl_val, "cqm")) {
155929 +       if (!strncmp(param->resctrl_val, CQM_STR, sizeof(CQM_STR))) {
155930                 ret = get_llc_occu_resctrl(&llc_occu_resc);
155931                 if (ret < 0)
155932                         return ret;
155933 @@ -234,7 +234,7 @@ int cat_val(struct resctrl_val_param *param)
155934         if (ret)
155935                 return ret;
155937 -       if ((strcmp(resctrl_val, "cat") == 0)) {
155938 +       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
155939                 ret = initialize_llc_perf();
155940                 if (ret)
155941                         return ret;
155942 @@ -242,7 +242,7 @@ int cat_val(struct resctrl_val_param *param)
155944         /* Test runs until the callback setup() tells the test to stop. */
155945         while (1) {
155946 -               if (strcmp(resctrl_val, "cat") == 0) {
155947 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
155948                         ret = param->setup(1, param);
155949                         if (ret) {
155950                                 ret = 0;
155951 diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
155952 index 5da43767b973..20823725daca 100644
155953 --- a/tools/testing/selftests/resctrl/cat_test.c
155954 +++ b/tools/testing/selftests/resctrl/cat_test.c
155955 @@ -17,10 +17,10 @@
155956  #define MAX_DIFF_PERCENT       4
155957  #define MAX_DIFF               1000000
155959 -int count_of_bits;
155960 -char cbm_mask[256];
155961 -unsigned long long_mask;
155962 -unsigned long cache_size;
155963 +static int count_of_bits;
155964 +static char cbm_mask[256];
155965 +static unsigned long long_mask;
155966 +static unsigned long cache_size;
155969   * Change schemata. Write schemata to specified
155970 @@ -136,7 +136,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
155971                 return -1;
155973         /* Get default cbm mask for L3/L2 cache */
155974 -       ret = get_cbm_mask(cache_type);
155975 +       ret = get_cbm_mask(cache_type, cbm_mask);
155976         if (ret)
155977                 return ret;
155979 @@ -164,7 +164,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
155980                 return -1;
155982         struct resctrl_val_param param = {
155983 -               .resctrl_val    = "cat",
155984 +               .resctrl_val    = CAT_STR,
155985                 .cpu_no         = cpu_no,
155986                 .mum_resctrlfs  = 0,
155987                 .setup          = cat_setup,
155988 diff --git a/tools/testing/selftests/resctrl/cqm_test.c b/tools/testing/selftests/resctrl/cqm_test.c
155989 index c8756152bd61..271752e9ef5b 100644
155990 --- a/tools/testing/selftests/resctrl/cqm_test.c
155991 +++ b/tools/testing/selftests/resctrl/cqm_test.c
155992 @@ -16,10 +16,10 @@
155993  #define MAX_DIFF               2000000
155994  #define MAX_DIFF_PERCENT       15
155996 -int count_of_bits;
155997 -char cbm_mask[256];
155998 -unsigned long long_mask;
155999 -unsigned long cache_size;
156000 +static int count_of_bits;
156001 +static char cbm_mask[256];
156002 +static unsigned long long_mask;
156003 +static unsigned long cache_size;
156005  static int cqm_setup(int num, ...)
156007 @@ -86,7 +86,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
156008                 return errno;
156009         }
156011 -       while (fgets(temp, 1024, fp)) {
156012 +       while (fgets(temp, sizeof(temp), fp)) {
156013                 char *token = strtok(temp, ":\t");
156014                 int fields = 0;
156016 @@ -125,7 +125,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
156017         if (!validate_resctrl_feature_request("cqm"))
156018                 return -1;
156020 -       ret = get_cbm_mask("L3");
156021 +       ret = get_cbm_mask("L3", cbm_mask);
156022         if (ret)
156023                 return ret;
156025 @@ -145,7 +145,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
156026         }
156028         struct resctrl_val_param param = {
156029 -               .resctrl_val    = "cqm",
156030 +               .resctrl_val    = CQM_STR,
156031                 .ctrlgrp        = "c1",
156032                 .mongrp         = "m1",
156033                 .cpu_no         = cpu_no,
156034 diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
156035 index 79c611c99a3d..51e5cf22632f 100644
156036 --- a/tools/testing/selftests/resctrl/fill_buf.c
156037 +++ b/tools/testing/selftests/resctrl/fill_buf.c
156038 @@ -115,7 +115,7 @@ static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
156040         while (1) {
156041                 ret = fill_one_span_read(start_ptr, end_ptr);
156042 -               if (!strcmp(resctrl_val, "cat"))
156043 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
156044                         break;
156045         }
156047 @@ -134,7 +134,7 @@ static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
156049         while (1) {
156050                 fill_one_span_write(start_ptr, end_ptr);
156051 -               if (!strcmp(resctrl_val, "cat"))
156052 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
156053                         break;
156054         }
156056 diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
156057 index 7bf8eaa6204b..6449fbd96096 100644
156058 --- a/tools/testing/selftests/resctrl/mba_test.c
156059 +++ b/tools/testing/selftests/resctrl/mba_test.c
156060 @@ -141,7 +141,7 @@ void mba_test_cleanup(void)
156061  int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
156063         struct resctrl_val_param param = {
156064 -               .resctrl_val    = "mba",
156065 +               .resctrl_val    = MBA_STR,
156066                 .ctrlgrp        = "c1",
156067                 .mongrp         = "m1",
156068                 .cpu_no         = cpu_no,
156069 diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
156070 index 4700f7453f81..ec6cfe01c9c2 100644
156071 --- a/tools/testing/selftests/resctrl/mbm_test.c
156072 +++ b/tools/testing/selftests/resctrl/mbm_test.c
156073 @@ -114,7 +114,7 @@ void mbm_test_cleanup(void)
156074  int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
156076         struct resctrl_val_param param = {
156077 -               .resctrl_val    = "mbm",
156078 +               .resctrl_val    = MBM_STR,
156079                 .ctrlgrp        = "c1",
156080                 .mongrp         = "m1",
156081                 .span           = span,
156082 diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
156083 index 39bf59c6b9c5..9dcc96e1ad3d 100644
156084 --- a/tools/testing/selftests/resctrl/resctrl.h
156085 +++ b/tools/testing/selftests/resctrl/resctrl.h
156086 @@ -28,6 +28,10 @@
156087  #define RESCTRL_PATH           "/sys/fs/resctrl"
156088  #define PHYS_ID_PATH           "/sys/devices/system/cpu/cpu"
156089  #define CBM_MASK_PATH          "/sys/fs/resctrl/info"
156090 +#define L3_PATH                        "/sys/fs/resctrl/info/L3"
156091 +#define MB_PATH                        "/sys/fs/resctrl/info/MB"
156092 +#define L3_MON_PATH            "/sys/fs/resctrl/info/L3_MON"
156093 +#define L3_MON_FEATURES_PATH   "/sys/fs/resctrl/info/L3_MON/mon_features"
156095  #define PARENT_EXIT(err_msg)                   \
156096         do {                                    \
156097 @@ -62,11 +66,16 @@ struct resctrl_val_param {
156098         int             (*setup)(int num, ...);
156101 -pid_t bm_pid, ppid;
156102 -int tests_run;
156103 +#define MBM_STR                        "mbm"
156104 +#define MBA_STR                        "mba"
156105 +#define CQM_STR                        "cqm"
156106 +#define CAT_STR                        "cat"
156108 -char llc_occup_path[1024];
156109 -bool is_amd;
156110 +extern pid_t bm_pid, ppid;
156111 +extern int tests_run;
156113 +extern char llc_occup_path[1024];
156114 +extern bool is_amd;
156116  bool check_resctrlfs_support(void);
156117  int filter_dmesg(void);
156118 @@ -74,7 +83,7 @@ int remount_resctrlfs(bool mum_resctrlfs);
156119  int get_resource_id(int cpu_no, int *resource_id);
156120  int umount_resctrlfs(void);
156121  int validate_bw_report_request(char *bw_report);
156122 -bool validate_resctrl_feature_request(char *resctrl_val);
156123 +bool validate_resctrl_feature_request(const char *resctrl_val);
156124  char *fgrep(FILE *inf, const char *str);
156125  int taskset_benchmark(pid_t bm_pid, int cpu_no);
156126  void run_benchmark(int signum, siginfo_t *info, void *ucontext);
156127 @@ -92,7 +101,7 @@ void tests_cleanup(void);
156128  void mbm_test_cleanup(void);
156129  int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
156130  void mba_test_cleanup(void);
156131 -int get_cbm_mask(char *cache_type);
156132 +int get_cbm_mask(char *cache_type, char *cbm_mask);
156133  int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
156134  void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
156135  int cat_val(struct resctrl_val_param *param);
156136 diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
156137 index 425cc85ac883..ac2269610aa9 100644
156138 --- a/tools/testing/selftests/resctrl/resctrl_tests.c
156139 +++ b/tools/testing/selftests/resctrl/resctrl_tests.c
156140 @@ -73,7 +73,7 @@ int main(int argc, char **argv)
156141                 }
156142         }
156144 -       while ((c = getopt(argc_new, argv, "ht:b:")) != -1) {
156145 +       while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
156146                 char *token;
156148                 switch (c) {
156149 @@ -85,13 +85,13 @@ int main(int argc, char **argv)
156150                         cqm_test = false;
156151                         cat_test = false;
156152                         while (token) {
156153 -                               if (!strcmp(token, "mbm")) {
156154 +                               if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) {
156155                                         mbm_test = true;
156156 -                               } else if (!strcmp(token, "mba")) {
156157 +                               } else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) {
156158                                         mba_test = true;
156159 -                               } else if (!strcmp(token, "cqm")) {
156160 +                               } else if (!strncmp(token, CQM_STR, sizeof(CQM_STR))) {
156161                                         cqm_test = true;
156162 -                               } else if (!strcmp(token, "cat")) {
156163 +                               } else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) {
156164                                         cat_test = true;
156165                                 } else {
156166                                         printf("invalid argument\n");
156167 @@ -161,7 +161,7 @@ int main(int argc, char **argv)
156168         if (!is_amd && mbm_test) {
156169                 printf("# Starting MBM BW change ...\n");
156170                 if (!has_ben)
156171 -                       sprintf(benchmark_cmd[5], "%s", "mba");
156172 +                       sprintf(benchmark_cmd[5], "%s", MBA_STR);
156173                 res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
156174                 printf("%sok MBM: bw change\n", res ? "not " : "");
156175                 mbm_test_cleanup();
156176 @@ -181,7 +181,7 @@ int main(int argc, char **argv)
156177         if (cqm_test) {
156178                 printf("# Starting CQM test ...\n");
156179                 if (!has_ben)
156180 -                       sprintf(benchmark_cmd[5], "%s", "cqm");
156181 +                       sprintf(benchmark_cmd[5], "%s", CQM_STR);
156182                 res = cqm_resctrl_val(cpu_no, no_of_bits, benchmark_cmd);
156183                 printf("%sok CQM: test\n", res ? "not " : "");
156184                 cqm_test_cleanup();
156185 diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
156186 index 520fea3606d1..8df557894059 100644
156187 --- a/tools/testing/selftests/resctrl/resctrl_val.c
156188 +++ b/tools/testing/selftests/resctrl/resctrl_val.c
156189 @@ -221,8 +221,8 @@ static int read_from_imc_dir(char *imc_dir, int count)
156190   */
156191  static int num_of_imcs(void)
156193 +       char imc_dir[512], *temp;
156194         unsigned int count = 0;
156195 -       char imc_dir[512];
156196         struct dirent *ep;
156197         int ret;
156198         DIR *dp;
156199 @@ -230,7 +230,25 @@ static int num_of_imcs(void)
156200         dp = opendir(DYN_PMU_PATH);
156201         if (dp) {
156202                 while ((ep = readdir(dp))) {
156203 -                       if (strstr(ep->d_name, UNCORE_IMC)) {
156204 +                       temp = strstr(ep->d_name, UNCORE_IMC);
156205 +                       if (!temp)
156206 +                               continue;
156208 +                       /*
156209 +                        * imc counters are named as "uncore_imc_<n>", hence
156210 +                        * increment the pointer to point to <n>. Note that
156211 +                        * sizeof(UNCORE_IMC) would count for null character as
156212 +                        * well and hence the last underscore character in
156213 +                        * uncore_imc'_' need not be counted.
156214 +                        */
156215 +                       temp = temp + sizeof(UNCORE_IMC);
156217 +                       /*
156218 +                        * Some directories under "DYN_PMU_PATH" could have
156219 +                        * names like "uncore_imc_free_running", hence, check if
156220 +                        * first character is a numerical digit or not.
156221 +                        */
156222 +                       if (temp[0] >= '0' && temp[0] <= '9') {
156223                                 sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
156224                                         ep->d_name);
156225                                 ret = read_from_imc_dir(imc_dir, count);
156226 @@ -282,9 +300,9 @@ static int initialize_mem_bw_imc(void)
156227   * Memory B/W utilized by a process on a socket can be calculated using
156228   * iMC counters. Perf events are used to read these counters.
156229   *
156230 - * Return: >= 0 on success. < 0 on failure.
156231 + * Return: = 0 on success. < 0 on failure.
156232   */
156233 -static float get_mem_bw_imc(int cpu_no, char *bw_report)
156234 +static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
156236         float reads, writes, of_mul_read, of_mul_write;
156237         int imc, j, ret;
156238 @@ -355,13 +373,18 @@ static float get_mem_bw_imc(int cpu_no, char *bw_report)
156239                 close(imc_counters_config[imc][WRITE].fd);
156240         }
156242 -       if (strcmp(bw_report, "reads") == 0)
156243 -               return reads;
156244 +       if (strcmp(bw_report, "reads") == 0) {
156245 +               *bw_imc = reads;
156246 +               return 0;
156247 +       }
156249 -       if (strcmp(bw_report, "writes") == 0)
156250 -               return writes;
156251 +       if (strcmp(bw_report, "writes") == 0) {
156252 +               *bw_imc = writes;
156253 +               return 0;
156254 +       }
156256 -       return (reads + writes);
156257 +       *bw_imc = reads + writes;
156258 +       return 0;
156261  void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
156262 @@ -397,10 +420,10 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
156263                 return;
156264         }
156266 -       if (strcmp(resctrl_val, "mbm") == 0)
156267 +       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
156268                 set_mbm_path(ctrlgrp, mongrp, resource_id);
156270 -       if ((strcmp(resctrl_val, "mba") == 0)) {
156271 +       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
156272                 if (ctrlgrp)
156273                         sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
156274                                 RESCTRL_PATH, ctrlgrp, resource_id);
156275 @@ -420,9 +443,8 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
156276   * 1. If con_mon grp is given, then read from it
156277   * 2. If con_mon grp is not given, then read from root con_mon grp
156278   */
156279 -static unsigned long get_mem_bw_resctrl(void)
156280 +static int get_mem_bw_resctrl(unsigned long *mbm_total)
156282 -       unsigned long mbm_total = 0;
156283         FILE *fp;
156285         fp = fopen(mbm_total_path, "r");
156286 @@ -431,7 +453,7 @@ static unsigned long get_mem_bw_resctrl(void)
156288                 return -1;
156289         }
156290 -       if (fscanf(fp, "%lu", &mbm_total) <= 0) {
156291 +       if (fscanf(fp, "%lu", mbm_total) <= 0) {
156292                 perror("Could not get mbm local bytes");
156293                 fclose(fp);
156295 @@ -439,7 +461,7 @@ static unsigned long get_mem_bw_resctrl(void)
156296         }
156297         fclose(fp);
156299 -       return mbm_total;
156300 +       return 0;
156303  pid_t bm_pid, ppid;
156304 @@ -524,14 +546,15 @@ static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
156305                 return;
156306         }
156308 -       if (strcmp(resctrl_val, "cqm") == 0)
156309 +       if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
156310                 set_cqm_path(ctrlgrp, mongrp, resource_id);
156313  static int
156314  measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
156316 -       unsigned long bw_imc, bw_resc, bw_resc_end;
156317 +       unsigned long bw_resc, bw_resc_end;
156318 +       float bw_imc;
156319         int ret;
156321         /*
156322 @@ -541,13 +564,13 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
156323          * Compare the two values to validate resctrl value.
156324          * It takes 1sec to measure the data.
156325          */
156326 -       bw_imc = get_mem_bw_imc(param->cpu_no, param->bw_report);
156327 -       if (bw_imc <= 0)
156328 -               return bw_imc;
156329 +       ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
156330 +       if (ret < 0)
156331 +               return ret;
156333 -       bw_resc_end = get_mem_bw_resctrl();
156334 -       if (bw_resc_end <= 0)
156335 -               return bw_resc_end;
156336 +       ret = get_mem_bw_resctrl(&bw_resc_end);
156337 +       if (ret < 0)
156338 +               return ret;
156340         bw_resc = (bw_resc_end - *bw_resc_start) / MB;
156341         ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
156342 @@ -579,8 +602,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
156343         if (strcmp(param->filename, "") == 0)
156344                 sprintf(param->filename, "stdio");
156346 -       if ((strcmp(resctrl_val, "mba")) == 0 ||
156347 -           (strcmp(resctrl_val, "mbm")) == 0) {
156348 +       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
156349 +           !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
156350                 ret = validate_bw_report_request(param->bw_report);
156351                 if (ret)
156352                         return ret;
156353 @@ -674,15 +697,15 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
156354         if (ret)
156355                 goto out;
156357 -       if ((strcmp(resctrl_val, "mbm") == 0) ||
156358 -           (strcmp(resctrl_val, "mba") == 0)) {
156359 +       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
156360 +           !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
156361                 ret = initialize_mem_bw_imc();
156362                 if (ret)
156363                         goto out;
156365                 initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
156366                                           param->cpu_no, resctrl_val);
156367 -       } else if (strcmp(resctrl_val, "cqm") == 0)
156368 +       } else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
156369                 initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
156370                                             param->cpu_no, resctrl_val);
156372 @@ -710,8 +733,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
156374         /* Test runs until the callback setup() tells the test to stop. */
156375         while (1) {
156376 -               if ((strcmp(resctrl_val, "mbm") == 0) ||
156377 -                   (strcmp(resctrl_val, "mba") == 0)) {
156378 +               if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
156379 +                   !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
156380                         ret = param->setup(1, param);
156381                         if (ret) {
156382                                 ret = 0;
156383 @@ -721,7 +744,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
156384                         ret = measure_vals(param, &bw_resc_start);
156385                         if (ret)
156386                                 break;
156387 -               } else if (strcmp(resctrl_val, "cqm") == 0) {
156388 +               } else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR))) {
156389                         ret = param->setup(1, param);
156390                         if (ret) {
156391                                 ret = 0;
156392 diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
156393 index 19c0ec4045a4..b57170f53861 100644
156394 --- a/tools/testing/selftests/resctrl/resctrlfs.c
156395 +++ b/tools/testing/selftests/resctrl/resctrlfs.c
156396 @@ -49,8 +49,6 @@ static int find_resctrl_mount(char *buffer)
156397         return -ENOENT;
156400 -char cbm_mask[256];
156403   * remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
156404   * @mum_resctrlfs:     Should the resctrl FS be remounted?
156405 @@ -205,16 +203,18 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
156407   * get_cbm_mask - Get cbm mask for given cache
156408   * @cache_type:        Cache level L2/L3
156410 - * Mask is stored in cbm_mask which is global variable.
156411 + * @cbm_mask:  cbm_mask returned as a string
156412   *
156413   * Return: = 0 on success, < 0 on failure.
156414   */
156415 -int get_cbm_mask(char *cache_type)
156416 +int get_cbm_mask(char *cache_type, char *cbm_mask)
156418         char cbm_mask_path[1024];
156419         FILE *fp;
156421 +       if (!cbm_mask)
156422 +               return -1;
156424         sprintf(cbm_mask_path, "%s/%s/cbm_mask", CBM_MASK_PATH, cache_type);
156426         fp = fopen(cbm_mask_path, "r");
156427 @@ -334,7 +334,7 @@ void run_benchmark(int signum, siginfo_t *info, void *ucontext)
156428                 operation = atoi(benchmark_cmd[4]);
156429                 sprintf(resctrl_val, "%s", benchmark_cmd[5]);
156431 -               if (strcmp(resctrl_val, "cqm") != 0)
156432 +               if (strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
156433                         buffer_span = span * MB;
156434                 else
156435                         buffer_span = span;
156436 @@ -459,8 +459,8 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
156437                 goto out;
156439         /* Create mon grp and write pid into it for "mbm" and "cqm" test */
156440 -       if ((strcmp(resctrl_val, "cqm") == 0) ||
156441 -           (strcmp(resctrl_val, "mbm") == 0)) {
156442 +       if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)) ||
156443 +           !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
156444                 if (strlen(mongrp)) {
156445                         sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
156446                         sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
156447 @@ -505,9 +505,9 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
156448         int resource_id, ret = 0;
156449         FILE *fp;
156451 -       if ((strcmp(resctrl_val, "mba") != 0) &&
156452 -           (strcmp(resctrl_val, "cat") != 0) &&
156453 -           (strcmp(resctrl_val, "cqm") != 0))
156454 +       if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
156455 +           strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) &&
156456 +           strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
156457                 return -ENOENT;
156459         if (!schemata) {
156460 @@ -528,9 +528,10 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
156461         else
156462                 sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
156464 -       if (!strcmp(resctrl_val, "cat") || !strcmp(resctrl_val, "cqm"))
156465 +       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
156466 +           !strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
156467                 sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
156468 -       if (strcmp(resctrl_val, "mba") == 0)
156469 +       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)))
156470                 sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);
156472         fp = fopen(controlgroup, "w");
156473 @@ -615,26 +616,56 @@ char *fgrep(FILE *inf, const char *str)
156474   * validate_resctrl_feature_request - Check if requested feature is valid.
156475   * @resctrl_val:       Requested feature
156476   *
156477 - * Return: 0 on success, non-zero on failure
156478 + * Return: True if the feature is supported, else false
156479   */
156480 -bool validate_resctrl_feature_request(char *resctrl_val)
156481 +bool validate_resctrl_feature_request(const char *resctrl_val)
156483 -       FILE *inf = fopen("/proc/cpuinfo", "r");
156484 +       struct stat statbuf;
156485         bool found = false;
156486         char *res;
156487 +       FILE *inf;
156489 -       if (!inf)
156490 +       if (!resctrl_val)
156491                 return false;
156493 -       res = fgrep(inf, "flags");
156495 -       if (res) {
156496 -               char *s = strchr(res, ':');
156497 +       if (remount_resctrlfs(false))
156498 +               return false;
156500 -               found = s && !strstr(s, resctrl_val);
156501 -               free(res);
156502 +       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
156503 +               if (!stat(L3_PATH, &statbuf))
156504 +                       return true;
156505 +       } else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
156506 +               if (!stat(MB_PATH, &statbuf))
156507 +                       return true;
156508 +       } else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
156509 +                  !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
156510 +               if (!stat(L3_MON_PATH, &statbuf)) {
156511 +                       inf = fopen(L3_MON_FEATURES_PATH, "r");
156512 +                       if (!inf)
156513 +                               return false;
156515 +                       if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
156516 +                               res = fgrep(inf, "llc_occupancy");
156517 +                               if (res) {
156518 +                                       found = true;
156519 +                                       free(res);
156520 +                               }
156521 +                       }
156523 +                       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
156524 +                               res = fgrep(inf, "mbm_total_bytes");
156525 +                               if (res) {
156526 +                                       free(res);
156527 +                                       res = fgrep(inf, "mbm_local_bytes");
156528 +                                       if (res) {
156529 +                                               found = true;
156530 +                                               free(res);
156531 +                                       }
156532 +                               }
156533 +                       }
156534 +                       fclose(inf);
156535 +               }
156536         }
156537 -       fclose(inf);
156539         return found;
156541 diff --git a/tools/testing/selftests/x86/thunks_32.S b/tools/testing/selftests/x86/thunks_32.S
156542 index a71d92da8f46..f3f56e681e9f 100644
156543 --- a/tools/testing/selftests/x86/thunks_32.S
156544 +++ b/tools/testing/selftests/x86/thunks_32.S
156545 @@ -45,3 +45,5 @@ call64_from_32:
156546         ret
156548  .size call64_from_32, .-call64_from_32
156550 +.section .note.GNU-stack,"",%progbits
156551 diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
156552 index 62bd908ecd58..f08f5e82460b 100644
156553 --- a/virt/kvm/coalesced_mmio.c
156554 +++ b/virt/kvm/coalesced_mmio.c
156555 @@ -174,21 +174,36 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
156556                                            struct kvm_coalesced_mmio_zone *zone)
156558         struct kvm_coalesced_mmio_dev *dev, *tmp;
156559 +       int r;
156561         if (zone->pio != 1 && zone->pio != 0)
156562                 return -EINVAL;
156564         mutex_lock(&kvm->slots_lock);
156566 -       list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
156567 +       list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
156568                 if (zone->pio == dev->zone.pio &&
156569                     coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
156570 -                       kvm_io_bus_unregister_dev(kvm,
156571 +                       r = kvm_io_bus_unregister_dev(kvm,
156572                                 zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
156573                         kvm_iodevice_destructor(&dev->dev);
156575 +                       /*
156576 +                        * On failure, unregister destroys all devices on the
156577 +                        * bus _except_ the target device, i.e. coalesced_zones
156578 +                        * has been modified.  No need to restart the walk as
156579 +                        * there aren't any zones left.
156580 +                        */
156581 +                       if (r)
156582 +                               break;
156583                 }
156584 +       }
156586         mutex_unlock(&kvm->slots_lock);
156588 +       /*
156589 +        * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's
156590 +        * perspective, the coalesced MMIO is most definitely unregistered.
156591 +        */
156592         return 0;
156594 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
156595 index 383df23514b9..5cabc6c748db 100644
156596 --- a/virt/kvm/kvm_main.c
156597 +++ b/virt/kvm/kvm_main.c
156598 @@ -2758,8 +2758,8 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
156599         if (val < grow_start)
156600                 val = grow_start;
156602 -       if (val > halt_poll_ns)
156603 -               val = halt_poll_ns;
156604 +       if (val > vcpu->kvm->max_halt_poll_ns)
156605 +               val = vcpu->kvm->max_halt_poll_ns;
156607         vcpu->halt_poll_ns = val;
156608  out:
156609 @@ -2838,7 +2838,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
156610                                 goto out;
156611                         }
156612                         poll_end = cur = ktime_get();
156613 -               } while (single_task_running() && ktime_before(cur, stop));
156614 +               } while (single_task_running() && !need_resched() &&
156615 +                        ktime_before(cur, stop));
156616         }
156618         prepare_to_rcuwait(&vcpu->wait);
156619 @@ -4486,15 +4487,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
156622  /* Caller must hold slots_lock. */
156623 -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
156624 -                              struct kvm_io_device *dev)
156625 +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
156626 +                             struct kvm_io_device *dev)
156628         int i, j;
156629         struct kvm_io_bus *new_bus, *bus;
156631         bus = kvm_get_bus(kvm, bus_idx);
156632         if (!bus)
156633 -               return;
156634 +               return 0;
156636         for (i = 0; i < bus->dev_count; i++)
156637                 if (bus->range[i].dev == dev) {
156638 @@ -4502,7 +4503,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
156639                 }
156641         if (i == bus->dev_count)
156642 -               return;
156643 +               return 0;
156645         new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
156646                           GFP_KERNEL_ACCOUNT);
156647 @@ -4511,7 +4512,13 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
156648                 new_bus->dev_count--;
156649                 memcpy(new_bus->range + i, bus->range + i + 1,
156650                                 flex_array_size(new_bus, range, new_bus->dev_count - i));
156651 -       } else {
156652 +       }
156654 +       rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
156655 +       synchronize_srcu_expedited(&kvm->srcu);
156657 +       /* Destroy the old bus _after_ installing the (null) bus. */
156658 +       if (!new_bus) {
156659                 pr_err("kvm: failed to shrink bus, removing it completely\n");
156660                 for (j = 0; j < bus->dev_count; j++) {
156661                         if (j == i)
156662 @@ -4520,10 +4527,8 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
156663                 }
156664         }
156666 -       rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
156667 -       synchronize_srcu_expedited(&kvm->srcu);
156668         kfree(bus);
156669 -       return;
156670 +       return new_bus ? 0 : -ENOMEM;
156673  struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,